#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONVERSION_H
#define EIGEN_CXX11_TENSOR_TENSOR_CONVERSION_H

namespace Eigen {

/** \class TensorConversionOp
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Tensor conversion class. This class makes it possible to vectorize
  * type casting operations when the number of scalars per packet in the source
  * and the destination type differ.
  */
namespace internal {
template<typename TargetType, typename XprType>
struct traits<TensorConversionOp<TargetType, XprType> >
{
  // Type promotion to handle the case where the types of the lhs and the rhs are different.
  typedef TargetType Scalar;
  typedef typename traits<XprType>::StorageKind StorageKind;
  typedef typename traits<XprType>::Index Index;
  typedef typename XprType::Nested Nested;
  typedef std::remove_reference_t<Nested> Nested_;
  static constexpr int NumDimensions = traits<XprType>::NumDimensions;
  static constexpr int Layout = traits<XprType>::Layout;
  enum { Flags = 0 };
  typedef typename TypeConversion<Scalar, typename traits<XprType>::PointerType>::type PointerType;
};
template<typename TargetType, typename XprType>
struct eval<TensorConversionOp<TargetType, XprType>, Eigen::Dense>
{
  typedef const TensorConversionOp<TargetType, XprType>& type;
};
template<typename TargetType, typename XprType>
struct nested<TensorConversionOp<TargetType, XprType>, 1, typename eval<TensorConversionOp<TargetType, XprType> >::type>
{
  typedef TensorConversionOp<TargetType, XprType> type;
};

}  // end namespace internal
template <typename TensorEvaluator, typename SrcPacket, typename TgtPacket,
          int SrcCoeffRatio, int TgtCoeffRatio>
struct PacketConverter;
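// Reading aid (added note, not in the original header): SrcCoeffRatio is the
// number of source packets consumed per conversion and TgtCoeffRatio the
// number of target packets produced. For example, with 4-wide float packets
// and 2-wide double packets, double -> float is a 2:1 conversion (two source
// packets fill one target packet), while float -> double is 1:2.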
template <typename TensorEvaluator, typename SrcPacket, typename TgtPacket>
struct PacketConverter<TensorEvaluator, SrcPacket, TgtPacket, 1, 1> {
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketConverter(const TensorEvaluator& impl)
      : m_impl(impl) {}

  template<int LoadMode, typename Index>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const {
    // One source packet maps to exactly one target packet.
    return internal::pcast<SrcPacket, TgtPacket>(m_impl.template packet<LoadMode>(index));
  }

 private:
  const TensorEvaluator& m_impl;
};
template <typename TensorEvaluator, typename SrcPacket, typename TgtPacket>
struct PacketConverter<TensorEvaluator, SrcPacket, TgtPacket, 2, 1> {
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketConverter(const TensorEvaluator& impl)
      : m_impl(impl) {}

  template<int LoadMode, typename Index>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const {
    const int SrcPacketSize = internal::unpacket_traits<SrcPacket>::size;

    // Two consecutive source packets are combined into one target packet.
    SrcPacket src1 = m_impl.template packet<LoadMode>(index);
    SrcPacket src2 = m_impl.template packet<LoadMode>(index + SrcPacketSize);
    TgtPacket result = internal::pcast<SrcPacket, TgtPacket>(src1, src2);
    return result;
  }

 private:
  const TensorEvaluator& m_impl;
};
template <typename TensorEvaluator, typename SrcPacket, typename TgtPacket>
struct PacketConverter<TensorEvaluator, SrcPacket, TgtPacket, 4, 1> {
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketConverter(const TensorEvaluator& impl)
      : m_impl(impl) {}

  template<int LoadMode, typename Index>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const {
    const int SrcPacketSize = internal::unpacket_traits<SrcPacket>::size;

    // Four consecutive source packets are combined into one target packet.
    SrcPacket src1 = m_impl.template packet<LoadMode>(index);
    SrcPacket src2 = m_impl.template packet<LoadMode>(index + SrcPacketSize);
    SrcPacket src3 = m_impl.template packet<LoadMode>(index + 2 * SrcPacketSize);
    SrcPacket src4 = m_impl.template packet<LoadMode>(index + 3 * SrcPacketSize);
    TgtPacket result = internal::pcast<SrcPacket, TgtPacket>(src1, src2, src3, src4);
    return result;
  }

 private:
  const TensorEvaluator& m_impl;
};
template <typename TensorEvaluator, typename SrcPacket, typename TgtPacket>
struct PacketConverter<TensorEvaluator, SrcPacket, TgtPacket, 8, 1> {
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketConverter(const TensorEvaluator& impl)
      : m_impl(impl) {}

  template<int LoadMode, typename Index>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const {
    const int SrcPacketSize = internal::unpacket_traits<SrcPacket>::size;

    // Eight consecutive source packets are combined into one target packet.
    SrcPacket src1 = m_impl.template packet<LoadMode>(index);
    SrcPacket src2 = m_impl.template packet<LoadMode>(index + 1 * SrcPacketSize);
    SrcPacket src3 = m_impl.template packet<LoadMode>(index + 2 * SrcPacketSize);
    SrcPacket src4 = m_impl.template packet<LoadMode>(index + 3 * SrcPacketSize);
    SrcPacket src5 = m_impl.template packet<LoadMode>(index + 4 * SrcPacketSize);
    SrcPacket src6 = m_impl.template packet<LoadMode>(index + 5 * SrcPacketSize);
    SrcPacket src7 = m_impl.template packet<LoadMode>(index + 6 * SrcPacketSize);
    SrcPacket src8 = m_impl.template packet<LoadMode>(index + 7 * SrcPacketSize);
    TgtPacket result = internal::pcast<SrcPacket, TgtPacket>(src1, src2, src3, src4,
                                                             src5, src6, src7, src8);
    return result;
  }

 private:
  const TensorEvaluator& m_impl;
};
template <typename TensorEvaluator, typename SrcPacket, typename TgtPacket, int TgtCoeffRatio>
struct PacketConverter<TensorEvaluator, SrcPacket, TgtPacket, 1, TgtCoeffRatio> {
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketConverter(const TensorEvaluator& impl)
      : m_impl(impl), m_maxIndex(impl.dimensions().TotalSize()) {}

  template<int LoadMode, typename Index>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const {
    const int SrcPacketSize = internal::unpacket_traits<SrcPacket>::size;
    // Only call m_impl.packet() when we have direct access to the underlying data. This
    // ensures that we don't compute the subexpression twice. We may however load some
    // coefficients twice, but in practice this doesn't negatively impact performance.
    if (m_impl.data() && (index + SrcPacketSize < m_maxIndex)) {
      // Force unaligned loads since we can no longer guarantee alignment.
      return internal::pcast<SrcPacket, TgtPacket>(m_impl.template packet<Unaligned>(index));
    } else {
      const int TgtPacketSize = internal::unpacket_traits<TgtPacket>::size;
      typedef typename internal::unpacket_traits<SrcPacket>::type SrcType;
      typedef typename internal::unpacket_traits<TgtPacket>::type TgtType;
      internal::scalar_cast_op<SrcType, TgtType> converter;
      EIGEN_ALIGN_MAX typename internal::unpacket_traits<TgtPacket>::type values[TgtPacketSize];
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < TgtPacketSize; ++i) {
        values[i] = converter(m_impl.coeff(index + i));
      }
      TgtPacket rslt = internal::pload<TgtPacket>(values);
      return rslt;
    }
  }

 private:
  const TensorEvaluator& m_impl;
  const typename TensorEvaluator::Index m_maxIndex;
};
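// Boundary-check illustration (added note): with m_maxIndex == 10 and
// SrcPacketSize == 4, a call at index == 6 fails the test (6 + 4 < 10 is
// false), so the tail coefficients are converted one at a time through the
// scalar path instead of reading past the end of the buffer.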
template<typename TargetType, typename XprType>
class TensorConversionOp : public TensorBase<TensorConversionOp<TargetType, XprType>, ReadOnlyAccessors>
{
  public:
    typedef typename internal::traits<TensorConversionOp>::Scalar Scalar;
    typedef typename internal::traits<TensorConversionOp>::StorageKind StorageKind;
    typedef typename internal::traits<TensorConversionOp>::Index Index;
    typedef typename internal::nested<TensorConversionOp>::type Nested;
    typedef Scalar CoeffReturnType;
    typedef typename NumTraits<Scalar>::Real RealScalar;

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorConversionOp(const XprType& xpr)
        : m_xpr(xpr) {}

    EIGEN_DEVICE_FUNC const internal::remove_all_t<typename XprType::Nested>&
    expression() const { return m_xpr; }

  protected:
    typename XprType::Nested m_xpr;
};
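// Minimal usage sketch (added note): TensorConversionOp is not meant to be
// constructed directly; it is the expression returned by
// TensorBase::cast<NewType>(), e.g.:
//
//   Eigen::Tensor<int, 2> a(2, 3);
//   a.setConstant(7);
//   Eigen::Tensor<float, 2> b = a.cast<float>();  // evaluates a TensorConversionOp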
template <bool SameType, typename Eval, typename EvalPointerType>
struct ConversionSubExprEval {
  static EIGEN_STRONG_INLINE bool run(Eval& impl, EvalPointerType) {
    impl.evalSubExprsIfNeeded(NULL);
    return true;
  }
};

template <typename Eval, typename EvalPointerType>
struct ConversionSubExprEval<true, Eval, EvalPointerType> {
  static EIGEN_STRONG_INLINE bool run(Eval& impl, EvalPointerType data) {
    return impl.evalSubExprsIfNeeded(data);
  }
};
#ifdef EIGEN_USE_THREADS
template <bool SameType, typename Eval, typename EvalPointerType,
          typename EvalSubExprsCallback>
struct ConversionSubExprEvalAsync {
  static EIGEN_STRONG_INLINE void run(Eval& impl, EvalPointerType,
                                      EvalSubExprsCallback done) {
    impl.evalSubExprsIfNeededAsync(nullptr, std::move(done));
  }
};

template <typename Eval, typename EvalPointerType,
          typename EvalSubExprsCallback>
struct ConversionSubExprEvalAsync<true, Eval, EvalPointerType,
                                  EvalSubExprsCallback> {
  static EIGEN_STRONG_INLINE void run(Eval& impl, EvalPointerType data,
                                      EvalSubExprsCallback done) {
    impl.evalSubExprsIfNeededAsync(data, std::move(done));
  }
};
#endif  // EIGEN_USE_THREADS
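// Orientation note (added): the helpers below implement the two conversion
// granularities used by the evaluator. CoeffConv converts one coefficient at
// a time and PacketConv one packet at a time; both are specialized on whether
// the source and target scalar types are identical, so the same-type case
// degenerates to a plain copy with no casting overhead.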
namespace internal {

template <typename SrcType, typename TargetType, bool IsSameT>
struct CoeffConv {
  template <typename ArgType, typename Device>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetType run(
      const TensorEvaluator<ArgType, Device>& impl, Index index) {
    internal::scalar_cast_op<SrcType, TargetType> converter;
    return converter(impl.coeff(index));
  }
};
template <typename SrcType, typename TargetType>
struct CoeffConv<SrcType, TargetType, true> {
  template <typename ArgType, typename Device>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetType run(
      const TensorEvaluator<ArgType, Device>& impl, Index index) {
    // Source and target types are identical: no conversion needed.
    return impl.coeff(index);
  }
};
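// scalar_cast_op in a nutshell (added sketch; `cast_sketch` is a hypothetical
// stand-in, not an Eigen type): its call operator performs the scalar
// conversion, roughly
//
//   template <typename S, typename T>
//   struct cast_sketch {
//     T operator()(const S& s) const { return static_cast<T>(s); }
//   };
//
// Eigen's real functor routes through internal::cast so that non-trivial
// conversions (e.g. complex -> real) can be specialized.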
template <typename SrcPacket, typename TargetPacket, int LoadMode,
          bool ActuallyVectorize, bool IsSameT>
struct PacketConv {
  typedef typename internal::unpacket_traits<SrcPacket>::type SrcType;
  typedef typename internal::unpacket_traits<TargetPacket>::type TargetType;

  static constexpr int PacketSize = internal::unpacket_traits<TargetPacket>::size;

  template <typename ArgType, typename Device>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetPacket run(
      const TensorEvaluator<ArgType, Device>& impl, Index index) {
    // Fallback: convert coefficient by coefficient, then load the result as a packet.
    internal::scalar_cast_op<SrcType, TargetType> converter;
    EIGEN_ALIGN_MAX std::remove_const_t<TargetType> values[PacketSize];
    EIGEN_UNROLL_LOOP
    for (int i = 0; i < PacketSize; ++i) {
      values[i] = converter(impl.coeff(index + i));
    }
    TargetPacket rslt = internal::pload<TargetPacket>(values);
    return rslt;
  }
};
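// Why the aligned load is safe (added note): the temporary buffer above is
// declared EIGEN_ALIGN_MAX, so it satisfies the alignment requirement of
// internal::pload no matter how the original expression data is aligned.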
template <typename SrcPacket, typename TargetPacket, int LoadMode, bool IsSameT>
struct PacketConv<SrcPacket, TargetPacket, LoadMode, /*ActuallyVectorize=*/true, IsSameT> {
  typedef typename internal::unpacket_traits<SrcPacket>::type SrcType;
  typedef typename internal::unpacket_traits<TargetPacket>::type TargetType;

  template <typename ArgType, typename Device>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetPacket run(
      const TensorEvaluator<ArgType, Device>& impl, Index index) {
    const int SrcCoeffRatio = internal::type_casting_traits<SrcType, TargetType>::SrcCoeffRatio;
    const int TgtCoeffRatio = internal::type_casting_traits<SrcType, TargetType>::TgtCoeffRatio;
    PacketConverter<TensorEvaluator<ArgType, Device>, SrcPacket, TargetPacket,
                    SrcCoeffRatio, TgtCoeffRatio> converter(impl);
    return converter.template packet<LoadMode>(index);
  }
};
template <typename SrcPacket, typename TargetPacket, int LoadMode>
struct PacketConv<SrcPacket, TargetPacket, LoadMode, /*ActuallyVectorize=*/false, /*IsSameT=*/true> {
  typedef typename internal::unpacket_traits<TargetPacket>::type TargetType;
  static constexpr int PacketSize = internal::unpacket_traits<TargetPacket>::size;

  template <typename ArgType, typename Device>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetPacket run(
      const TensorEvaluator<ArgType, Device>& impl, Index index) {
    // Same type but no packet access on the argument: gather coefficients one by one.
    EIGEN_ALIGN_MAX std::remove_const_t<TargetType> values[PacketSize];
    for (int i = 0; i < PacketSize; ++i) values[i] = impl.coeff(index + i);
    return internal::pload<TargetPacket>(values);
  }
};
template <typename SrcPacket, typename TargetPacket, int LoadMode>
struct PacketConv<SrcPacket, TargetPacket, LoadMode, /*ActuallyVectorize=*/true, /*IsSameT=*/true> {
  template <typename ArgType, typename Device>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetPacket run(
      const TensorEvaluator<ArgType, Device>& impl, Index index) {
    // Same type with packet access: forward the packet load untouched.
    return impl.template packet<LoadMode>(index);
  }
};

}  // namespace internal
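// Dispatch summary (added note): which PacketConv specialization applies.
//
//   IsSameT  ActuallyVectorize  behavior
//   -------  -----------------  ------------------------------------------
//   false    false              scalar casts gathered into a packet (primary)
//   false    true               PacketConverter with vectorized pcast
//   true     false              coefficient-wise copy into a packet
//   true     true               plain packet load, no conversion at all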
// Eval as rvalue
template<typename TargetType, typename ArgType, typename Device>
struct TensorEvaluator<const TensorConversionOp<TargetType, ArgType>, Device>
{
  typedef TensorConversionOp<TargetType, ArgType> XprType;
  typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
  typedef TargetType Scalar;
  typedef TargetType CoeffReturnType;
  typedef internal::remove_all_t<typename internal::traits<ArgType>::Scalar> SrcType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  typedef typename PacketType<SrcType, Device>::type PacketSourceType;
  static constexpr int PacketSize = PacketType<CoeffReturnType, Device>::size;
  static constexpr bool IsSameType = internal::is_same<TargetType, SrcType>::value;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  enum {
    IsAligned = false,
    PacketAccess =
    #ifndef EIGEN_USE_SYCL
        true,
    #else
        TensorEvaluator<ArgType, Device>::PacketAccess &
        internal::type_casting_traits<SrcType, TargetType>::VectorizedCast,
    #endif
    BlockAccess = TensorEvaluator<ArgType, Device>::BlockAccess,
    PreferBlockAccess = TensorEvaluator<ArgType, Device>::PreferBlockAccess,
    RawAccess = false
  };

  static constexpr int Layout = TensorEvaluator<ArgType, Device>::Layout;
  static constexpr int NumDims = internal::array_size<Dimensions>::value;

  //===- Tensor block evaluation strategy (see TensorBlock.h) ------------===//
  typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
  typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;
  typedef typename TensorEvaluator<const ArgType, Device>::TensorBlock ArgTensorBlock;

  struct TensorConversionOpBlockFactory {
    template <typename ArgXprType>
    struct XprType {
      typedef TensorConversionOp<TargetType, const ArgXprType> type;
    };

    template <typename ArgXprType>
    typename XprType<ArgXprType>::type expr(const ArgXprType& expr) const {
      return typename XprType<ArgXprType>::type(expr);
    }
  };

  typedef internal::TensorUnaryExprBlock<TensorConversionOpBlockFactory,
                                         ArgTensorBlock> TensorBlock;
  //===--------------------------------------------------------------------===//

  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device) {}

  EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
    return ConversionSubExprEval<IsSameType, TensorEvaluator<ArgType, Device>,
                                 EvaluatorPointerType>::run(m_impl, data);
  }

#ifdef EIGEN_USE_THREADS
  template <typename EvalSubExprsCallback>
  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
      EvaluatorPointerType data, EvalSubExprsCallback done) {
    ConversionSubExprEvalAsync<IsSameType, TensorEvaluator<ArgType, Device>,
                               EvaluatorPointerType,
                               EvalSubExprsCallback>::run(m_impl, data, std::move(done));
  }
#endif

  EIGEN_STRONG_INLINE void cleanup() { m_impl.cleanup(); }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    return internal::CoeffConv<SrcType, TargetType, IsSameType>::run(m_impl, index);
  }

  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const {
    // If no cast is actually performed, packet access only requires the base
    // evaluator to support it; otherwise a vectorized cast implementation must
    // exist for this pair of types as well.
    const bool Vectorizable =
        IsSameType
        ? TensorEvaluator<ArgType, Device>::PacketAccess
        : int(TensorEvaluator<ArgType, Device>::PacketAccess) &
          int(internal::type_casting_traits<SrcType, TargetType>::VectorizedCast);
    return internal::PacketConv<PacketSourceType, PacketReturnType, LoadMode,
                                Vectorizable, IsSameType>::run(m_impl, index);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    const double cast_cost = TensorOpCost::CastCost<SrcType, TargetType>();
    if (vectorized) {
      const double SrcCoeffRatio =
          internal::type_casting_traits<SrcType, TargetType>::SrcCoeffRatio;
      const double TgtCoeffRatio =
          internal::type_casting_traits<SrcType, TargetType>::TgtCoeffRatio;
      return m_impl.costPerCoeff(vectorized) * (SrcCoeffRatio / PacketSize) +
             TensorOpCost(0, 0, TgtCoeffRatio * (cast_cost / PacketSize));
    } else {
      return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, cast_cost);
    }
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE internal::TensorBlockResourceRequirements
  getResourceRequirements() const {
    return m_impl.getResourceRequirements();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock
  block(TensorBlockDesc& desc, TensorBlockScratch& scratch,
        bool /*root_of_expr_ast*/ = false) const {
    return TensorBlock(m_impl.block(desc, scratch), TensorConversionOpBlockFactory());
  }

  EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; }

  /// required by sycl in order to extract the sycl accessor
  const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }

 protected:
  TensorEvaluator<ArgType, Device> m_impl;
};

} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_CONVERSION_H