10 #ifndef EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H
11 #define EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H
// internal::traits specialization for TensorAssignOp<LhsXprType, RhsXprType>.
// NOTE(review): extraction fragment — the embedded original line numbers jump
// (27 -> 29 -> 31 -> 33), so the struct's opening brace, at least one typedef
// (presumably StorageKind), and the tail of the Index typedef are missing
// from this view. Code left byte-identical; comments only.
26 template<
typename LhsXprType,
typename RhsXprType>
27 struct traits<TensorAssignOp<LhsXprType, RhsXprType> >
// The assignment expression exposes the scalar type of its left-hand side.
29 typedef typename LhsXprType::Scalar Scalar;
// Index type is promoted from both operands' Index types; the second
// template argument and the trailing `::type Index;` are on a missing line.
31 typedef typename promote_index_type<typename traits<LhsXprType>::Index,
33 typedef typename LhsXprType::Nested LhsNested;
34 typedef typename RhsXprType::Nested RhsNested;
// Reference-stripped versions of the nested expression types.
35 typedef std::remove_reference_t<LhsNested> LhsNested_;
36 typedef std::remove_reference_t<RhsNested> RhsNested_;
// Rank and storage layout are taken from the destination (LHS) expression.
37 static constexpr std::size_t NumDimensions = internal::traits<LhsXprType>::NumDimensions;
38 static constexpr
int Layout = internal::traits<LhsXprType>::Layout;
// internal::eval specialization: a TensorAssignOp "evaluates" to a const
// reference to itself — no temporary is materialized at this level.
// NOTE(review): extraction fragment — the struct's braces are not visible.
46 template<
typename LhsXprType,
typename RhsXprType>
47 struct eval<TensorAssignOp<LhsXprType, RhsXprType>,
Eigen::Dense>
49 typedef const TensorAssignOp<LhsXprType, RhsXprType>& type;
// internal::nested specialization: when a TensorAssignOp appears inside a
// larger expression, it is nested by value (the plain expression type).
// NOTE(review): extraction fragment — surrounding braces are missing here.
52 template<
typename LhsXprType,
typename RhsXprType>
53 struct nested<TensorAssignOp<LhsXprType, RhsXprType>, 1, typename eval<TensorAssignOp<LhsXprType, RhsXprType> >::type>
55 typedef TensorAssignOp<LhsXprType, RhsXprType> type;
// Public typedefs of the TensorAssignOp expression class, all forwarded from
// the internal::traits / internal::nested specializations.
// NOTE(review): extraction fragment — the `class TensorAssignOp ...` head
// (original lines 63-65) and the rest of the class body are missing from
// this view.
62 template<
typename LhsXprType,
typename RhsXprType>
66 typedef typename Eigen::internal::traits<TensorAssignOp>::Scalar
Scalar;
// Nested type used when this expression is embedded in a larger one.
69 typedef typename Eigen::internal::nested<TensorAssignOp>::type
Nested;
70 typedef typename Eigen::internal::traits<TensorAssignOp>::StorageKind
StorageKind;
71 typedef typename Eigen::internal::traits<TensorAssignOp>::Index
Index;
// Rank of the expression (NumDimensions comes from the LHS in traits).
73 static constexpr
int NumDims = Eigen::internal::traits<TensorAssignOp>::NumDimensions;
// TensorEvaluator specialization head for TensorAssignOp<LeftArgType,
// RightArgType> on a given Device.
// NOTE(review): extraction fragment — the `struct TensorEvaluator<...>` line
// and the typedef section between original lines 93 and 106 are missing
// from this view.
93 template<
typename LeftArgType,
typename RightArgType,
typename Device>
// Rank of the assignment expression, forwarded from the expression type.
106 static constexpr
int NumDims = XprType::NumDims;
// Constructor member-init fragment: both sub-evaluators are built against
// the same device from the two sides of the assignment expression.
130 m_leftImpl(op.lhsExpression(), device),
131 m_rightImpl(op.rhsExpression(), device)
// Tail of an EIGEN_STATIC_ASSERT in the constructor body; its condition is
// on a missing line (presumably a layout/compatibility check between the
// two operands — TODO confirm against the full file).
136 YOU_MADE_A_PROGRAMMING_MISTAKE);
// dimensions() body: the assignment reports the RHS dimensions (the
// Dimensions typedef is taken from the right-hand evaluator per the member
// index at the end of this file).
144 return m_rightImpl.dimensions();
// evalSubExprsIfNeeded body: evaluate the destination first (no externally
// provided buffer), then hand its data pointer to the RHS so the RHS can,
// where supported, evaluate directly into the destination buffer. The
// returned bool tells the caller whether per-coefficient assignment is
// still needed (named `need_assign` in the async variant below).
149 m_leftImpl.evalSubExprsIfNeeded(NULL);
154 return m_rightImpl.evalSubExprsIfNeeded(m_leftImpl.data());
157 #ifdef EIGEN_USE_THREADS
// Asynchronous counterpart of evalSubExprsIfNeeded: evaluate the LHS first,
// and only from its completion callback kick off the RHS evaluation (which
// again receives the LHS data pointer), finally forwarding `need_assign`
// to the caller-supplied `done` callback.
// NOTE(review): extraction fragment — the parameter list of the function,
// the closing braces/parens of the lambdas, and the matching #endif are on
// missing lines.
158 template <
typename EvalSubExprsCallback>
159 EIGEN_STRONG_INLINE
void evalSubExprsIfNeededAsync(
161 m_leftImpl.evalSubExprsIfNeededAsync(
nullptr, [
this, done](
bool) {
162 m_rightImpl.evalSubExprsIfNeededAsync(
163 m_leftImpl.data(), [done](
bool need_assign) { done(need_assign); });
// cleanup() body: release the resources held by both sub-evaluators.
169 m_leftImpl.cleanup();
170 m_rightImpl.cleanup();
// evalScalar body: copy a single coefficient i from the RHS into the LHS.
174 m_leftImpl.coeffRef(
i) = m_rightImpl.coeff(
i);
// evalPacket body: vectorized copy of one packet starting at index i.
// LhsStoreMode / RhsLoadMode are declared on missing lines (presumably
// alignment-dependent load/store modes — TODO confirm against the full
// file).
180 m_leftImpl.template writePacket<LhsStoreMode>(
i, m_rightImpl.template packet<RhsLoadMode>(
i));
// coeff()/packet() read back from the LEFT evaluator — the destination side
// that evalScalar/evalPacket above write into.
184 return m_leftImpl.coeff(index);
186 template<
int LoadMode>
189 return m_leftImpl.template packet<LoadMode>(index);
// costPerCoeff fragment: the total cost combines both operands' costs; the
// expression tail that folds in `left` is on a missing line.
197 TensorOpCost left = m_leftImpl.costPerCoeff(vectorized);
198 return m_rightImpl.costPerCoeff(vectorized) +
// getResourceRequirements body: block-evaluation requirements for the
// assignment are the merge of both operands' requirements.
207 return internal::TensorBlockResourceRequirements::merge(
208 m_leftImpl.getResourceRequirements(),
209 m_rightImpl.getResourceRequirements());
// evalBlock fragment: when the destination evaluator exposes raw storage
// (data() != NULL), register it as a destination buffer (base pointer
// shifted by the block offset, plus the destination strides) so the RHS
// block can, where supported, materialize directly into it; the resulting
// block is then written into the LHS.
// NOTE(review): extraction fragment — the `if` statement spanning original
// lines 215/216 and the code between lines 220 and 226 are partly on
// missing lines.
215 m_leftImpl.
data() != NULL) {
218 desc.template AddDestinationBuffer<Layout>(
219 m_leftImpl.data() + desc.offset(),
220 internal::strides<Layout>(m_leftImpl.dimensions()));
226 m_leftImpl.writeBlock(desc,
block);
#define EIGEN_DEVICE_FUNC
#define EIGEN_STATIC_ASSERT(X, MSG)
const internal::remove_all_t< typename RhsXprType::Nested > & m_rhs_xpr
internal::remove_all_t< typename LhsXprType::Nested > & m_lhs_xpr
LhsXprType::CoeffReturnType CoeffReturnType
Eigen::internal::traits< TensorAssignOp >::StorageKind StorageKind
Eigen::internal::traits< TensorAssignOp >::Scalar Scalar
static constexpr int NumDims
Eigen::internal::nested< TensorAssignOp >::type Nested
TensorAssignOp(LhsXprType &lhs, const RhsXprType &rhs)
const internal::remove_all_t< typename RhsXprType::Nested > & rhsExpression() const
internal::remove_all_t< typename LhsXprType::Nested > & lhsExpression() const
Eigen::NumTraits< Scalar >::Real RealScalar
Eigen::internal::traits< TensorAssignOp >::Index Index
double bytes_stored() const
double bytes_loaded() const
double compute_cycles() const
typename remove_all< T >::type remove_all_t
EIGEN_ALWAYS_INLINE T maxi(const T &x, const T &y)
: TensorContractionSycl.h, provides various tensor contraction kernel for SYCL backend
EIGEN_DEFAULT_DENSE_INDEX_TYPE Index
EIGEN_ALWAYS_INLINE bool dimensions_match(Dims1 dims1, Dims2 dims2)
internal::packet_traits< Scalar >::type type
TensorOpCost costPerCoeff(bool vectorized) const
Storage::Type EvaluatorPointerType
TensorAssignOp< LeftArgType, RightArgType > XprType
TensorEvaluator< RightArgType, Device >::Dimensions Dimensions
const Dimensions & dimensions() const
TensorEvaluator(const XprType &op, const Device &device)
void evalBlock(TensorBlockDesc &desc, TensorBlockScratch &scratch)
PacketReturnType packet(Index index) const
internal::TensorBlockDescriptor< NumDims, Index > TensorBlockDesc
EvaluatorPointerType data() const
bool evalSubExprsIfNeeded(EvaluatorPointerType)
void evalPacket(Index i) const
XprType::CoeffReturnType CoeffReturnType
internal::TensorBlockScratchAllocator< Device > TensorBlockScratch
void evalScalar(Index i) const
TensorEvaluator< const RightArgType, Device >::TensorBlock RightTensorBlock
StorageMemory< CoeffReturnType, Device > Storage
PacketType< CoeffReturnType, Device >::type PacketReturnType
internal::TensorBlockResourceRequirements getResourceRequirements() const
TensorEvaluator< LeftArgType, Device > m_leftImpl
TensorEvaluator< RightArgType, Device > m_rightImpl
CoeffReturnType coeff(Index index) const
A cost model used to limit the number of threads used for evaluating tensor expression.
static constexpr int Layout
TensorBlock block(TensorBlockDesc &desc, TensorBlockScratch &scratch, bool=false) const
Storage::Type EvaluatorPointerType
static constexpr int PacketSize
EvaluatorPointerType data() const
internal::TensorMaterializedBlock< ScalarNoConst, NumCoords, Layout, Index > TensorBlock