TensorForcedEval.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H
#define EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H

#include "./InternalHeaderCheck.h"

namespace Eigen {
namespace internal {
template<typename XprType>
struct traits<TensorForcedEvalOp<XprType> >
{
  // Type promotion to handle the case where the types of the lhs and the rhs are different.
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename traits<XprType>::StorageKind StorageKind;
  typedef typename traits<XprType>::Index Index;
  typedef typename XprType::Nested Nested;
  typedef std::remove_reference_t<Nested> Nested_;
  static constexpr int NumDimensions = XprTraits::NumDimensions;
  static constexpr int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;

  enum {
    Flags = 0
  };
};

template<typename XprType>
struct eval<TensorForcedEvalOp<XprType>, Eigen::Dense>
{
  typedef const TensorForcedEvalOp<XprType>& type;
};

template<typename XprType>
struct nested<TensorForcedEvalOp<XprType>, 1, typename eval<TensorForcedEvalOp<XprType> >::type>
{
  typedef TensorForcedEvalOp<XprType> type;
};

}  // end namespace internal

template<typename XprType>
class TensorForcedEvalOp : public TensorBase<TensorForcedEvalOp<XprType>, ReadOnlyAccessors>
{
  public:
  typedef typename Eigen::internal::traits<TensorForcedEvalOp>::Scalar Scalar;
  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
  typedef std::remove_const_t<typename XprType::CoeffReturnType> CoeffReturnType;
  typedef typename Eigen::internal::nested<TensorForcedEvalOp>::type Nested;
  typedef typename Eigen::internal::traits<TensorForcedEvalOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorForcedEvalOp>::Index Index;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorForcedEvalOp(const XprType& expr)
      : m_xpr(expr) {}

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  const internal::remove_all_t<typename XprType::Nested>&
  expression() const { return m_xpr; }

  protected:
  typename XprType::Nested m_xpr;
};

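// A minimal usage sketch (illustrative, not part of this header): in the
// Tensor module a TensorForcedEvalOp is normally created by calling .eval()
// on a tensor expression, which forces that sub-expression to be
// materialized into a temporary buffer before the enclosing expression is
// evaluated:
//
//   Eigen::Tensor<float, 2> a(64, 64), b(64, 64);
//   a.setRandom(); b.setRandom();
//   // (a + b) is computed once into a temporary and then read back,
//   // instead of being re-evaluated for every coefficient of c.
//   Eigen::Tensor<float, 2> c = (a + b).eval() * 2.0f;
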
namespace internal {
template <typename Device, typename CoeffReturnType>
struct non_integral_type_placement_new {
  template <typename StorageType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(Index numValues, StorageType m_buffer) {
    // Initialize non-trivially constructible types.
    if (!internal::is_arithmetic<CoeffReturnType>::value) {
      for (Index i = 0; i < numValues; ++i) new (m_buffer + i) CoeffReturnType();
    }
  }
};

// SYCL does not support placement new for non-integral types:
// `new (m_buffer + i) CoeffReturnType()` triggers a "no matching function
// for call to 'operator new'" compiler error on SYCL devices, so the
// initialization is skipped there.
template <typename CoeffReturnType>
struct non_integral_type_placement_new<Eigen::SyclDevice, CoeffReturnType> {
  template <typename StorageType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(Index, StorageType) {
  }
};
} // end namespace internal

template<typename ArgType_, typename Device>
struct TensorEvaluator<const TensorForcedEvalOp<ArgType_>, Device>
{
  typedef const internal::remove_all_t<ArgType_> ArgType;
  typedef TensorForcedEvalOp<ArgType> XprType;
  typedef typename ArgType::Scalar Scalar;
  typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
  typedef typename XprType::Index Index;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static constexpr int PacketSize = PacketType<CoeffReturnType, Device>::size;
  typedef typename Eigen::internal::traits<XprType>::PointerType TensorPointerType;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  enum {
    IsAligned         = true,
    PacketAccess      = (PacketType<CoeffReturnType, Device>::size > 1),
    BlockAccess       = internal::is_arithmetic<CoeffReturnType>::value,
    PreferBlockAccess = false,
    RawAccess         = true
  };

  static constexpr int Layout = TensorEvaluator<ArgType, Device>::Layout;
  static constexpr int NumDims = internal::traits<ArgType>::NumDimensions;

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
  typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;

  typedef typename internal::TensorMaterializedBlock<CoeffReturnType, NumDims,
                                                     Layout, Index>
      TensorBlock;
  //===--------------------------------------------------------------------===//

  TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device), m_op(op.expression()),
        m_device(device), m_buffer(NULL)
  { }

  EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); }

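  // Materializes the wrapped expression: allocate a device-side temporary
  // buffer large enough for all coefficients, then evaluate the expression
  // into it by wrapping it in a TensorEvalToOp and running that through
  // TensorExecutor. Subsequent coeff()/packet() calls read straight from
  // this buffer.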
  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) {
    const Index numValues = internal::array_prod(m_impl.dimensions());
    m_buffer = m_device.get((CoeffReturnType*)m_device.allocate_temp(numValues * sizeof(CoeffReturnType)));

    internal::non_integral_type_placement_new<Device, CoeffReturnType>()(numValues, m_buffer);

    typedef TensorEvalToOp<const std::remove_const_t<ArgType>> EvalTo;
    EvalTo evalToTmp(m_device.get(m_buffer), m_op);

    internal::TensorExecutor<
        const EvalTo, std::remove_const_t<Device>,
        /*Vectorizable=*/internal::IsVectorizable<Device, const ArgType>::value,
        /*Tiling=*/internal::IsTileable<Device, const ArgType>::value>::
        run(evalToTmp, m_device);

    return true;
  }

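  // Asynchronous variant used with the thread-pool device: the same
  // evaluate-into-a-temporary strategy, but driven by TensorAsyncExecutor,
  // with done(true) invoked once the buffer has been filled.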
#ifdef EIGEN_USE_THREADS
  template <typename EvalSubExprsCallback>
  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
      EvaluatorPointerType, EvalSubExprsCallback done) {
    const Index numValues = internal::array_prod(m_impl.dimensions());
    m_buffer = m_device.get((CoeffReturnType*)m_device.allocate_temp(
        numValues * sizeof(CoeffReturnType)));
    typedef TensorEvalToOp<const std::remove_const_t<ArgType>> EvalTo;
    EvalTo evalToTmp(m_device.get(m_buffer), m_op);

    auto on_done = std::bind([](EvalSubExprsCallback done_) { done_(true); },
                             std::move(done));
    internal::TensorAsyncExecutor<
        const EvalTo, std::remove_const_t<Device>,
        decltype(on_done),
        /*Vectorizable=*/internal::IsVectorizable<Device, const ArgType>::value,
        /*Tiling=*/internal::IsTileable<Device, const ArgType>::value>::
        runAsync(evalToTmp, m_device, std::move(on_done));
  }
#endif

  EIGEN_STRONG_INLINE void cleanup() {
    m_device.deallocate_temp(m_buffer);
    m_buffer = NULL;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    return m_buffer[index];
  }

  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    return internal::ploadt<PacketReturnType, LoadMode>(m_buffer + index);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  internal::TensorBlockResourceRequirements getResourceRequirements() const {
    return internal::TensorBlockResourceRequirements::any();
  }

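  // Block-wise access: the expression has already been materialized into
  // m_buffer, so a block is served directly from that buffer via
  // TensorMaterializedBlock::materialize instead of re-evaluating the
  // wrapped expression.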
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock
  block(TensorBlockDesc& desc, TensorBlockScratch& scratch,
        bool /*root_of_expr_ast*/ = false) const {
    eigen_assert(m_buffer != NULL);
    return TensorBlock::materialize(m_buffer, m_impl.dimensions(), desc, scratch);
  }

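  // After forced evaluation a coefficient read is just a load from the
  // materialized buffer, which is what this cost estimate reflects.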
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  EvaluatorPointerType data() const { return m_buffer; }

 private:
  TensorEvaluator<ArgType, Device> m_impl;
  const ArgType m_op;
  const Device EIGEN_DEVICE_REF m_device;
  EvaluatorPointerType m_buffer;
};

} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H