Tensor.h
Go to the documentation of this file.
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
5 // Copyright (C) 2013 Christian Seiler <christian@iwakd.de>
6 //
7 // This Source Code Form is subject to the terms of the Mozilla
8 // Public License v. 2.0. If a copy of the MPL was not distributed
9 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10 
11 #ifndef EIGEN_CXX11_TENSOR_TENSOR_H
12 #define EIGEN_CXX11_TENSOR_TENSOR_H
13 
14 #include "./InternalHeaderCheck.h"
15 
16 namespace Eigen {
17 
// The tensor class: a dynamically-sized N-dimensional array of Scalar_
// with NumIndices_ dimensions, compile-time Options_ (e.g. RowMajor) and
// index type IndexType_. Storage lives in m_storage (a TensorStorage).
//
// NOTE(review): this listing is a Doxygen extract — the embedded
// original-source line numbers have gaps, so several declarations
// (SFINAE template constraints, constructor initializers, the Assign
// typedefs, some method signatures) are elided from view. Comments
// below describe only what is visible; elided behavior is hedged.
65 template<typename Scalar_, int NumIndices_, int Options_, typename IndexType_>
66 class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexType_> >
67 {
68  public:
// Standard Eigen plumbing typedefs re-exported from traits/base.
71  typedef typename Eigen::internal::nested<Self>::type Nested;
72  typedef typename internal::traits<Self>::StorageKind StorageKind;
73  typedef typename internal::traits<Self>::Index Index;
74  typedef Scalar_ Scalar;
76  typedef typename Base::CoeffReturnType CoeffReturnType;
77 
// Evaluator capability flags: this tensor supports coordinate-based
// access and exposes raw (pointer) access to its storage.
78  enum {
80  CoordAccess = true,
81  RawAccess = true
82  };
83 
// Layout collapses Options_ to either RowMajor or ColMajor.
84  static constexpr int Layout = Options_ & RowMajor ? RowMajor : ColMajor;
85  static constexpr int Options = Options_;
86  static constexpr int NumIndices = NumIndices_;
88 
89  protected:
91 
// NOTE(review): partial trait — the struct's name and the is_int member
// are elided in this extract. From what is visible it classifies a
// CustomIndices type as acceptable when it derives from
// array<Index, NumIndices> or is integral; confirm against full source.
92  template<typename CustomIndices>
94  static const bool is_array = internal::is_base_of<array<Index, NumIndices>, CustomIndices>::value;
96  static const bool value = is_array | is_int;
97  };
98 
99  public:
100  // Metadata
// rank() is the number of dimensions; dimension(n) the extent along
// axis n; size() the total element count; data() the raw buffer.
101  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rank() const { return NumIndices; }
102  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index dimension(std::size_t n) const { return m_storage.dimensions()[n]; }
103  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_storage.dimensions(); }
104  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return m_storage.size(); }
105  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); }
106  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); }
107 
108  // This makes EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
109  // work, because that uses base().coeffRef() - and we don't yet
110  // implement a similar class hierarchy
111  inline Self& base() { return *this; }
112  inline const Self& base() const { return *this; }
113 
// Read-only coefficient access with one Index argument per dimension;
// the indices are packed into an array and forwarded to coeff(array).
114  template<typename... IndexTypes>
115  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
116  {
117  // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
118  EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
119  return coeff(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
120  }
121 
122  // normal indices
// Core read accessor: linearizes the multi-index and reads the buffer.
// NOTE(review): line 125 is elided here — presumably an
// eigen_internal_assert(checkIndexRange(indices)); confirm.
123  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(const array<Index, NumIndices>& indices) const
124  {
126  return m_storage.data()[linearizedIndex(indices)];
127  }
128 
129  // custom indices
// Overload for custom index containers (the SFINAE constraint on line
// 131 is elided); converts to array<Index, NumIndices> then delegates.
130  template<typename CustomIndices,
132  >
133  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(CustomIndices& indices) const
134  {
135  return coeff(internal::customIndices2Array<Index,NumIndices>(indices));
136  }
137 
// Rank-0 (scalar) tensors: the single element sits at offset 0.
138  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff() const
139  {
140  EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
141  return m_storage.data()[0];
142  }
143 
// Flat (already-linearized) read access into the underlying storage.
144  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const
145  {
146  eigen_internal_assert(index >= 0 && index < size());
147  return m_storage.data()[index];
148  }
149 
// Mutable counterparts of the coeff() family above, same dispatch.
150  template<typename... IndexTypes>
151  inline Scalar& coeffRef(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
152  {
153  // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
154  EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
155  return coeffRef(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
156  }
157 
158  // normal indices
// NOTE(review): line 161 elided — presumably the same range assert as
// in the const overload; confirm.
159  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(const array<Index, NumIndices>& indices)
160  {
162  return m_storage.data()[linearizedIndex(indices)];
163  }
164 
165  // custom indices
// (SFINAE constraint on line 167 elided.)
166  template<typename CustomIndices,
168  >
169  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(CustomIndices& indices)
170  {
171  return coeffRef(internal::customIndices2Array<Index,NumIndices>(indices));
172  }
173 
// Mutable access for rank-0 tensors.
174  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef()
175  {
176  EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
177  return m_storage.data()[0];
178  }
179 
// Mutable flat access into the underlying storage.
180  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
181  {
182  eigen_internal_assert(index >= 0 && index < size());
183  return m_storage.data()[index];
184  }
185 
// operator() family: the public element-access API. All overloads
// funnel into coeff()/coeffRef() above.
186  template<typename... IndexTypes>
187  inline const Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
188  {
189  // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
190  EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
191  return this->operator()(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
192  }
193 
194  // custom indices
// (SFINAE constraint on line 196 elided.)
195  template<typename CustomIndices,
197  >
198  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(CustomIndices& indices) const
199  {
200  return coeff(internal::customIndices2Array<Index,NumIndices>(indices));
201  }
202 
203  // normal indices
204  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(const array<Index, NumIndices>& indices) const
205  {
206  return coeff(indices);
207  }
208 
// Flat read access through the call operator.
209  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const
210  {
211  eigen_internal_assert(index >= 0 && index < size());
212  return coeff(index);
213  }
214 
// Rank-0 read access: t() yields the single scalar.
215  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()() const
216  {
217  EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
218  return coeff();
219  }
220 
// operator[] is restricted to rank-1 tensors (vectors).
221  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator[](Index index) const
222  {
223  // The bracket operator is only for vectors, use the parenthesis operator instead.
224  EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE);
225  return coeff(index);
226  }
227 
// Mutable operator() family, mirroring the const overloads above.
228  template<typename... IndexTypes>
229  inline Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
230  {
231  // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
232  EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
233  return operator()(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
234  }
235 
236  // normal indices
237  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(const array<Index, NumIndices>& indices)
238  {
239  return coeffRef(indices);
240  }
241 
242  // custom indices
// (SFINAE constraint on line 244 elided.)
243  template<typename CustomIndices,
245  >
246  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(CustomIndices& indices)
247  {
248  return coeffRef(internal::customIndices2Array<Index,NumIndices>(indices));
249  }
250 
// Note: this overload uses eigen_assert (always-on) rather than the
// eigen_internal_assert used by its const counterpart.
251  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index index)
252  {
253  eigen_assert(index >= 0 && index < size());
254  return coeffRef(index);
255  }
256 
// Rank-0 mutable access.
257  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()()
258  {
259  EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
260  return coeffRef();
261  }
262 
// Mutable operator[]: rank-1 tensors only.
263  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator[](Index index)
264  {
265  // The bracket operator is only for vectors, use the parenthesis operator instead
266  EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
267  return coeffRef(index);
268  }
269 
// Default constructor: empty storage.
271  EIGEN_STRONG_INLINE Tensor()
272  : m_storage()
273  {
274  }
275 
// Copy constructor: deep-copies the storage via TensorStorage's copy.
277  EIGEN_STRONG_INLINE Tensor(const Self& other)
278  : Base(other), m_storage(other.m_storage)
279  {
280  }
281 
// Sized constructor: one extent per dimension; allocates but does not
// initialize coefficients (beyond whatever TensorStorage does).
282  template<typename... IndexTypes>
283  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index firstDimension, IndexTypes... otherDimensions)
284  : m_storage(firstDimension, otherDimensions...)
285  {
286  // The number of dimensions used to construct a tensor must be equal to the rank of the tensor.
287  EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
288  }
289 
// NOTE(review): lines 292/294 elided — the m_storage initializer (and
// likely an array_prod(dimensions) size computation) is not visible here.
291  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(const array<Index, NumIndices>& dimensions)
293  {
295  }
296 
// Construction from a read-only tensor expression: evaluates the
// expression into this tensor via TensorExecutor on the default device.
// NOTE(review): lines 301/303 elided — presumably a resize to the
// expression's dimensions and the typedef of Assign; confirm.
297  template<typename OtherDerived>
299  EIGEN_STRONG_INLINE Tensor(const TensorBase<OtherDerived, ReadOnlyAccessors>& other)
300  {
302  Assign assign(*this, other.derived());
304  internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
305  }
306 
// Same as above for writable expressions.
307  template<typename OtherDerived>
309  EIGEN_STRONG_INLINE Tensor(const TensorBase<OtherDerived, WriteAccessors>& other)
310  {
312  Assign assign(*this, other.derived());
314  internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
315  }
316 
// Move constructor/assignment: steal the other tensor's storage;
// no element copies are performed.
318  EIGEN_STRONG_INLINE Tensor(Self&& other)
319  : m_storage(std::move(other.m_storage))
320  {
321  }
323  EIGEN_STRONG_INLINE Tensor& operator=(Self&& other)
324  {
325  m_storage = std::move(other.m_storage);
326  return *this;
327  }
328 
// Copy assignment: evaluates `other` into this tensor through the
// TensorExecutor machinery (elided lines 332/334 presumably resize and
// typedef Assign — confirm).
330  EIGEN_STRONG_INLINE Tensor& operator=(const Tensor& other)
331  {
333  Assign assign(*this, other);
335  internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
336  return *this;
337  }
// Assignment from an arbitrary tensor expression, same mechanism.
338  template<typename OtherDerived>
340  EIGEN_STRONG_INLINE Tensor& operator=(const OtherDerived& other)
341  {
343  Assign assign(*this, other);
345  internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
346  return *this;
347  }
348 
// Variadic resize: packs the extents into an array and delegates.
349  template<typename... IndexTypes> EIGEN_DEVICE_FUNC
350  void resize(Index firstDimension, IndexTypes... otherDimensions)
351  {
352  // The number of dimensions used to resize a tensor must be equal to the rank of the tensor.
353  EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
354  resize(array<Index, NumIndices>{{firstDimension, otherDimensions...}});
355  }
356 
// Core resize (signature on lines 357-358 elided; the member index
// gives it as resize(const array<Index, NumIndices>&)). Computes the
// total element count with overflow checking, then resizes storage.
// Under EIGEN_INITIALIZE_COEFFS, line 369 (elided) presumably
// zero/NaN-initializes coefficients when the size changed.
359  {
360  int i;
361  Index size = Index(1);
362  for (i = 0; i < NumIndices; i++) {
// Guards against Index overflow when multiplying the extents.
363  internal::check_rows_cols_for_overflow<Dynamic, Dynamic, Dynamic>::run(size, dimensions[i]);
364  size *= dimensions[i];
365  }
366  #ifdef EIGEN_INITIALIZE_COEFFS
367  bool size_changed = size != this->size();
368  m_storage.resize(size, dimensions);
370  #else
371  m_storage.resize(size, dimensions);
372  #endif
373  }
374 
375  // NOTE(review): DSizes derives from array<Index, NumIndices>, so this overload appears redundant — confirm against the full source why it is kept.
// (Signature and the local `dims` declaration, lines 376-377, elided;
// the member index gives resize(const DSizes<Index, NumIndices>&).)
378  for (int i = 0; i < NumIndices; ++i) {
379  dims[i] = dimensions[i];
380  }
381  resize(dims);
382  }
383 
// Rank-0 resize is a compile-time-checked no-op.
385  void resize()
386  {
387  EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
388  // Nothing to do: rank 0 tensors have fixed size
389  }
390 
// Resize from an Eigen::IndexList (signature and `dims` declaration,
// lines 392-394, elided); casts each entry to Index and delegates.
391  template <typename FirstType, typename... OtherTypes>
395  for (int i = 0; i < NumIndices; ++i) {
396  dims[i] = static_cast<Index>(dimensions[i]);
397  }
398  resize(dims);
399  }
400 
// Resize from a custom dimension container (SFINAE constraint on
// line 403 elided); converted via customIndices2Array.
402  template<typename CustomDimension,
404  >
405  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize(CustomDimension& dimensions)
406  {
407  resize(internal::customIndices2Array<Index,NumIndices>(dimensions));
408  }
409 
// Resize from a static Sizes<...> list. Two variants depending on
// whether real C++11 meta-programming is available; signatures
// (lines 412-414 / 422-424) are elided in this extract.
410 #ifndef EIGEN_EMULATE_CXX11_META_H
411  template <typename std::ptrdiff_t... Indices>
415  for (int i = 0; i < NumIndices; ++i) {
416  dims[i] = static_cast<Index>(dimensions[i]);
417  }
418  resize(dims);
419  }
420 #else
421  template <std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5>
425  for (int i = 0; i < NumIndices; ++i) {
426  dims[i] = static_cast<Index>(dimensions[i]);
427  }
428  resize(dims);
429  }
430 #endif
431 
// User extension point: injects user-provided members into the class.
432  #ifdef EIGEN_TENSOR_PLUGIN
433  #include EIGEN_TENSOR_PLUGIN
434  #endif
435 
436  protected:
437 
// Returns true iff every index is >= 0 AND strictly less than the
// extent of its dimension (element-wise reduction with logical AND).
438  bool checkIndexRange(const array<Index, NumIndices>& indices) const
439  {
442  using internal::greater_equal_zero_op;
443  using internal::logical_and_op;
444  using internal::lesser_op;
445 
446  return
447  // check whether the indices are all >= 0
448  array_apply_and_reduce<logical_and_op, greater_equal_zero_op>(indices) &&
449  // check whether the indices fit in the dimensions
450  array_zip_and_reduce<logical_and_op, lesser_op>(indices, m_storage.dimensions());
451  }
452 
// Maps a multi-dimensional index to a flat storage offset, honoring
// the compile-time layout (row-major vs column-major).
453  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index linearizedIndex(const array<Index, NumIndices>& indices) const
454  {
455  if (Options&RowMajor) {
456  return m_storage.dimensions().IndexOfRowMajor(indices);
457  } else {
458  return m_storage.dimensions().IndexOfColMajor(indices);
459  }
460  }
461 };
462 
463 } // end namespace Eigen
464 
465 #endif // EIGEN_CXX11_TENSOR_TENSOR_H
int n
int i
#define EIGEN_MAX_ALIGN_BYTES
#define eigen_internal_assert(x)
#define EIGEN_DEVICE_FUNC
#define eigen_assert(x)
#define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
#define EIGEN_STATIC_ASSERT(X, MSG)
#define EIGEN_SFINAE_ENABLE_IF(__condition__)
Definition: TensorMacros.h:29
The tensor base class.
const FixedDimensions dimensions() const
Definition: TensorStorage.h:61
DenseIndex size() const
Definition: TensorStorage.h:64
The tensor class.
Definition: Tensor.h:67
const Scalar & operator()() const
Definition: Tensor.h:215
Tensor(Index firstDimension, IndexTypes... otherDimensions)
Definition: Tensor.h:283
TensorStorage< Scalar, Dimensions, Options > m_storage
Definition: Tensor.h:90
void resize()
Definition: Tensor.h:385
Index rank() const
Definition: Tensor.h:101
void resize(const DSizes< Index, NumIndices > &dimensions)
Definition: Tensor.h:376
Scalar & operator[](Index index)
Definition: Tensor.h:263
void resize(Index firstDimension, IndexTypes... otherDimensions)
Definition: Tensor.h:350
const Scalar & operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
Definition: Tensor.h:187
DSizes< Index, NumIndices_ > Dimensions
Definition: Tensor.h:87
const Scalar & operator()(Index index) const
Definition: Tensor.h:209
Scalar & coeffRef(CustomIndices &indices)
Definition: Tensor.h:169
Tensor(Self &&other)
Definition: Tensor.h:318
Self & base()
Definition: Tensor.h:111
Scalar & operator()(CustomIndices &indices)
Definition: Tensor.h:246
const Self & base() const
Definition: Tensor.h:112
const Scalar & coeff(const array< Index, NumIndices > &indices) const
Definition: Tensor.h:123
const Scalar & coeff(Index index) const
Definition: Tensor.h:144
Tensor(const TensorBase< OtherDerived, ReadOnlyAccessors > &other)
Definition: Tensor.h:299
Base::CoeffReturnType CoeffReturnType
Definition: Tensor.h:76
bool checkIndexRange(const array< Index, NumIndices > &indices) const
Definition: Tensor.h:438
const Scalar & operator()(CustomIndices &indices) const
Definition: Tensor.h:198
Scalar & coeffRef(const array< Index, NumIndices > &indices)
Definition: Tensor.h:159
Index dimension(std::size_t n) const
Definition: Tensor.h:102
void resize(const Sizes< Indices... > &dimensions)
Definition: Tensor.h:413
Index linearizedIndex(const array< Index, NumIndices > &indices) const
Definition: Tensor.h:453
const Scalar * data() const
Definition: Tensor.h:106
Scalar & operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
Definition: Tensor.h:229
static constexpr int Layout
Definition: Tensor.h:84
Tensor(const TensorBase< OtherDerived, WriteAccessors > &other)
Definition: Tensor.h:309
Tensor & operator=(Self &&other)
Definition: Tensor.h:323
Tensor & operator=(const OtherDerived &other)
Definition: Tensor.h:340
Tensor(const Self &other)
Definition: Tensor.h:277
Tensor(const array< Index, NumIndices > &dimensions)
Definition: Tensor.h:291
static constexpr int NumIndices
Definition: Tensor.h:86
void resize(CustomDimension &dimensions)
Definition: Tensor.h:405
Scalar & coeffRef(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
Definition: Tensor.h:151
Tensor & operator=(const Tensor &other)
Definition: Tensor.h:330
Scalar & operator()(const array< Index, NumIndices > &indices)
Definition: Tensor.h:237
TensorBase< Tensor< Scalar_, NumIndices_, Options_, IndexType_ > > Base
Definition: Tensor.h:70
Index size() const
Definition: Tensor.h:104
Tensor< Scalar_, NumIndices_, Options_, IndexType_ > Self
Definition: Tensor.h:69
void resize(const Eigen::IndexList< FirstType, OtherTypes... > &dimensions)
Definition: Tensor.h:393
Scalar & coeffRef(Index index)
Definition: Tensor.h:180
Scalar & operator()(Index index)
Definition: Tensor.h:251
const Scalar & coeff() const
Definition: Tensor.h:138
const Scalar & coeff(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
Definition: Tensor.h:115
NumTraits< Scalar >::Real RealScalar
Definition: Tensor.h:75
Scalar_ Scalar
Definition: Tensor.h:74
const Scalar & coeff(CustomIndices &indices) const
Definition: Tensor.h:133
const Scalar & operator()(const array< Index, NumIndices > &indices) const
Definition: Tensor.h:204
internal::traits< Self >::Index Index
Definition: Tensor.h:73
const Dimensions & dimensions() const
Definition: Tensor.h:103
Eigen::internal::nested< Self >::type Nested
Definition: Tensor.h:71
static constexpr int Options
Definition: Tensor.h:85
void resize(const array< Index, NumIndices > &dimensions)
Definition: Tensor.h:358
Scalar & operator()()
Definition: Tensor.h:257
Scalar * data()
Definition: Tensor.h:105
Scalar & coeffRef()
Definition: Tensor.h:174
const Scalar & operator[](Index index) const
Definition: Tensor.h:221
internal::traits< Self >::StorageKind StorageKind
Definition: Tensor.h:72
@ CoordAccess
Definition: Tensor.h:80
constexpr auto array_apply_and_reduce(array< A, N > a) -> decltype(h_array_apply_and_reduce< Reducer, Op, A, N >(a, typename gen_numeric_list< int, N >::type()))
constexpr auto array_zip_and_reduce(array< A, N > a, array< B, N > b) -> decltype(h_array_zip_and_reduce< Reducer, Op, A, B, N >(a, b, typename gen_numeric_list< int, N >::type()))
std::ptrdiff_t array_prod(const Sizes< Indices... > &)
: TensorContractionSycl.h, provides various tensor contraction kernel for SYCL backend
std::array< T, N > array
EIGEN_DEFAULT_DENSE_INDEX_TYPE Index
DenseIndex IndexOfRowMajor(const array< DenseIndex, NumDims > &indices) const
DenseIndex IndexOfColMajor(const array< DenseIndex, NumDims > &indices) const
A cost model used to limit the number of threads used for evaluating tensor expression.
static const bool is_int
Definition: Tensor.h:95
static const bool is_array
Definition: Tensor.h:94
static const bool value
Definition: Tensor.h:96