TensorContractionMapper.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_MAPPER_H
#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_MAPPER_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {

enum {
  Rhs = 0,
  Lhs = 1
};
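// These tags identify which operand of the contraction a mapper serves; the
// `side == Lhs` / `side == Rhs` checks below select the matching index math.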

/*
 * Implementation of the Eigen blas_data_mapper class for tensors.
 */
template <typename Tensor, bool HasRawAccess, template <class> class MakePointer_ = MakePointer>
struct CoeffLoader;

template <typename Scalar, typename Index, int side, typename Tensor,
          typename nocontract_t, typename contract_t, int packet_size,
          bool inner_dim_contiguous, bool inner_dim_reordered, int Alignment,
          template <class> class MakePointer_ = MakePointer>
class BaseTensorContractionMapper;

template <typename Tensor, bool HasRawAccess, template <class> class MakePointer_>
struct CoeffLoader {
  enum {
    DirectOffsets = false
  };

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffLoader(const Tensor& tensor) : m_tensor(tensor) { }

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void offsetBuffer(typename Tensor::Index) {
    eigen_assert(false && "unsupported");
  }

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE const typename MakePointer_<const typename Tensor::Scalar>::Type
  data() const {
    eigen_assert(false && "unsupported");
    return NULL;
  }

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename Tensor::Scalar coeff(typename Tensor::Index index) const { return m_tensor.coeff(index); }

  template<int LoadMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  typename Tensor::PacketReturnType packet(typename Tensor::Index index) const
  {
    return m_tensor.template packet<LoadMode>(index);
  }

 private:
  const Tensor m_tensor;
};

template <typename Tensor, template <class> class MakePointer_>
struct CoeffLoader<Tensor, true, MakePointer_> {
  enum {
    DirectOffsets = true
  };

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffLoader(const Tensor& tensor) : m_data(tensor.data()) {}

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void offsetBuffer(typename Tensor::Index offset) {
    m_data += offset;
  }

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE const typename MakePointer_<const typename Tensor::Scalar>::Type
  data() const {
    return m_data;
  }

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename Tensor::Scalar coeff(typename Tensor::Index index) const { return loadConstant(m_data+index); }

  template<int LoadMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  typename Tensor::PacketReturnType packet(typename Tensor::Index index) const
  {
    return internal::ploadt_ro<typename Tensor::PacketReturnType, LoadMode>(m_data + index);
  }

 private:
  typedef typename Tensor::Scalar Scalar;

  typename MakePointer_<const Scalar>::Type m_data;
};
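// The two CoeffLoader variants above give the mappers a uniform interface:
// the generic version funnels every access through the evaluator's coeff()
// and packet() (no raw pointer, so data() and offsetBuffer() are unsupported),
// while the HasRawAccess specialization reads straight from the underlying
// buffer, which is what enables the DirectOffsets optimization used further
// down by TensorContractionSubMapper.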

template<typename Scalar, typename Index, int side,
         typename Tensor,
         typename nocontract_t, typename contract_t,
         int packet_size, bool inner_dim_contiguous, int Alignment, template <class> class MakePointer_ = MakePointer>
class SimpleTensorContractionMapper {
 public:
  EIGEN_DEVICE_FUNC
  SimpleTensorContractionMapper(const Tensor& tensor,
                                const nocontract_t& nocontract_strides,
                                const nocontract_t& ij_strides,
                                const contract_t& contract_strides,
                                const contract_t& k_strides) :
      m_tensor(tensor),
      m_nocontract_strides(nocontract_strides),
      m_ij_strides(ij_strides),
      m_contract_strides(contract_strides),
      m_k_strides(k_strides) { }

  enum {
    DirectOffsets = CoeffLoader<Tensor, Tensor::RawAccess, MakePointer_>::DirectOffsets
  };

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void offsetBuffer(typename Tensor::Index offset) {
    m_tensor.offsetBuffer(offset);
  }

  EIGEN_DEVICE_FUNC
  EIGEN_STRONG_INLINE void prefetch(Index /*i*/) { }

  EIGEN_DEVICE_FUNC
  EIGEN_STRONG_INLINE Scalar operator()(Index row) const {
    // column major assumption
    return operator()(row, 0);
  }

  EIGEN_DEVICE_FUNC
  EIGEN_STRONG_INLINE Scalar operator()(Index row, Index col) const {
    return m_tensor.coeff(computeIndex(row, col));
  }

  EIGEN_DEVICE_FUNC
  EIGEN_STRONG_INLINE Index computeIndex(Index row, Index col) const {
    const bool left = (side == Lhs);
    EIGEN_UNUSED_VARIABLE(left); // annoying bug in g++8.1: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85963
    Index nocontract_val = left ? row : col;
    Index linidx = 0;
    EIGEN_UNROLL_LOOP
    for (int i = static_cast<int>(array_size<nocontract_t>::value) - 1; i > 0; i--) {
      const Index idx = nocontract_val / m_ij_strides[i];
      linidx += idx * m_nocontract_strides[i];
      nocontract_val -= idx * m_ij_strides[i];
    }
    if (array_size<typename Tensor::Dimensions>::value > array_size<contract_t>::value) {
      if (side == Lhs && inner_dim_contiguous) {
        eigen_assert(m_nocontract_strides[0] == 1);
        linidx += nocontract_val;
      } else {
        linidx += nocontract_val * m_nocontract_strides[0];
      }
    }

    Index contract_val = left ? col : row;
    if (array_size<contract_t>::value > 0) {
      EIGEN_UNROLL_LOOP
      for (int i = static_cast<int>(array_size<contract_t>::value) - 1; i > 0; i--) {
        const Index idx = contract_val / m_k_strides[i];
        linidx += idx * m_contract_strides[i];
        contract_val -= idx * m_k_strides[i];
      }

      if (side == Rhs && inner_dim_contiguous) {
        eigen_assert(m_contract_strides[0] == 1);
        linidx += contract_val;
      } else {
        linidx += contract_val * m_contract_strides[0];
      }
    }

    return linidx;
  }
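  // Illustrative example (values chosen for exposition, not from the original
  // source): take a column-major 3x4x5 Lhs tensor with inner_dim_contiguous,
  // whose dims 0 and 1 are non-contracting (12 rows) and whose dim 2 is
  // contracting (5 cols). Then m_nocontract_strides = {1, 3},
  // m_ij_strides = {1, 3}, m_contract_strides = {12}, m_k_strides = {1}.
  // computeIndex(7, 2) peels off idx = 7 / 3 = 2 (linidx += 2 * 3), leaves
  // nocontract_val = 1 (linidx += 1), then adds contract_val * 12 = 24,
  // yielding linidx = 31 -- the column-major offset of element (1, 2, 2).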

  EIGEN_DEVICE_FUNC
  EIGEN_STRONG_INLINE IndexPair<Index> computeIndexPair(Index row, Index col, const Index distance) const {
    const bool left = (side == Lhs);
    EIGEN_UNUSED_VARIABLE(left); // annoying bug in g++8.1: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85963
    Index nocontract_val[2] = {left ? row : col, left ? row + distance : col};
    Index linidx[2] = {0, 0};
    if (array_size<typename Tensor::Dimensions>::value > array_size<contract_t>::value) {
      EIGEN_UNROLL_LOOP
      for (int i = static_cast<int>(array_size<nocontract_t>::value) - 1; i > 0; i--) {
        const Index idx0 = nocontract_val[0] / m_ij_strides[i];
        const Index idx1 = nocontract_val[1] / m_ij_strides[i];
        linidx[0] += idx0 * m_nocontract_strides[i];
        linidx[1] += idx1 * m_nocontract_strides[i];
        nocontract_val[0] -= idx0 * m_ij_strides[i];
        nocontract_val[1] -= idx1 * m_ij_strides[i];
      }
      if (side == Lhs && inner_dim_contiguous) {
        eigen_assert(m_nocontract_strides[0] == 1);
        linidx[0] += nocontract_val[0];
        linidx[1] += nocontract_val[1];
      } else {
        linidx[0] += nocontract_val[0] * m_nocontract_strides[0];
        linidx[1] += nocontract_val[1] * m_nocontract_strides[0];
      }
    }

    Index contract_val[2] = {left ? col : row, left ? col : row + distance};
    if (array_size<contract_t>::value > 0) {
      EIGEN_UNROLL_LOOP
      for (int i = static_cast<int>(array_size<contract_t>::value) - 1; i > 0; i--) {
        const Index idx0 = contract_val[0] / m_k_strides[i];
        const Index idx1 = contract_val[1] / m_k_strides[i];
        linidx[0] += idx0 * m_contract_strides[i];
        linidx[1] += idx1 * m_contract_strides[i];
        contract_val[0] -= idx0 * m_k_strides[i];
        contract_val[1] -= idx1 * m_k_strides[i];
      }

      if (side == Rhs && inner_dim_contiguous) {
        eigen_assert(m_contract_strides[0] == 1);
        linidx[0] += contract_val[0];
        linidx[1] += contract_val[1];
      } else {
        linidx[0] += contract_val[0] * m_contract_strides[0];
        linidx[1] += contract_val[1] * m_contract_strides[0];
      }
    }
    return IndexPair<Index>(linidx[0], linidx[1]);
  }
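  // computeIndexPair returns the linear offsets of the coefficients at
  // (row, col) and (row + distance, col) in a single pass; the load() methods
  // below compare the two offsets to detect whether a whole packet's worth of
  // coefficients happens to be contiguous in memory.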

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index firstAligned(Index size) const {
    // Only claim alignment when we can compute the actual stride (ie when we're
    // dealing with the lhs with inner_dim_contiguous. This is because the
    // matrix-vector product relies on the stride when dealing with aligned inputs.
    return (Alignment == Aligned) && (side == Lhs) && inner_dim_contiguous ? 0 : size;
  }
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index stride() const {
    return ((side == Lhs) && inner_dim_contiguous && array_size<contract_t>::value > 0) ? m_contract_strides[0] : 1;
  }
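  // stride() is the distance in the flat buffer between two consecutive
  // columns of the logical matrix. It is only computable for an Lhs mapper
  // with a contiguous inner dimension, which is also the only case in which
  // TensorContractionSubMapper below bakes offsets directly into the buffer.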

  const CoeffLoader<Tensor, Tensor::RawAccess, MakePointer_>& tensor() const {
    return m_tensor;
  }

  const nocontract_t& nocontract_strides() const {
    return m_nocontract_strides;
  }
  const nocontract_t& ij_strides() const { return m_ij_strides; }
  const contract_t& contract_strides() const { return m_contract_strides; }
  const contract_t& k_strides() const { return m_k_strides; }

 protected:
  CoeffLoader<Tensor, Tensor::RawAccess, MakePointer_> m_tensor;
  const nocontract_t m_nocontract_strides;
  const nocontract_t m_ij_strides;
  const contract_t m_contract_strides;
  const contract_t m_k_strides;
};

template<typename Scalar, typename Index, int side,
         typename Tensor,
         typename nocontract_t, typename contract_t,
         int packet_size, bool inner_dim_contiguous,
         bool inner_dim_reordered, int Alignment, template <class> class MakePointer_>
class BaseTensorContractionMapper : public SimpleTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, packet_size, inner_dim_contiguous, Alignment, MakePointer_>
{
 public:
  typedef SimpleTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, packet_size, inner_dim_contiguous, Alignment, MakePointer_> ParentMapper;

  EIGEN_DEVICE_FUNC
  BaseTensorContractionMapper(const Tensor& tensor,
                              const nocontract_t& nocontract_strides,
                              const nocontract_t& ij_strides,
                              const contract_t& contract_strides,
                              const contract_t& k_strides) :
      ParentMapper(tensor, nocontract_strides, ij_strides, contract_strides, k_strides) { }

  template <typename PacketT,int AlignmentType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  std::enable_if_t<internal::unpacket_traits<PacketT>::size==packet_size,PacketT>
  load(Index i, Index j) const
  {
    // whole method makes column major assumption

    // don't need to add offsets for now (because operator handles that)
    // current code assumes packet size must be a multiple of 2
    EIGEN_STATIC_ASSERT(packet_size % 2 == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);

    if (Tensor::PacketAccess && inner_dim_contiguous && !inner_dim_reordered) {
      const Index index = this->computeIndex(i, j);
      eigen_assert(this->computeIndex(i+packet_size-1, j) == index + packet_size-1);
      return this->m_tensor.template packet<AlignmentType>(index);
    }

    const IndexPair<Index> indexPair = this->computeIndexPair(i, j, packet_size - 1);
    const Index first = indexPair.first;
    const Index lastIdx = indexPair.second;

    // We can always do optimized packet reads from left hand side right now, because
    // the vertical matrix dimension on the left hand side is never contracting.
    // On the right hand side we need to check if the contracting dimensions may have
    // been shuffled first.
    if (Tensor::PacketAccess &&
        (side == Lhs || internal::array_size<contract_t>::value <= 1 || !inner_dim_reordered) &&
        (lastIdx - first) == (packet_size - 1)) {

      return this->m_tensor.template packet<AlignmentType>(first);
    }

    EIGEN_ALIGN_MAX Scalar data[packet_size];

    data[0] = this->m_tensor.coeff(first);
    EIGEN_UNROLL_LOOP
    for (Index k = 1; k < packet_size - 1; k += 2) {
      const IndexPair<Index> internal_pair = this->computeIndexPair(i + k, j, 1);
      data[k] = this->m_tensor.coeff(internal_pair.first);
      data[k + 1] = this->m_tensor.coeff(internal_pair.second);
    }
    data[packet_size - 1] = this->m_tensor.coeff(lastIdx);

    return pload<PacketT>(data);
  }
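  // The scalar fallback above gathers two coefficients per computeIndexPair
  // call, which is why the method statically asserts an even packet_size;
  // the packet_size == 1 case is handled by the specialization further down.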

  template <typename PacketT,int AlignmentType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  std::enable_if_t<internal::unpacket_traits<PacketT>::size!=packet_size,PacketT>
  load(Index i, Index j) const
  {
    const Index requested_packet_size = internal::unpacket_traits<PacketT>::size;
    EIGEN_ALIGN_MAX Scalar data[requested_packet_size];

    const IndexPair<Index> indexPair = this->computeIndexPair(i, j, requested_packet_size - 1);
    const Index first = indexPair.first;
    const Index lastIdx = indexPair.second;

    data[0] = this->m_tensor.coeff(first);
    for (Index k = 1; k < requested_packet_size - 1; k += 2) {
      const IndexPair<Index> internal_pair = this->computeIndexPair(i + k, j, 1);
      data[k] = this->m_tensor.coeff(internal_pair.first);
      data[k + 1] = this->m_tensor.coeff(internal_pair.second);
    }
    data[requested_packet_size - 1] = this->m_tensor.coeff(lastIdx);

    return pload<PacketT>(data);
  }

  template <typename PacketT,int AlignmentType>
  EIGEN_DEVICE_FUNC
  EIGEN_STRONG_INLINE PacketT loadPacket(Index i, Index j) const {
    return this->load<PacketT,AlignmentType>(i,j);
  }
};

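// Specialization for packet_size == 1: with one-coefficient packets there is
// nothing to vectorize, so load() and loadPacket() simply fetch a single
// coefficient and return it wrapped in a packet.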
template<typename Scalar, typename Index, int side,
         typename Tensor,
         typename nocontract_t, typename contract_t,
         bool inner_dim_contiguous,
         bool inner_dim_reordered, int Alignment, template <class> class MakePointer_>
class BaseTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, 1, inner_dim_contiguous, inner_dim_reordered, Alignment, MakePointer_>
    : public SimpleTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, 1, inner_dim_contiguous, Alignment, MakePointer_>
{
 public:
  typedef SimpleTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, 1, inner_dim_contiguous, Alignment, MakePointer_> ParentMapper;

  EIGEN_DEVICE_FUNC
  BaseTensorContractionMapper(const Tensor& tensor,
                              const nocontract_t& nocontract_strides,
                              const nocontract_t& ij_strides,
                              const contract_t& contract_strides,
                              const contract_t& k_strides) :
      ParentMapper(tensor, nocontract_strides, ij_strides, contract_strides, k_strides) { }

  template <typename PacketT,int> EIGEN_DEVICE_FUNC
  EIGEN_STRONG_INLINE PacketT loadPacket(Index i, Index j) const {
    EIGEN_ALIGN_MAX Scalar data[1];
    data[0] = this->m_tensor.coeff(this->computeIndex(i, j));
    return pload<PacketT>(data);
  }
  template <typename PacketT,int> EIGEN_DEVICE_FUNC
  EIGEN_STRONG_INLINE PacketT load(Index i, Index j) const {
    EIGEN_ALIGN_MAX Scalar data[1];
    data[0] = this->m_tensor.coeff(this->computeIndex(i, j));
    return pload<PacketT>(data);
  }
};


template<typename Scalar, typename Index, int side,
         typename Tensor,
         typename nocontract_t, typename contract_t,
         int packet_size,
         bool inner_dim_contiguous, bool inner_dim_reordered, int Alignment, template <class> class MakePointer_=MakePointer>
class TensorContractionSubMapper {
 public:

  typedef BaseTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, packet_size, inner_dim_contiguous, inner_dim_reordered, Alignment, MakePointer_> ParentMapper;
  typedef TensorContractionSubMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, packet_size, inner_dim_contiguous, inner_dim_reordered, Alignment, MakePointer_> Self;
  typedef Self LinearMapper;
  typedef Self SubMapper;

  enum {
    // We can use direct offsets iff the parent mapper supports them and we can compute the strides.
    // TODO: we should also enable direct offsets for the Rhs case.
    UseDirectOffsets = ParentMapper::DirectOffsets && (side == Lhs) && inner_dim_contiguous && (array_size<contract_t>::value > 0)
  };

  EIGEN_DEVICE_FUNC TensorContractionSubMapper(const ParentMapper& base_mapper, Index vert_offset, Index horiz_offset)
      : m_base_mapper(base_mapper), m_vert_offset(vert_offset), m_horiz_offset(horiz_offset) {
    // Bake the offsets into the buffer used by the base mapper whenever possible. This avoids the need to recompute
    // this offset every time we attempt to access a coefficient.
    if (UseDirectOffsets) {
      Index stride = m_base_mapper.stride();
      m_base_mapper.offsetBuffer(vert_offset + horiz_offset * stride);
    }
  }
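  // For instance (illustrative, assuming a raw column-major Lhs buffer with
  // column stride s): once the constructor runs with UseDirectOffsets set,
  // the loader's pointer already sits at data + vert_offset + horiz_offset * s,
  // so the accessors below can index it with the local (i, j) alone.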

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar operator()(Index i) const {
    if (UseDirectOffsets) {
      return m_base_mapper(i, 0);
    }
    return m_base_mapper(i + m_vert_offset, m_horiz_offset);
  }
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar operator()(Index i, Index j) const {
    if (UseDirectOffsets) {
      return m_base_mapper(i, j);
    }
    return m_base_mapper(i + m_vert_offset, j + m_horiz_offset);
  }

  template <typename PacketT>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketT loadPacket(Index i) const {
    if (UseDirectOffsets) {
      return m_base_mapper.template loadPacket<PacketT,Alignment>(i, 0);
    }
    return m_base_mapper.template loadPacket<PacketT,Alignment>(i + m_vert_offset, m_horiz_offset);
  }

  template <typename PacketT>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketT loadPacket(Index i, Index j) const {
    if (UseDirectOffsets) {
      return m_base_mapper.template loadPacket<PacketT,Alignment>(i, j);
    }
    return m_base_mapper.template loadPacket<PacketT,Alignment>(i + m_vert_offset, j + m_horiz_offset);
  }

  template <typename PacketT, int AlignmentType>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketT loadPacket(Index i, Index j) const {
    if (UseDirectOffsets) {
      return m_base_mapper.template load<PacketT,AlignmentType>(i, j);
    }
    return m_base_mapper.template loadPacket<PacketT,AlignmentType>(i + m_vert_offset, j + m_horiz_offset);
  }

  template <typename PacketT>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void storePacket(Index i, const PacketT& p) const {
    if (UseDirectOffsets) {
      m_base_mapper.storePacket(i, 0, p);
      return; // without the early return the packet would be stored twice
    }
    m_base_mapper.storePacket(i + m_vert_offset, m_horiz_offset, p);
  }

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE LinearMapper getLinearMapper(Index i, Index j) const {
    if (UseDirectOffsets) {
      return LinearMapper(m_base_mapper, i, j);
    }
    return LinearMapper(m_base_mapper, i + m_vert_offset, j + m_horiz_offset);
  }

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE SubMapper getSubMapper(Index i, Index j) const {
    if (UseDirectOffsets) {
      return SubMapper(m_base_mapper, i, j);
    }
    return SubMapper(m_base_mapper, i + m_vert_offset, j + m_horiz_offset);
  }

  template <typename PacketT, int AlignmentType>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketT load(Index i) const {
    EIGEN_STATIC_ASSERT((internal::is_same<PacketT, PacketT>::value), YOU_MADE_A_PROGRAMMING_MISTAKE);
    const int ActualAlignment = (AlignmentType == Aligned) && (Alignment == Aligned) ? Aligned : Unaligned;
    if (UseDirectOffsets) {
      return m_base_mapper.template loadPacket<PacketT,ActualAlignment>(i, 0);
    }
    return m_base_mapper.template loadPacket<PacketT,ActualAlignment>(i + m_vert_offset, m_horiz_offset);
  }

  template <typename PacketT>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool aligned(Index) const {
    return false;
  }

  const ParentMapper& base_mapper() const { return m_base_mapper; }
  Index vert_offset() const { return m_vert_offset; }
  Index horiz_offset() const { return m_horiz_offset; }

 private:
  ParentMapper m_base_mapper;
  const Index m_vert_offset;
  const Index m_horiz_offset;
};


template<typename Scalar_, typename Index, int side,
         typename Tensor,
         typename nocontract_t, typename contract_t,
         int packet_size,
         bool inner_dim_contiguous, bool inner_dim_reordered, int Alignment, template <class> class MakePointer_=MakePointer>
class TensorContractionInputMapper
    : public BaseTensorContractionMapper<Scalar_, Index, side, Tensor, nocontract_t, contract_t, packet_size, inner_dim_contiguous, inner_dim_reordered, Alignment, MakePointer_> {

 public:
  typedef Scalar_ Scalar;
  typedef BaseTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, packet_size, inner_dim_contiguous, inner_dim_reordered, Alignment, MakePointer_> Base;
  typedef TensorContractionSubMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, packet_size, inner_dim_contiguous, inner_dim_reordered, Alignment, MakePointer_> SubMapper;
  typedef SubMapper VectorMapper;
  typedef SubMapper LinearMapper;

  EIGEN_DEVICE_FUNC TensorContractionInputMapper(const Tensor& tensor,
                                                 const nocontract_t& nocontract_strides,
                                                 const nocontract_t& ij_strides,
                                                 const contract_t& contract_strides,
                                                 const contract_t& k_strides)
      : Base(tensor, nocontract_strides, ij_strides, contract_strides, k_strides) { }

  EIGEN_DEVICE_FUNC
  EIGEN_STRONG_INLINE SubMapper getSubMapper(Index i, Index j) const {
    return SubMapper(*this, i, j);
  }

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE LinearMapper getLinearMapper(Index i, Index j) const {
    return LinearMapper(*this, i, j);
  }

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE VectorMapper getVectorMapper(Index i, Index j) const {
    return VectorMapper(*this, i, j);
  }

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE const CoeffLoader<Tensor, Tensor::RawAccess, MakePointer_>& get_tensor() const {
    return Base::m_tensor;
  }
};
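// Illustrative sketch (shown for exposition only, with names modeled on the
// way TensorContraction.h uses this class): the contraction evaluator
// instantiates one input mapper per operand roughly along these lines, with
// the template arguments taken from the operand evaluator's traits:
//
//   typedef internal::TensorContractionInputMapper<
//       LhsScalar, Index, internal::Lhs, LeftEvaluator,
//       left_nocontract_t, contract_t, lhs_packet_size,
//       lhs_inner_dim_contiguous, false, Unaligned> LhsMapper;
//   LhsMapper lhs(m_leftImpl, m_left_nocontract_strides, m_i_strides,
//                 m_left_contracting_strides, m_k_strides);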


template <typename T> struct TensorContractionInputMapperTrait;

template<typename Scalar_, typename Index_, int side_,
         typename Tensor_,
         typename nocontract_t_, typename contract_t_,
         int packet_size_,
         bool inner_dim_contiguous_, bool inner_dim_reordered_, int Alignment_, template <class> class MakePointer_>
struct TensorContractionInputMapperTrait<TensorContractionInputMapper<Scalar_, Index_, side_, Tensor_,
                                                                      nocontract_t_, contract_t_, packet_size_, inner_dim_contiguous_,
                                                                      inner_dim_reordered_, Alignment_, MakePointer_> > {

  typedef Tensor_ XprType;
  static const bool inner_dim_contiguous = inner_dim_contiguous_;
  static const bool inner_dim_reordered = inner_dim_reordered_;
};


} // end namespace internal
} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_MAPPER_H