TensorContraction.h
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
5 //
6 // This Source Code Form is subject to the terms of the Mozilla
7 // Public License v. 2.0. If a copy of the MPL was not distributed
8 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 
10 #ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H
11 #define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H
12 
13 #include "./InternalHeaderCheck.h"
14 
15 namespace Eigen {
16 
24 namespace internal {
25 
26 template<typename Dimensions, typename LhsXprType, typename RhsXprType, typename OutputKernelType>
27 struct traits<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType> >
28 {
29  // Type promotion to handle the case where the types of the lhs and the rhs are different.
30  typedef typename gebp_traits<std::remove_const_t<typename LhsXprType::Scalar>,
31  std::remove_const_t<typename RhsXprType::Scalar>>::ResScalar Scalar;
32 
33  typedef typename promote_storage_type<typename traits<LhsXprType>::StorageKind,
34  typename traits<RhsXprType>::StorageKind>::ret StorageKind;
35  typedef typename promote_index_type<typename traits<LhsXprType>::Index,
36  typename traits<RhsXprType>::Index>::type Index;
37  typedef typename LhsXprType::Nested LhsNested;
38  typedef typename RhsXprType::Nested RhsNested;
39  typedef std::remove_reference_t<LhsNested> LhsNested_;
40  typedef std::remove_reference_t<RhsNested> RhsNested_;
41 
42  // From NumDims below.
43  static constexpr int NumDimensions = traits<LhsXprType>::NumDimensions + traits<RhsXprType>::NumDimensions - 2 * array_size<Dimensions>::value;
44  static constexpr int Layout = traits<LhsXprType>::Layout;
45  typedef std::conditional_t<Pointer_type_promotion<typename LhsXprType::Scalar, Scalar>::val,
46  typename traits<LhsXprType>::PointerType,
47  typename traits<RhsXprType>::PointerType>
48  PointerType;
49 
50  enum {
51  Flags = 0
52  };
53 };
54 
55 template<typename Dimensions, typename LhsXprType, typename RhsXprType, typename OutputKernelType>
56 struct eval<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType>, Eigen::Dense>
57 {
58  typedef const TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType>& type;
59 };
60 
61 template<typename Dimensions, typename LhsXprType, typename RhsXprType, typename OutputKernelType>
62 struct nested<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType>, 1, typename eval<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType> >::type>
63 {
64  typedef TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType> type;
65 };
66 
67 template<typename Indices_, typename LeftArgType_, typename RightArgType_, typename OutputKernelType_, typename Device_>
68 struct traits<TensorEvaluator<const TensorContractionOp<Indices_, LeftArgType_, RightArgType_, OutputKernelType_>, Device_> > {
69  typedef Indices_ Indices;
70  typedef LeftArgType_ LeftArgType;
71  typedef RightArgType_ RightArgType;
72  typedef OutputKernelType_ OutputKernelType;
73  typedef Device_ Device;
74 
75  // From NumDims below.
76  static constexpr int NumDimensions = traits<LeftArgType_>::NumDimensions + traits<RightArgType_>::NumDimensions - 2 * array_size<Indices_>::value;
77 };
78 
79 // Helper class to allocate and deallocate temporary memory for packed buffers.
80 template <typename LhsScalar, typename RhsScalar>
81 struct TensorContractionBlockMemAllocator {
82  typedef void* BlockMemHandle;
83 
84  template <typename Device>
85  EIGEN_DEVICE_FUNC static BlockMemHandle allocate(Device& d, const Index bm,
86  const Index bk,
87  const Index bn,
88  LhsScalar** lhs_block,
89  RhsScalar** rhs_block) {
90  eigen_assert(lhs_block);
91  eigen_assert(rhs_block);
92  BlockSizes sz = ComputeLhsRhsBlockSizes(bm, bk, bn);
93  char* block_mem = static_cast<char*>(d.allocate(sz.lhs_size + sz.rhs_size));
94  *lhs_block = static_cast<LhsScalar*>(static_cast<void*>(block_mem));
95  *rhs_block = static_cast<RhsScalar*>(static_cast<void*>(block_mem + sz.lhs_size));
96  return block_mem;
97  }
98 
99  template <typename Device>
100  EIGEN_DEVICE_FUNC static BlockMemHandle allocateSlices(
101  Device& d, const Index bm, const Index bk, const Index bn,
102  const Index num_lhs, const Index num_rhs, const Index num_slices,
103  std::vector<LhsScalar*>* lhs_blocks,
104  std::vector<RhsScalar*>* rhs_blocks) {
105  eigen_assert(num_slices > 0);
106  eigen_assert(num_lhs >= 0 && num_rhs >= 0);
107  eigen_assert(num_lhs == 0 || lhs_blocks);
108  eigen_assert(num_rhs == 0 || rhs_blocks);
109  BlockSizes sz = ComputeLhsRhsBlockSizes(bm, bk, bn);
110  void* block_mem = d.allocate(
111  (num_lhs * sz.lhs_size + num_rhs * sz.rhs_size) * num_slices);
112  eigen_assert(block_mem);
113  char* mem = static_cast<char*>(block_mem);
114 
115  for (Index x = 0; x < num_slices; x++) {
116  if (num_lhs > 0) lhs_blocks[x].resize(num_lhs);
117  for (Index m = 0; m < num_lhs; m++) {
118  lhs_blocks[x][m] = static_cast<LhsScalar*>(static_cast<void*>(mem));
119  mem += sz.lhs_size;
120  }
121  if (num_rhs > 0) rhs_blocks[x].resize(num_rhs);
122  for (Index n = 0; n < num_rhs; n++) {
123  rhs_blocks[x][n] = static_cast<RhsScalar*>(static_cast<void*>(mem));
124  mem += sz.rhs_size;
125  }
126  }
127 
128  return block_mem;
129  }
130 
131  template <typename Device>
132  EIGEN_DEVICE_FUNC static void deallocate(Device& d, BlockMemHandle handle) {
133  d.deallocate(handle);
134  }
135 
136  private:
137  struct BlockSizes {
138  Index lhs_size;
139  Index rhs_size;
140  };
141  EIGEN_DEVICE_FUNC static BlockSizes ComputeLhsRhsBlockSizes(const Index bm,
142  const Index bk,
143  const Index bn) {
144  Index align = numext::maxi(EIGEN_MAX_ALIGN_BYTES, 1);
145  BlockSizes sz;
146  sz.lhs_size = divup<Index>(bm * bk * sizeof(LhsScalar), align) * align;
147  sz.rhs_size = divup<Index>(bn * bk * sizeof(RhsScalar), align) * align;
148  return sz;
149  }
150 };
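
As a rough illustration of how this allocator is meant to be driven (a minimal sketch, not code from this file; the block dimensions bm/bk/bn are made-up values): both packed panels live in a single device allocation, and ComputeLhsRhsBlockSizes rounds each panel up to a multiple of EIGEN_MAX_ALIGN_BYTES. For instance, with bm = 64, bk = 32 and float scalars the lhs panel needs 64 * 32 * 4 = 8192 bytes, which is already a multiple of, e.g., a 64-byte alignment.

#include <unsupported/Eigen/CXX11/Tensor>

// Sketch only: arbitrary illustrative block dimensions.
void packed_buffer_sketch() {
  Eigen::DefaultDevice device;
  typedef Eigen::internal::TensorContractionBlockMemAllocator<float, float> Allocator;

  const Eigen::Index bm = 64, bk = 32, bn = 48;
  float* lhs_block = nullptr;  // will point at the packed bm x bk panel
  float* rhs_block = nullptr;  // will point at the packed bk x bn panel

  // One allocation holds both panels, each padded to EIGEN_MAX_ALIGN_BYTES.
  Allocator::BlockMemHandle handle =
      Allocator::allocate(device, bm, bk, bn, &lhs_block, &rhs_block);

  // ... pack panels and run the GEBP kernel (see TensorContractionKernel below) ...

  Allocator::deallocate(device, handle);
}
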
151 
152 // WARNING: In this code we assume that Lhs and Rhs tensor expressions are in
153 // ColMajor storage order. This property is guaranteed by the
154 // TensorContractionOp evaluator. TensorContractionKernel specifies how we pack
155 // blocks of Lhs and Rhs tensor expressions, and how we invoke matrix
156 // multiplication for these blocks. Default tensor contraction uses
157 // gemm_pack_rhs, gemm_pack_lhs and gebp_kernel from Eigen Core (see
158 // GeneralBlockPanelKernel.h for details).
159 //
160 // By specializing contraction kernels we can use other low level libraries to
161 // perform matrix multiplication, and still rely on the Eigen contraction evaluator.
162 // This also includes full support in TensorContractionThreadPool, assuming that
163 // the underlying gemm does not use its own threading.
164 //
165 // - ResScalar/LhsScalar/RhsScalar - scalar type for the result of
166 // multiplication, lhs tensor and rhs tensor respectively.
167 //
168 // - StorageIndex - index type for the tensor expressions. In practice it is
169 // almost always Eigen::Index.
170 //
171 // - OutputMapper provides access to the memory of the output matrix. In
172 // practice it's always column major blas_data_mapper (it must be of ResScalar
173 // type).
174 //
175 // - LhsMapper/RhsMapper similarly to blas_data_mapper provide a two dimensional
176 // view into the Lhs/Rhs tensor expressions. In practice it's
177 // TensorContractionInputMapper, or some specialization of it based on the
178 // type of tensor expression (e.g. TensorImagePatchOp has optimized input
179 // mapper).
180 template <typename ResScalar, typename LhsScalar, typename RhsScalar,
181  typename StorageIndex, typename OutputMapper, typename LhsMapper,
182  typename RhsMapper>
183 struct TensorContractionKernel {
184  // True if `invoke()` supports `beta` in `C <- alpha * A * B + beta * C`
185 // (otherwise beta should always be equal to 1).
186  enum { HasBeta = false };
187 
188  EIGEN_DEVICE_FUNC
189  TensorContractionKernel(StorageIndex m_, StorageIndex k_, StorageIndex n_,
190  StorageIndex bm_, StorageIndex bk_, StorageIndex bn_)
191  : m(m_), k(k_), n(n_), bm(bm_), bk(bk_), bn(bn_) {}
192 
193  // Pack blocks of Lhs and Rhs into contiguous blocks in memory.
194  typedef LhsScalar* LhsBlock;
195  typedef RhsScalar* RhsBlock;
196 
197  // Packed Lhs/Rhs block memory allocator.
198  typedef TensorContractionBlockMemAllocator<LhsScalar, RhsScalar>
199  BlockMemAllocator;
200  typedef typename BlockMemAllocator::BlockMemHandle BlockMemHandle;
201 
202  typedef typename internal::gebp_traits<LhsScalar, RhsScalar> Traits;
203 
204  typedef internal::gemm_pack_lhs<
205  LhsScalar, StorageIndex, typename LhsMapper::SubMapper, Traits::mr,
206  Traits::LhsProgress, typename Traits::LhsPacket4Packing, ColMajor>
207  LhsPacker;
208 
209  typedef internal::gemm_pack_rhs<RhsScalar, StorageIndex,
210  typename RhsMapper::SubMapper, Traits::nr,
211  ColMajor>
212  RhsPacker;
213 
214  typedef internal::gebp_kernel<LhsScalar, RhsScalar, StorageIndex,
215  OutputMapper, Traits::mr, Traits::nr,
216  /*ConjugateLhs*/ false, /*ConjugateRhs*/ false>
217  GebpKernel;
218 
219  template <typename Device>
220  EIGEN_DEVICE_FUNC BlockMemHandle allocate(Device& d, LhsBlock* lhs_block,
221  RhsBlock* rhs_block) {
222  return BlockMemAllocator::allocate(d, bm, bk, bn, lhs_block, rhs_block);
223  }
224 
225  template <typename Device>
226  EIGEN_DEVICE_FUNC BlockMemHandle allocateSlices(
227  Device& d, const StorageIndex num_lhs, const StorageIndex num_rhs,
228  const StorageIndex num_slices, std::vector<LhsBlock>* lhs_blocks,
229  std::vector<RhsBlock>* rhs_blocks) {
230  return BlockMemAllocator::allocateSlices(
231  d, bm, bk, bn, num_lhs, num_rhs, num_slices, lhs_blocks, rhs_blocks);
232  }
233 
234  template <typename Device>
235  EIGEN_DEVICE_FUNC static void deallocate(Device& d, BlockMemHandle handle) {
236  BlockMemAllocator::deallocate(d, handle);
237  }
238 
239  EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void packLhs(
240  LhsBlock* lhsBlock, const typename LhsMapper::SubMapper& data_mapper,
241  const StorageIndex depth, const StorageIndex rows) {
242  LhsPacker()(*lhsBlock, data_mapper, depth, rows, /*stride*/ 0,
243  /*offset*/ 0);
244  }
245 
246  EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void packRhs(
247  RhsBlock* rhsBlock, const typename RhsMapper::SubMapper& data_mapper,
248  const StorageIndex depth, const StorageIndex cols) {
249  RhsPacker()(*rhsBlock, data_mapper, depth, cols);
250  }
251 
252  EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void invoke(
253  const OutputMapper& output_mapper, const LhsBlock& lhsBlock,
254  const RhsBlock& rhsBlock, const StorageIndex rows,
255  const StorageIndex depth, const StorageIndex cols,
256  const ResScalar alpha, const ResScalar beta) {
257  // Default GEBP kernel does not support beta.
258  eigen_assert(beta == ResScalar(1));
259  static const int kComputeStrideFromBlockDimensions = -1;
260  GebpKernel()(output_mapper, lhsBlock, rhsBlock, rows, depth, cols, alpha,
261  /*strideA*/ kComputeStrideFromBlockDimensions,
262  /*strideB*/ kComputeStrideFromBlockDimensions,
263  /*offsetA*/ 0, /*offsetB*/ 0);
264  }
265 
266  private:
267  // These are dimensions of the original Tensors, and selected block sizes. The
268  // actual block sizes passed to all functions above might be smaller because of
269  // the partial blocks at the end.
270  const StorageIndex m;
271  const StorageIndex k;
272  const StorageIndex n;
273  const StorageIndex bm;
274  const StorageIndex bk;
275  const StorageIndex bn;
276 };
277 
278 } // end namespace internal
279 
280 // Tensor contraction params that should make it possible to map output matrix
281 // 2-dimensional coordinates back to the output tensor dimensions.
282 struct TensorContractionParams {
283  // TensorContraction evaluator assumes that both tensors are in ColMajor
284  // layout; if the tensors are in RowMajor the evaluator swaps lhs with rhs.
285  bool swapped_arguments;
286 };
287 
288 // An output kernel allows fusing operations into the tensor contraction.
289 //
290 // Examples:
291 // 1. Elementwise Relu transformation following Conv2D.
292 // 2. AddBias to the Conv2D output channels dimension.
293 //
294 // The NoOpOutputKernel implements an output kernel that does absolutely nothing.
295 struct NoOpOutputKernel {
311  template <typename Index, typename Scalar>
312  EIGEN_ALWAYS_INLINE void operator()(
313  const internal::blas_data_mapper<Scalar, Index, ColMajor>& output_mapper,
314  const TensorContractionParams& params, Index i,
315  Index j, Index num_rows, Index num_cols) const {
316  EIGEN_UNUSED_VARIABLE(output_mapper);
317  EIGEN_UNUSED_VARIABLE(params);
318  EIGEN_UNUSED_VARIABLE(i);
319  EIGEN_UNUSED_VARIABLE(j);
320  EIGEN_UNUSED_VARIABLE(num_rows);
321  EIGEN_UNUSED_VARIABLE(num_cols);
322  }
323 };
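
As a hedged illustration of the fusions mentioned above (this is not part of Eigen), a user-defined output kernel must be callable with the same arguments as NoOpOutputKernel::operator(). The sketch below clamps every finished output block to non-negative values, i.e. it fuses an elementwise Relu into the contraction:

#include <unsupported/Eigen/CXX11/Tensor>

// Illustrative only: an output kernel applying Relu to each finished block.
// The mapper is already offset to the block, so valid indices run from
// (0, 0) up to (num_rows, num_cols).
struct ReluOutputKernel {
  template <typename Index, typename Scalar>
  EIGEN_ALWAYS_INLINE void operator()(
      const Eigen::internal::blas_data_mapper<Scalar, Index, Eigen::ColMajor>& output_mapper,
      const Eigen::TensorContractionParams& params, Index i, Index j,
      Index num_rows, Index num_cols) const {
    EIGEN_UNUSED_VARIABLE(params);
    EIGEN_UNUSED_VARIABLE(i);
    EIGEN_UNUSED_VARIABLE(j);
    for (Index c = 0; c < num_cols; ++c) {
      for (Index r = 0; r < num_rows; ++r) {
        Scalar& value = output_mapper(r, c);
        value = Eigen::numext::maxi(value, Scalar(0));
      }
    }
  }
};

Such a functor is carried through the OutputKernelType template parameter of TensorContractionOp below, typically supplied as the extra argument of Tensor::contract().
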
324 
325 template<typename Indices, typename LhsXprType, typename RhsXprType, typename OutputKernelType = const NoOpOutputKernel>
326 class TensorContractionOp : public TensorBase<TensorContractionOp<Indices, LhsXprType, RhsXprType, OutputKernelType>, ReadOnlyAccessors>
327 {
328  public:
329  typedef typename Eigen::internal::traits<TensorContractionOp>::Scalar Scalar;
330  typedef typename internal::gebp_traits<typename LhsXprType::CoeffReturnType,
331  typename RhsXprType::CoeffReturnType>::ResScalar CoeffReturnType;
332  typedef typename Eigen::internal::nested<TensorContractionOp>::type Nested;
333  typedef typename Eigen::internal::traits<TensorContractionOp>::StorageKind StorageKind;
334  typedef typename Eigen::internal::traits<TensorContractionOp>::Index Index;
335 
336  EIGEN_DEVICE_FUNC TensorContractionOp(
337  const LhsXprType& lhs, const RhsXprType& rhs, const Indices& dims,
338  const OutputKernelType& output_kernel = OutputKernelType())
339  : m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_indices(dims),
340  m_output_kernel(output_kernel) {}
341 
342  EIGEN_DEVICE_FUNC
343  const Indices& indices() const { return m_indices; }
344 
346  EIGEN_DEVICE_FUNC
347  const internal::remove_all_t<typename LhsXprType::Nested>&
348  lhsExpression() const { return m_lhs_xpr; }
349 
350  EIGEN_DEVICE_FUNC
351  const internal::remove_all_t<typename RhsXprType::Nested>&
352  rhsExpression() const { return m_rhs_xpr; }
353 
354  EIGEN_DEVICE_FUNC
355  const OutputKernelType& outputKernel() const { return m_output_kernel; }
356 
357  protected:
358  typename LhsXprType::Nested m_lhs_xpr;
359  typename RhsXprType::Nested m_rhs_xpr;
360  const Indices m_indices;
361  const OutputKernelType m_output_kernel;
362 };
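
TensorContractionOp is normally not constructed directly; it is returned by TensorBase::contract(). A minimal usage sketch (the tensor sizes are arbitrary illustrative values):

#include <unsupported/Eigen/CXX11/Tensor>

void contract_usage_sketch() {
  // Rank-3 LHS contracted with a rank-2 RHS over one index pair:
  // result rank = 3 + 2 - 2 * 1 = 3 (see NumDimensions in the traits above).
  Eigen::Tensor<float, 3> a(4, 5, 6);
  Eigen::Tensor<float, 2> b(6, 7);
  a.setRandom();
  b.setRandom();

  // Contract dimension 2 of `a` with dimension 0 of `b`.
  Eigen::array<Eigen::IndexPair<int>, 1> pairs = {Eigen::IndexPair<int>(2, 0)};
  Eigen::Tensor<float, 3> c = a.contract(b, pairs);  // dimensions (4, 5, 7)

  // A custom output kernel (e.g. the ReluOutputKernel sketched earlier) can be
  // passed as an additional argument to fuse per-block post-processing:
  // Eigen::Tensor<float, 3> d = a.contract(b, pairs, ReluOutputKernel());
}
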
363 
364 template<typename Derived>
365 struct TensorContractionEvaluatorBase : internal::no_assignment_operator {
366  typedef typename internal::traits<Derived>::Indices Indices;
367  typedef typename internal::traits<Derived>::LeftArgType LeftArgType;
368  typedef typename internal::traits<Derived>::RightArgType RightArgType;
369  typedef typename internal::traits<Derived>::OutputKernelType OutputKernelType;
370  typedef typename internal::traits<Derived>::Device Device;
371 
372  typedef TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType> XprType;
373  typedef std::remove_const_t<typename XprType::Scalar> Scalar;
374  typedef typename XprType::Index Index;
375  typedef typename XprType::CoeffReturnType CoeffReturnType;
376  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
377  typedef StorageMemory<Scalar, Device> Storage;
378  typedef typename Storage::Type EvaluatorPointerType;
379 
380  static constexpr int Layout = TensorEvaluator<LeftArgType, Device>::Layout;
381  enum {
382  IsAligned = true,
383  PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
384  BlockAccess = false,
385  PreferBlockAccess = false,
386  CoordAccess = false, // to be implemented
387  RawAccess = true
388  };
389 
390  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
391  typedef internal::TensorBlockNotImplemented TensorBlock;
392  //===--------------------------------------------------------------------===//
393 
394  // Most of the code is assuming that both input tensors are ColMajor. If the
395  // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS:
396  // If we want to compute A * B = C, where A is LHS and B is RHS, the code
397  // will pretend B is LHS and A is RHS.
398  typedef std::conditional_t<
399  static_cast<int>(Layout) == static_cast<int>(ColMajor), LeftArgType, RightArgType> EvalLeftArgType;
400  typedef std::conditional_t<
401  static_cast<int>(Layout) == static_cast<int>(ColMajor), RightArgType, LeftArgType> EvalRightArgType;
402 
403  typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluatorType;
404  typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluatorType;
405 
406  static constexpr int LDims =
407  internal::array_size<typename TensorEvaluator<EvalLeftArgType, Device>::Dimensions>::value;
408  static constexpr int RDims =
409  internal::array_size<typename TensorEvaluator<EvalRightArgType, Device>::Dimensions>::value;
410  static constexpr int ContractDims = internal::array_size<Indices>::value;
411  static constexpr int NumDims = LDims + RDims - 2 * ContractDims;
412 
416 
418 
419  EIGEN_STRONG_INLINE
421  : m_leftImpl(choose(Cond<static_cast<int>(Layout) == static_cast<int>(ColMajor)>(),
422  op.lhsExpression(), op.rhsExpression()), device),
423  m_rightImpl(choose(Cond<static_cast<int>(Layout) == static_cast<int>(ColMajor)>(),
424  op.rhsExpression(), op.lhsExpression()), device),
425  m_device(device),
426  m_output_kernel(op.outputKernel()),
427  m_result(NULL) {
428  EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<LeftArgType, Device>::Layout) ==
429  static_cast<int>(TensorEvaluator<RightArgType, Device>::Layout)),
430  YOU_MADE_A_PROGRAMMING_MISTAKE);
431 
432 
433  DSizes<Index, LDims> eval_left_dims;
434  DSizes<Index, RDims> eval_right_dims;
435  array<IndexPair<Index>, ContractDims> eval_op_indices;
436  if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
437  // For ColMajor, we keep using the existing dimensions
438  for (int i = 0; i < LDims; i++) {
439  eval_left_dims[i] = m_leftImpl.dimensions()[i];
440  }
441  for (int i = 0; i < RDims; i++) {
442  eval_right_dims[i] = m_rightImpl.dimensions()[i];
443  }
444  // We keep the pairs of contracting indices.
445  for (int i = 0; i < ContractDims; i++) {
446  eval_op_indices[i].first = op.indices()[i].first;
447  eval_op_indices[i].second = op.indices()[i].second;
448  }
449  } else {
450  // For RowMajor, we need to reverse the existing dimensions
451  for (int i = 0; i < LDims; i++) {
452  eval_left_dims[i] = m_leftImpl.dimensions()[LDims - i - 1];
453  }
454  for (int i = 0; i < RDims; i++) {
455  eval_right_dims[i] = m_rightImpl.dimensions()[RDims - i - 1];
456  }
457  // We need to flip all the pairs of contracting indices as well as
458  // reversing the dimensions.
459  for (int i = 0; i < ContractDims; i++) {
460  eval_op_indices[i].first = LDims - 1 - op.indices()[ContractDims - 1 - i].second;
461  eval_op_indices[i].second = RDims - 1 - op.indices()[ContractDims - 1 - i].first;
462  }
463  }
464 
465  // Check for duplicate axes and make sure the first index in eval_op_indices
466  // is increasing. Using O(n^2) sorting is OK since ContractDims is small
467  for (int i = 0; i < ContractDims; i++) {
468  for (int j = i + 1; j < ContractDims; j++) {
469  eigen_assert(eval_op_indices[j].first != eval_op_indices[i].first &&
470  eval_op_indices[j].second != eval_op_indices[i].second &&
471  "contraction axes should be unique");
472  if (eval_op_indices[j].first < eval_op_indices[i].first) {
473  numext::swap(eval_op_indices[j], eval_op_indices[i]);
474  }
475  }
476  }
477 
478  array<Index, LDims> lhs_strides;
479  lhs_strides[0] = 1;
480  for (int i = 0; i < LDims-1; ++i) {
481  lhs_strides[i+1] = lhs_strides[i] * eval_left_dims[i];
482  }
483 
484  array<Index, RDims> rhs_strides;
485  rhs_strides[0] = 1;
486  for (int i = 0; i < RDims-1; ++i) {
487  rhs_strides[i+1] = rhs_strides[i] * eval_right_dims[i];
488  }
489 
490  if (m_i_strides.size() > 0) m_i_strides[0] = 1;
491  if (m_j_strides.size() > 0) m_j_strides[0] = 1;
492  if (m_k_strides.size() > 0) m_k_strides[0] = 1;
493 
494  m_i_size = 1;
495  m_j_size = 1;
496  m_k_size = 1;
497 
498  // To compute the dimension, we simply concatenate the non-contracting
499  // dimensions of the left and then the right tensor. Additionally, we also
500  // compute the strides corresponding to the left non-contracting
501  // dimensions and right non-contracting dimensions.
502  m_lhs_inner_dim_contiguous = true;
503  int dim_idx = 0;
504  Index nocontract_idx = 0;
505 
506  for (int i = 0; i < LDims; i++) {
507  // find if we are contracting on index i of left tensor
508  bool contracting = false;
509  for (int j = 0; j < ContractDims; j++) {
510  if (eval_op_indices[j].first == i) {
511  contracting = true;
512  break;
513  }
514  }
515  if (!contracting) {
516  // add dimension size to output dimensions
517  m_dimensions[dim_idx] = eval_left_dims[i];
518  m_left_nocontract_strides[nocontract_idx] = lhs_strides[i];
519  if (dim_idx != i) {
520  m_lhs_inner_dim_contiguous = false;
521  }
522  if (nocontract_idx+1 < internal::array_size<left_nocontract_t>::value) {
523  m_i_strides[nocontract_idx+1] =
524  m_i_strides[nocontract_idx] * eval_left_dims[i];
525  } else {
526  m_i_size = m_i_strides[nocontract_idx] * eval_left_dims[i];
527  }
528  dim_idx++;
529  nocontract_idx++;
530  }
531  }
532 
533  nocontract_idx = 0;
534  for (int i = 0; i < RDims; i++) {
535  bool contracting = false;
536  // find if we are contracting on index i of right tensor
537  for (int j = 0; j < ContractDims; j++) {
538  if (eval_op_indices[j].second == i) {
539  contracting = true;
540  break;
541  }
542  }
543  if (!contracting) {
544  m_dimensions[dim_idx] = eval_right_dims[i];
545  if (nocontract_idx+1 < internal::array_size<right_nocontract_t>::value) {
546  m_j_strides[nocontract_idx+1] =
547  m_j_strides[nocontract_idx] * eval_right_dims[i];
548  } else {
549  m_j_size = m_j_strides[nocontract_idx] * eval_right_dims[i];
550  }
551  m_right_nocontract_strides[nocontract_idx] = rhs_strides[i];
552  dim_idx++;
553  nocontract_idx++;
554  }
555  }
556 
557  // Now compute the strides corresponding to the contracting dimensions. We
558  // assumed above that non-contracting axes are represented in the same order
559  // in the matrix as they are in the tensor. This is not the case for
560  // contracting axes. As the contracting axes must be of the same size in
561  // each tensor, we'll only look at the first tensor here.
562  m_rhs_inner_dim_contiguous = true;
563  m_rhs_inner_dim_reordered = false;
564  for (int i = 0; i < ContractDims; i++) {
565  Index left = eval_op_indices[i].first;
566  Index right = eval_op_indices[i].second;
567 
568  Index size = eval_left_dims[left];
569  eigen_assert(size == eval_right_dims[right] &&
570  "Contraction axes must be same size");
571 
572  if (i+1 < static_cast<int>(internal::array_size<contract_t>::value)) {
573  m_k_strides[i+1] = m_k_strides[i] * size;
574  } else {
575  m_k_size = m_k_strides[i] * size;
576  }
577  m_left_contracting_strides[i] = lhs_strides[left];
578  m_right_contracting_strides[i] = rhs_strides[right];
579 
580  if (i > 0 && right < eval_op_indices[i-1].second) {
581  m_rhs_inner_dim_reordered = true;
582  }
583  if (right != i) {
584  m_rhs_inner_dim_contiguous = false;
585  }
586  }
587 
588  // If the layout is RowMajor, we need to reverse the m_dimensions
589  if (static_cast<int>(Layout) == static_cast<int>(RowMajor)) {
590  for (int i = 0, j = NumDims - 1; i < j; i++, j--) {
591  numext::swap(m_dimensions[i], m_dimensions[j]);
592  }
593  }
594 
595  // A set of parameters that will allow the output kernel to map output
596  // matrix coordinates (i, j) back to the original output tensor dimensions.
597  // TODO(ezhulenev): Add parameters required to infer output tensor index for
598  // more complex contractions than 2x2 on internal dimension.
599  m_tensor_contraction_params = {/*swapped_arguments=*/static_cast<int>(Layout) == RowMajor};
600  }
601 
602  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
603 
604  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
605  m_leftImpl.evalSubExprsIfNeeded(NULL);
606  m_rightImpl.evalSubExprsIfNeeded(NULL);
607  if (data) {
608  evalTo(data);
609  return false;
610  } else {
611  m_result = static_cast<EvaluatorPointerType>(m_device.allocate(dimensions().TotalSize() * sizeof(Scalar)));
612  evalTo(m_result);
613  return true;
614  }
615  }
616 
617 #ifdef EIGEN_USE_THREADS
618  template <typename EvalSubExprsCallback>
619  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
620  EvaluatorPointerType dest, EvalSubExprsCallback done) {
621  m_leftImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) {
622  m_rightImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) {
623  if (dest) {
624  evalToAsync(dest, [done]() { done(false); });
625  } else {
626  m_result = static_cast<EvaluatorPointerType>(
627  m_device.allocate(dimensions().TotalSize() * sizeof(Scalar)));
628  evalToAsync(m_result, [done]() { done(true); });
629  }
630  });
631  });
632  }
633 #endif // EIGEN_USE_THREADS
634 
635 #ifndef TENSOR_CONTRACTION_DISPATCH
636 #define TENSOR_CONTRACTION_DISPATCH(METHOD, ALIGNMENT, ARGS) \
637  if (this->m_lhs_inner_dim_contiguous) { \
638  if (this->m_rhs_inner_dim_contiguous) { \
639  if (this->m_rhs_inner_dim_reordered) { \
640  METHOD<true, true, true, ALIGNMENT> ARGS; \
641  } else { \
642  METHOD<true, true, false, ALIGNMENT> ARGS; \
643  } \
644  } else { \
645  if (this->m_rhs_inner_dim_reordered) { \
646  METHOD<true, false, true, ALIGNMENT> ARGS; \
647  } else { \
648  METHOD<true, false, false, ALIGNMENT> ARGS; \
649  } \
650  } \
651  } else { \
652  if (this->m_rhs_inner_dim_contiguous) { \
653  if (this->m_rhs_inner_dim_reordered) { \
654  METHOD<false, true, true, ALIGNMENT> ARGS; \
655  } else { \
656  METHOD<false, true, false, ALIGNMENT> ARGS; \
657  } \
658  } else { \
659  if (this->m_rhs_inner_dim_reordered) { \
660  METHOD<false, false, true, ALIGNMENT> ARGS; \
661  } else { \
662  METHOD<false, false, false, ALIGNMENT> ARGS; \
663  } \
664  } \
665  }
666 #endif
667 
668 #ifndef TENSOR_CONTRACTION_ASYNC_DISPATCH
669 #define TENSOR_CONTRACTION_ASYNC_DISPATCH(METHOD, DONE, ALIGNMENT, ARGS, FN) \
670  if (this->m_lhs_inner_dim_contiguous) { \
671  if (this->m_rhs_inner_dim_contiguous) { \
672  if (this->m_rhs_inner_dim_reordered) { \
673  (new METHOD<DONE, true, true, true, ALIGNMENT> ARGS)->FN; \
674  } else { \
675  (new METHOD<DONE, true, true, false, ALIGNMENT> ARGS)->FN; \
676  } \
677  } else { \
678  if (this->m_rhs_inner_dim_reordered) { \
679  (new METHOD<DONE, true, false, true, ALIGNMENT> ARGS)->FN; \
680  } else { \
681  (new METHOD<DONE, true, false, false, ALIGNMENT> ARGS)->FN; \
682  } \
683  } \
684  } else { \
685  if (this->m_rhs_inner_dim_contiguous) { \
686  if (this->m_rhs_inner_dim_reordered) { \
687  (new METHOD<DONE, false, true, true, ALIGNMENT> ARGS)->FN; \
688  } else { \
689  (new METHOD<DONE, false, true, false, ALIGNMENT> ARGS)->FN; \
690  } \
691  } else { \
692  if (this->m_rhs_inner_dim_reordered) { \
693  (new METHOD<DONE, false, false, true, ALIGNMENT> ARGS)->FN; \
694  } else { \
695  (new METHOD<DONE, false, false, false, ALIGNMENT> ARGS)->FN; \
696  } \
697  } \
698  }
699 #endif
700 
701  EIGEN_DEVICE_FUNC void evalTo(Scalar* buffer) const {
702  static_cast<const Derived*>(this)->template evalProduct<Unaligned>(buffer);
703  }
704 
705 #ifdef EIGEN_USE_THREADS
706  template <typename EvalToCallback>
707  void evalToAsync(Scalar* buffer, EvalToCallback done) const {
708  static_cast<const Derived*>(this)
709  ->template evalProductAsync<EvalToCallback, Unaligned>(buffer,
710  std::move(done));
711  }
712 #endif // EIGEN_USE_THREADS
713 
714  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous,
715  bool rhs_inner_dim_reordered, int Alignment>
716  void evalProductSequential(Scalar* buffer) const {
717  if (this->m_j_size == 1) {
718  this->template evalGemv<lhs_inner_dim_contiguous,
719  rhs_inner_dim_contiguous, rhs_inner_dim_reordered,
720  Alignment>(buffer);
721  } else {
722  this->template evalGemm<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous,
723  rhs_inner_dim_reordered, Alignment>(buffer);
724  }
725  }
726 
727  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
728  #if !defined(EIGEN_HIPCC)
729  EIGEN_DEVICE_FUNC
730  #endif
731  void evalGemv(Scalar* buffer) const {
732  const Index rows = m_i_size;
733  const Index cols = m_k_size;
734 
735  typedef std::remove_const_t<typename EvalLeftArgType::Scalar> LhsScalar;
736  typedef std::remove_const_t<typename EvalRightArgType::Scalar> RhsScalar;
737  typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
738  typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;
739  const Index lhs_packet_size = internal::unpacket_traits<typename LeftEvaluator::PacketReturnType>::size;
740  const Index rhs_packet_size = internal::unpacket_traits<typename RightEvaluator::PacketReturnType>::size;
741  const int lhs_alignment = LeftEvaluator::IsAligned ? Aligned : Unaligned;
742  const int rhs_alignment = RightEvaluator::IsAligned ? Aligned : Unaligned;
743  typedef internal::TensorContractionInputMapper<LhsScalar, Index, internal::Lhs,
744  LeftEvaluator, left_nocontract_t,
745  contract_t, lhs_packet_size,
746  lhs_inner_dim_contiguous,
747  false, lhs_alignment> LhsMapper;
748 
749  typedef internal::TensorContractionInputMapper<RhsScalar, Index, internal::Rhs,
750  RightEvaluator, right_nocontract_t,
751  contract_t, rhs_packet_size,
752  rhs_inner_dim_contiguous,
753  rhs_inner_dim_reordered, rhs_alignment> RhsMapper;
754 
755  LhsMapper lhs(m_leftImpl, m_left_nocontract_strides, m_i_strides,
756  m_left_contracting_strides, m_k_strides);
757  RhsMapper rhs(m_rightImpl, m_right_nocontract_strides, m_j_strides,
758  m_right_contracting_strides, m_k_strides);
759 
760  const Scalar alpha(1);
761  const Index resIncr(1);
762 
763  // zero out the result buffer (which must be of size at least rows * sizeof(Scalar))
764  m_device.fill(buffer, buffer + rows, Scalar(0));
765 
766  internal::general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,false,RhsScalar,RhsMapper,false>::run(
767  rows, cols, lhs, rhs,
768  buffer, resIncr, alpha);
769 
770  typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
771  m_output_kernel(OutputMapper(buffer, rows), m_tensor_contraction_params,
772  static_cast<Index>(0), static_cast<Index>(0), rows,
773  static_cast<Index>(1));
774  }
775 
776  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
777  #if !defined(EIGEN_HIPCC)
778  EIGEN_DEVICE_FUNC
779  #endif
780  void evalGemm(Scalar* buffer) const {
781  // columns in left side, rows in right side
782  const Index k = this->m_k_size;
783  this->template evalGemmPartial<lhs_inner_dim_contiguous,
784  rhs_inner_dim_contiguous,
785  rhs_inner_dim_reordered,
786  Alignment, true>(buffer, 0, k, 1);
787  }
788 
789  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous,
790  bool rhs_inner_dim_reordered, int Alignment>
791  EIGEN_DEVICE_FUNC void evalGemmPartialWithoutOutputKernel(
792  Scalar* buffer, Index k_start, Index k_end, int num_threads) const {
793  evalGemmPartial<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous,
794  rhs_inner_dim_reordered, Alignment,
795  /*use_output_kernel*/ false>(buffer, k_start, k_end,
796  num_threads);
797  }
798 
799  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment, bool use_output_kernel>
800  EIGEN_DEVICE_FUNC void evalGemmPartial(Scalar* buffer, Index k_start, Index k_end, int num_threads) const {
801  eigen_assert(k_end >= k_start && k_start >= 0 && k_end <= this->m_k_size);
802  // columns in slice on left side, rows on right side
803  const Index k_slice = k_end - k_start;
804 
805  // rows in left side
806  const Index m = this->m_i_size;
807 
808  // columns in right side
809  const Index n = this->m_j_size;
810 
811  // define data mappers for Lhs and Rhs
812  typedef std::remove_const_t<typename EvalLeftArgType::Scalar> LhsScalar;
813  typedef std::remove_const_t<typename EvalRightArgType::Scalar> RhsScalar;
814 
815  typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
816  typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;
817 
818  const Index lhs_packet_size = internal::unpacket_traits<typename LeftEvaluator::PacketReturnType>::size;
819  const Index rhs_packet_size = internal::unpacket_traits<typename RightEvaluator::PacketReturnType>::size;
820 
821  typedef internal::TensorContractionInputMapper<LhsScalar, Index, internal::Lhs,
822  LeftEvaluator, left_nocontract_t,
823  contract_t, lhs_packet_size,
824  lhs_inner_dim_contiguous,
825  false, Unaligned> LhsMapper;
826 
827  typedef internal::TensorContractionInputMapper<RhsScalar, Index, internal::Rhs,
828  RightEvaluator, right_nocontract_t,
829  contract_t, rhs_packet_size,
830  rhs_inner_dim_contiguous,
831  rhs_inner_dim_reordered, Unaligned> RhsMapper;
832 
833  typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
834 
835  typedef internal::TensorContractionKernel<
836  Scalar, LhsScalar, RhsScalar, Index, OutputMapper, LhsMapper, RhsMapper>
837  TensorContractionKernel;
838 
839  // initialize data mappers
840  LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides, this->m_i_strides,
841  this->m_left_contracting_strides, this->m_k_strides);
842 
843  RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides, this->m_j_strides,
844  this->m_right_contracting_strides, this->m_k_strides);
845 
846  OutputMapper output(buffer, m);
847 
848  // Sizes of the blocks to load in cache. See the Goto paper for details.
849  internal::TensorContractionBlocking<Scalar, LhsScalar, RhsScalar,
850  Index, internal::ShardByCol>
851  blocking(k_slice, m, n, num_threads);
852  const Index kc = blocking.kc();
853  const Index mc = numext::mini(m, blocking.mc());
854  const Index nc = numext::mini(n, blocking.nc());
855 
856  typedef typename TensorContractionKernel::LhsBlock LhsBlock;
857  typedef typename TensorContractionKernel::RhsBlock RhsBlock;
858 
859  LhsBlock blockA;
860  RhsBlock blockB;
861 
862  TensorContractionKernel kernel(m, k_slice, n, mc, kc, nc);
863 
864  typedef typename TensorContractionKernel::BlockMemHandle BlockMemHandle;
865  const BlockMemHandle packed_mem =
866  kernel.allocate(this->m_device, &blockA, &blockB);
867 
868  // If a contraction kernel does not support beta, explicitly initialize
869  // output buffer with zeroes.
870  if (!TensorContractionKernel::HasBeta) {
871  this->m_device.fill(buffer, buffer + m * n, Scalar(0));
872  }
873 
874  for(Index i2=0; i2<m; i2+=mc)
875  {
876  const Index actual_mc = numext::mini(i2+mc,m)-i2;
877  for (Index k2 = k_start; k2 < k_end; k2 += kc) {
878  // make sure we don't overshoot right edge of left matrix, then pack vertical panel
879  const Index actual_kc = numext::mini(k2 + kc, k_end) - k2;
880  kernel.packLhs(&blockA, lhs.getSubMapper(i2, k2), actual_kc, actual_mc);
881 
882  // If kernel supports beta, there is no need to initialize output
883  // buffer with zeroes.
884  const Scalar alpha = Scalar(1);
885  const Scalar beta = (TensorContractionKernel::HasBeta && k2 == k_start)
886  ? Scalar(0)
887  : Scalar(1);
888 
889  // series of horizontal blocks
890  for (Index j2 = 0; j2 < n; j2 += nc) {
891  // make sure we don't overshoot right edge of right matrix, then pack block
892  const Index actual_nc = numext::mini(j2 + nc, n) - j2;
893  kernel.packRhs(&blockB, rhs.getSubMapper(k2, j2), actual_kc,
894  actual_nc);
895 
896  // call gebp (matrix kernel)
897  // The parameters here are copied from Eigen's GEMM implementation
898  const OutputMapper output_mapper = output.getSubMapper(i2, j2);
899  kernel.invoke(output_mapper, blockA, blockB, actual_mc, actual_kc,
900  actual_nc, alpha, beta);
901 
902  // We are done with this [i2, j2] output block.
903  if (use_output_kernel && k2 + kc >= k_end) {
904  m_output_kernel(output_mapper, m_tensor_contraction_params, i2, j2,
905  actual_mc, actual_nc);
906  }
907  }
908  }
909  }
910 
911  kernel.deallocate(this->m_device, packed_mem);
912  }
913 
914  EIGEN_STRONG_INLINE void cleanup() {
915  m_leftImpl.cleanup();
916  m_rightImpl.cleanup();
917 
918  if (m_result != NULL) {
919  m_device.deallocate(m_result);
920  m_result = NULL;
921  }
922  }
923 
924  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
925  return m_result[index];
926  }
927 
928  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool) const {
929  return TensorOpCost(sizeof(CoeffReturnType), 0, 0);
930  }
931 
932  template<int LoadMode>
933  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const {
934  return internal::ploadt<PacketReturnType, LoadMode>(m_result + index);
935  }
936 
937  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvaluatorPointerType data() const { return m_result; }
938 
939 protected:
940  Dimensions m_dimensions;
941 
942  contract_t m_k_strides;
943  contract_t m_left_contracting_strides;
944  contract_t m_right_contracting_strides;
945 
946  bool m_lhs_inner_dim_contiguous;
947  bool m_rhs_inner_dim_contiguous;
948  bool m_rhs_inner_dim_reordered;
949 
950  left_nocontract_t m_i_strides;
951  right_nocontract_t m_j_strides;
952  left_nocontract_t m_left_nocontract_strides;
953  right_nocontract_t m_right_nocontract_strides;
954 
955  Index m_i_size;
956  Index m_j_size;
957  Index m_k_size;
958 
959  TensorContractionParams m_tensor_contraction_params;
960 
961  TensorEvaluator<EvalLeftArgType, Device> m_leftImpl;
962  TensorEvaluator<EvalRightArgType, Device> m_rightImpl;
963  const Device EIGEN_DEVICE_REF m_device;
964  OutputKernelType m_output_kernel;
965  EvaluatorPointerType m_result;
966 };
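
To make the index bookkeeping above concrete, here is a small worked example of the flattening the evaluator base performs (the sizes are made up for illustration):

// Contract A with dimensions (4, 5, 6) against B with dimensions (6, 7)
// over the single index pair (2, 0). The evaluator computes:
//
//   m_i_size = 4 * 5 = 20   // product of the LHS non-contracting dims
//   m_j_size = 7            // product of the RHS non-contracting dims
//   m_k_size = 6            // product of the contracted dims
//
// evalGemm() then runs one (20 x 6) * (6 x 7) matrix product, and the
// resulting 20 x 7 column-major buffer is read back as the rank-3 output
// tensor with dimensions (4, 5, 7).
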
967 
968 
969 // evaluator for default device
970 template<typename Indices, typename LeftArgType, typename RightArgType, typename OutputKernelType, typename Device>
971 struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, Device> :
972  public TensorContractionEvaluatorBase<
973  TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, Device> > {
974  typedef TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, Device> Self;
975  typedef TensorContractionEvaluatorBase<Self> Base;
976 
977  typedef TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType> XprType;
978  typedef std::remove_const_t<typename XprType::Scalar> Scalar;
979  typedef typename XprType::Index Index;
980  typedef typename XprType::CoeffReturnType CoeffReturnType;
981  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
982 
983  static constexpr int Layout = TensorEvaluator<LeftArgType, Device>::Layout;
984 
985  // Most of the code is assuming that both input tensors are ColMajor. If the
986  // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS:
987  // If we want to compute A * B = C, where A is LHS and B is RHS, the code
988  // will pretend B is LHS and A is RHS.
989  typedef std::conditional_t<Layout == static_cast<int>(ColMajor), LeftArgType, RightArgType> EvalLeftArgType;
990  typedef std::conditional_t<Layout == static_cast<int>(ColMajor), RightArgType, LeftArgType> EvalRightArgType;
991 
992  static constexpr int LDims =
993  internal::array_size<typename TensorEvaluator<EvalLeftArgType, Device>::Dimensions>::value;
994  static constexpr int RDims =
995  internal::array_size<typename TensorEvaluator<EvalRightArgType, Device>::Dimensions>::value;
996  static constexpr int ContractDims = internal::array_size<Indices>::value;
997 
998  typedef array<Index, ContractDims> contract_t;
999  typedef array<Index, LDims - ContractDims> left_nocontract_t;
1000  typedef array<Index, RDims - ContractDims> right_nocontract_t;
1001 
1002  static constexpr int NumDims = LDims + RDims - 2 * ContractDims;
1003 
1004  // Could we use NumDimensions here?
1005  typedef DSizes<Index, NumDims> Dimensions;
1006 
1007  TensorEvaluator(const XprType& op, const Device& device) :
1008  Base(op, device) { }
1009 
1010  template <int Alignment>
1011  void evalProduct(Scalar* buffer) const {
1012  TENSOR_CONTRACTION_DISPATCH(this->template evalProductSequential, Alignment, (buffer));
1013  }
1014 };
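
This specialization covers the default (single-threaded) device and dispatches to evalProductSequential through TENSOR_CONTRACTION_DISPATCH; multi-threaded and GPU/SYCL evaluators live in TensorContractionThreadPool.h and the corresponding backend headers. Which evaluator runs follows from the device the expression is assigned on. A brief usage sketch, assuming EIGEN_USE_THREADS is defined before including the Tensor module:

#define EIGEN_USE_THREADS
#include <unsupported/Eigen/CXX11/Tensor>

void device_dispatch_sketch() {
  Eigen::Tensor<float, 2> a(128, 256), b(256, 64), c(128, 64);
  a.setRandom();
  b.setRandom();
  Eigen::array<Eigen::IndexPair<int>, 1> pairs = {Eigen::IndexPair<int>(1, 0)};

  // DefaultDevice: handled by the sequential evaluator defined in this file.
  c = a.contract(b, pairs);

  // ThreadPoolDevice: handled by the evaluator in TensorContractionThreadPool.h.
  Eigen::ThreadPool pool(4);
  Eigen::ThreadPoolDevice thread_device(&pool, /*num_threads=*/4);
  c.device(thread_device) = a.contract(b, pairs);
}
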
1015 
1016 } // end namespace Eigen
1017 
1018 #endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H