TensorPatch.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_PATCH_H
#define EIGEN_CXX11_TENSOR_TENSOR_PATCH_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

/** \class TensorPatch
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Tensor patch class.
  */
namespace internal {
template<typename PatchDim, typename XprType>
struct traits<TensorPatchOp<PatchDim, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef std::remove_reference_t<Nested> Nested_;
  static constexpr int NumDimensions = XprTraits::NumDimensions + 1;
  static constexpr int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;
};

template<typename PatchDim, typename XprType>
struct eval<TensorPatchOp<PatchDim, XprType>, Eigen::Dense>
{
  typedef const TensorPatchOp<PatchDim, XprType>& type;
};

template<typename PatchDim, typename XprType>
struct nested<TensorPatchOp<PatchDim, XprType>, 1, typename eval<TensorPatchOp<PatchDim, XprType> >::type>
{
  typedef TensorPatchOp<PatchDim, XprType> type;
};

}  // end namespace internal

template<typename PatchDim, typename XprType>
class TensorPatchOp : public TensorBase<TensorPatchOp<PatchDim, XprType>, ReadOnlyAccessors>
{
  public:
  typedef typename Eigen::internal::traits<TensorPatchOp>::Scalar Scalar;
  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename Eigen::internal::nested<TensorPatchOp>::type Nested;
  typedef typename Eigen::internal::traits<TensorPatchOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorPatchOp>::Index Index;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorPatchOp(const XprType& expr, const PatchDim& patch_dims)
      : m_xpr(expr), m_patch_dims(patch_dims) {}

  EIGEN_DEVICE_FUNC
  const PatchDim& patch_dims() const { return m_patch_dims; }

  EIGEN_DEVICE_FUNC
  const internal::remove_all_t<typename XprType::Nested>&
  expression() const { return m_xpr; }

  protected:
  typename XprType::Nested m_xpr;
  const PatchDim m_patch_dims;
};
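
// Usage sketch (an illustrative comment, not part of the original header):
// a TensorPatchOp is normally built through TensorBase::extract_patches().
// For a rank-N input it yields a rank-(N+1) tensor containing the patch of
// the requested size at every valid offset. With the default column-major
// layout the first N output dimensions are the patch dimensions and the
// last one indexes the patch:
//
//   Eigen::Tensor<float, 2> input(3, 4);
//   input.setRandom();
//   Eigen::array<Eigen::Index, 2> patch_dims{{2, 2}};
//   Eigen::Tensor<float, 3> patches = input.extract_patches(patch_dims);
//   // patches has dimensions (2, 2, 6): (3-2+1)*(4-2+1) = 6 patch offsets.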

// Eval as rvalue
template<typename PatchDim, typename ArgType, typename Device>
struct TensorEvaluator<const TensorPatchOp<PatchDim, ArgType>, Device>
{
  typedef TensorPatchOp<PatchDim, ArgType> XprType;
  typedef typename XprType::Index Index;
  static constexpr int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value + 1;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static constexpr int PacketSize = PacketType<CoeffReturnType, Device>::size;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  static constexpr int Layout = TensorEvaluator<ArgType, Device>::Layout;
  enum {
    IsAligned         = false,
    PacketAccess      = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess       = false,
    PreferBlockAccess = TensorEvaluator<ArgType, Device>::PreferBlockAccess,
    CoordAccess       = false,
    RawAccess         = false
  };

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockNotImplemented TensorBlock;
  //===--------------------------------------------------------------------===//
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device)
  {
    Index num_patches = 1;
    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    const PatchDim& patch_dims = op.patch_dims();
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = 0; i < NumDims-1; ++i) {
        m_dimensions[i] = patch_dims[i];
        num_patches *= (input_dims[i] - patch_dims[i] + 1);
      }
      m_dimensions[NumDims-1] = num_patches;

      m_inputStrides[0] = 1;
      m_patchStrides[0] = 1;
      for (int i = 1; i < NumDims-1; ++i) {
        m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1];
        m_patchStrides[i] = m_patchStrides[i-1] * (input_dims[i-1] - patch_dims[i-1] + 1);
      }
      m_outputStrides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1];
      }
    } else {
      for (int i = 0; i < NumDims-1; ++i) {
        m_dimensions[i+1] = patch_dims[i];
        num_patches *= (input_dims[i] - patch_dims[i] + 1);
      }
      m_dimensions[0] = num_patches;

      m_inputStrides[NumDims-2] = 1;
      m_patchStrides[NumDims-2] = 1;
      for (int i = NumDims-3; i >= 0; --i) {
        m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1];
        m_patchStrides[i] = m_patchStrides[i+1] * (input_dims[i+1] - patch_dims[i+1] + 1);
      }
      m_outputStrides[NumDims-1] = 1;
      for (int i = NumDims-2; i >= 0; --i) {
        m_outputStrides[i] = m_outputStrides[i+1] * m_dimensions[i+1];
      }
    }
  }
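
  // Worked example (an illustrative comment added here, not in the original
  // source): for a column-major 3x4 input with 2x2 patches (NumDims == 3),
  // the constructor above computes
  //   m_dimensions    = {2, 2, 6}   // patch dims, then 2*3 = 6 patch offsets
  //   m_inputStrides  = {1, 3}      // strides of the 3x4 input
  //   m_patchStrides  = {1, 2}      // strides over the 2x3 grid of offsets
  //   m_outputStrides = {1, 2, 4}   // strides of the 2x2x6 output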

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }

  EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    Index output_stride_index = (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? NumDims - 1 : 0;
    // Find the location of the first element of the patch.
    Index patchIndex = index / m_outputStrides[output_stride_index];
    // Find the offset of the element wrt the location of the first element.
    Index patchOffset = index - patchIndex * m_outputStrides[output_stride_index];
    Index inputIndex = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      EIGEN_UNROLL_LOOP
      for (int i = NumDims - 2; i > 0; --i) {
        const Index patchIdx = patchIndex / m_patchStrides[i];
        patchIndex -= patchIdx * m_patchStrides[i];
        const Index offsetIdx = patchOffset / m_outputStrides[i];
        patchOffset -= offsetIdx * m_outputStrides[i];
        inputIndex += (patchIdx + offsetIdx) * m_inputStrides[i];
      }
    } else {
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < NumDims - 2; ++i) {
        const Index patchIdx = patchIndex / m_patchStrides[i];
        patchIndex -= patchIdx * m_patchStrides[i];
        const Index offsetIdx = patchOffset / m_outputStrides[i+1];
        patchOffset -= offsetIdx * m_outputStrides[i+1];
        inputIndex += (patchIdx + offsetIdx) * m_inputStrides[i];
      }
    }
    inputIndex += (patchIndex + patchOffset);
    return m_impl.coeff(inputIndex);
  }
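
  // Illustrative walk-through (comment added for exposition): each loop
  // iteration peels one dimension off both the patch number and the
  // intra-patch offset and folds them into a single input index. With the
  // 3x4 input and 2x2 patches above, output coefficient 13 decomposes into
  // patch #3 (origin (1,1) on the offset grid) plus intra-patch offset
  // (1,0), so it reads input(2,1), i.e. linear input index 5.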

  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    eigen_assert(index+PacketSize-1 < dimensions().TotalSize());

    Index output_stride_index = (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? NumDims - 1 : 0;
    Index indices[2] = {index, index + PacketSize - 1};
    Index patchIndices[2] = {indices[0] / m_outputStrides[output_stride_index],
                             indices[1] / m_outputStrides[output_stride_index]};
    Index patchOffsets[2] = {indices[0] - patchIndices[0] * m_outputStrides[output_stride_index],
                             indices[1] - patchIndices[1] * m_outputStrides[output_stride_index]};

    Index inputIndices[2] = {0, 0};
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      EIGEN_UNROLL_LOOP
      for (int i = NumDims - 2; i > 0; --i) {
        const Index patchIdx[2] = {patchIndices[0] / m_patchStrides[i],
                                   patchIndices[1] / m_patchStrides[i]};
        patchIndices[0] -= patchIdx[0] * m_patchStrides[i];
        patchIndices[1] -= patchIdx[1] * m_patchStrides[i];

        const Index offsetIdx[2] = {patchOffsets[0] / m_outputStrides[i],
                                    patchOffsets[1] / m_outputStrides[i]};
        patchOffsets[0] -= offsetIdx[0] * m_outputStrides[i];
        patchOffsets[1] -= offsetIdx[1] * m_outputStrides[i];

        inputIndices[0] += (patchIdx[0] + offsetIdx[0]) * m_inputStrides[i];
        inputIndices[1] += (patchIdx[1] + offsetIdx[1]) * m_inputStrides[i];
      }
    } else {
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < NumDims - 2; ++i) {
        const Index patchIdx[2] = {patchIndices[0] / m_patchStrides[i],
                                   patchIndices[1] / m_patchStrides[i]};
        patchIndices[0] -= patchIdx[0] * m_patchStrides[i];
        patchIndices[1] -= patchIdx[1] * m_patchStrides[i];

        const Index offsetIdx[2] = {patchOffsets[0] / m_outputStrides[i+1],
                                    patchOffsets[1] / m_outputStrides[i+1]};
        patchOffsets[0] -= offsetIdx[0] * m_outputStrides[i+1];
        patchOffsets[1] -= offsetIdx[1] * m_outputStrides[i+1];

        inputIndices[0] += (patchIdx[0] + offsetIdx[0]) * m_inputStrides[i];
        inputIndices[1] += (patchIdx[1] + offsetIdx[1]) * m_inputStrides[i];
      }
    }
    inputIndices[0] += (patchIndices[0] + patchOffsets[0]);
    inputIndices[1] += (patchIndices[1] + patchOffsets[1]);

    if (inputIndices[1] - inputIndices[0] == PacketSize - 1) {
      // The whole packet maps to a contiguous run of the input: load directly.
      PacketReturnType rslt = m_impl.template packet<Unaligned>(inputIndices[0]);
      return rslt;
    }
    else {
      EIGEN_ALIGN_MAX std::remove_const_t<CoeffReturnType> values[PacketSize];
      values[0] = m_impl.coeff(inputIndices[0]);
      values[PacketSize-1] = m_impl.coeff(inputIndices[1]);
      EIGEN_UNROLL_LOOP
      for (int i = 1; i < PacketSize-1; ++i) {
        values[i] = coeff(index+i);
      }
      PacketReturnType rslt = internal::pload<PacketReturnType>(values);
      return rslt;
    }
  }
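
  // Note (added commentary): the fast path above fires only when the first
  // and last coefficient of the packet map to input positions exactly
  // PacketSize-1 apart, i.e. the packet does not straddle a patch boundary.
  // Otherwise the packet is assembled coefficient by coefficient into an
  // aligned stack buffer and loaded from there.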
261 
262  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
263  const double compute_cost = NumDims * (TensorOpCost::DivCost<Index>() +
264  TensorOpCost::MulCost<Index>() +
265  2 * TensorOpCost::AddCost<Index>());
266  return m_impl.costPerCoeff(vectorized) +
267  TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
268  }
269 
270  EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; }
271 
272  protected:
277 
279 
280 };

}  // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_PATCH_H