TensorVolumePatch.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_VOLUME_PATCH_H
#define EIGEN_CXX11_TENSOR_TENSOR_VOLUME_PATCH_H

namespace Eigen {

/** \class TensorVolumePatch
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Patch extraction specialized for processing of volumetric data.
  * This assumes that the input has at least 4 dimensions ordered as follows:
  *  - channels
  *  - planes
  *  - rows
  *  - columns
  *  - (optional) additional dimensions such as time or batch size.
  * Calling the volume patch code with patch_planes, patch_rows, and patch_cols
  * is equivalent to calling a regular patch extraction code with parameters
  * d, patch_planes, patch_rows, patch_cols, and 1 for all the additional
  * dimensions.
  */
namespace internal {

template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename XprType>
struct traits<TensorVolumePatchOp<Planes, Rows, Cols, XprType> > : public traits<XprType>
{
  typedef std::remove_const_t<typename XprType::Scalar> Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef std::remove_reference_t<Nested> Nested_;
  static constexpr int NumDimensions = XprTraits::NumDimensions + 1;
  static constexpr int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;
};

template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename XprType>
struct eval<TensorVolumePatchOp<Planes, Rows, Cols, XprType>, Eigen::Dense>
{
  typedef const TensorVolumePatchOp<Planes, Rows, Cols, XprType>& type;
};

template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename XprType>
struct nested<TensorVolumePatchOp<Planes, Rows, Cols, XprType>, 1, typename eval<TensorVolumePatchOp<Planes, Rows, Cols, XprType> >::type>
{
  typedef TensorVolumePatchOp<Planes, Rows, Cols, XprType> type;
};

} // end namespace internal

template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename XprType>
class TensorVolumePatchOp : public TensorBase<TensorVolumePatchOp<Planes, Rows, Cols, XprType>, ReadOnlyAccessors>
{
  public:
  typedef typename Eigen::internal::traits<TensorVolumePatchOp>::Scalar Scalar;
  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename Eigen::internal::nested<TensorVolumePatchOp>::type Nested;
  typedef typename Eigen::internal::traits<TensorVolumePatchOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorVolumePatchOp>::Index Index;

  // Constructors elided from this listing: one overload takes a PaddingType
  // (PADDING_VALID or PADDING_SAME) plus a padding value; the other takes
  // explicit per-side padding amounts (top_z/bottom_z, top/bottom, left/right).
  // Both also take the patch sizes, the output strides, the input strides,
  // and the inflation strides.

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex patch_planes() const { return m_patch_planes; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex patch_rows() const { return m_patch_rows; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex patch_cols() const { return m_patch_cols; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool padding_explicit() const { return m_padding_explicit; }
  // ... further accessors (plane/row/col strides, input strides, inflation
  // strides, per-side padding amounts, padding_type(), padding_value()) elided.

  EIGEN_DEVICE_FUNC const internal::remove_all_t<typename XprType::Nested>&
  expression() const { return m_xpr; }

  protected:
  typename XprType::Nested m_xpr;
  const DenseIndex m_patch_planes;
  const DenseIndex m_patch_rows;
  const DenseIndex m_patch_cols;
  // ... remaining stride and padding members elided.
  const bool m_padding_explicit;
};
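
// Illustrative usage (not part of the original source): TensorVolumePatchOp is
// normally constructed through TensorBase::extract_volume_patches(). A minimal
// sketch, assuming a ColMajor input ordered as (channels, planes, rows, cols,
// batch) and the default PADDING_SAME:
//
//   Eigen::Tensor<float, 5> input(4, 8, 16, 16, 2);
//   input.setRandom();
//   // Extract 2x3x3 (planes x rows x cols) patches. The result has rank 6:
//   // (channels, patch_planes, patch_rows, patch_cols, #patches, batch),
//   // here (4, 2, 3, 3, 8*16*16, 2) since SAME padding with unit strides
//   // keeps one patch per input position.
//   Eigen::Tensor<float, 6> patches = input.extract_volume_patches(2, 3, 3);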

// Eval as rvalue
template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename ArgType, typename Device>
struct TensorEvaluator<const TensorVolumePatchOp<Planes, Rows, Cols, ArgType>, Device>
{
  typedef TensorVolumePatchOp<Planes, Rows, Cols, ArgType> XprType;
  typedef typename XprType::Index Index;
  static constexpr int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  static constexpr int NumDims = NumInputDims + 1;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef std::remove_const_t<typename XprType::Scalar> Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static constexpr int PacketSize = PacketType<CoeffReturnType, Device>::size;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  static constexpr int Layout = TensorEvaluator<ArgType, Device>::Layout;
  enum {
    IsAligned = false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess = false,
    PreferBlockAccess = TensorEvaluator<ArgType, Device>::PreferBlockAccess,
    CoordAccess = false,
    RawAccess = false
  };

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockNotImplemented TensorBlock;
  //===--------------------------------------------------------------------===//

  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) :
      m_impl(op.expression(), device)
  {
    EIGEN_STATIC_ASSERT((NumDims >= 5), YOU_MADE_A_PROGRAMMING_MISTAKE);

    m_paddingValue = op.padding_value();

    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();

    // Cache a few variables.
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_inputDepth = input_dims[0];
      m_inputPlanes = input_dims[1];
      m_inputRows = input_dims[2];
      m_inputCols = input_dims[3];
    } else {
      m_inputDepth = input_dims[NumInputDims-1];
      m_inputPlanes = input_dims[NumInputDims-2];
      m_inputRows = input_dims[NumInputDims-3];
      m_inputCols = input_dims[NumInputDims-4];
    }

    m_plane_strides = op.plane_strides();
    m_row_strides = op.row_strides();
    m_col_strides = op.col_strides();

    // Input strides and effective input/patch size
    m_in_plane_strides = op.in_plane_strides();
    m_in_row_strides = op.in_row_strides();
    m_in_col_strides = op.in_col_strides();
    m_plane_inflate_strides = op.plane_inflate_strides();
    m_row_inflate_strides = op.row_inflate_strides();
    m_col_inflate_strides = op.col_inflate_strides();

    // The "effective" spatial size after inflating data with zeros.
    m_input_planes_eff = (m_inputPlanes - 1) * m_plane_inflate_strides + 1;
    m_input_rows_eff = (m_inputRows - 1) * m_row_inflate_strides + 1;
    m_input_cols_eff = (m_inputCols - 1) * m_col_inflate_strides + 1;
    m_patch_planes_eff = op.patch_planes() + (op.patch_planes() - 1) * (m_in_plane_strides - 1);
    m_patch_rows_eff = op.patch_rows() + (op.patch_rows() - 1) * (m_in_row_strides - 1);
    m_patch_cols_eff = op.patch_cols() + (op.patch_cols() - 1) * (m_in_col_strides - 1);
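
    // Worked example (illustrative): with m_inputRows = 5 and
    // m_row_inflate_strides = 2, the zero-inflated height is
    // (5 - 1) * 2 + 1 = 9; with op.patch_rows() = 3 and m_in_row_strides = 2,
    // the dilated patch height is 3 + (3 - 1) * (2 - 1) = 5.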

    if (op.padding_explicit()) {
      m_outputPlanes = numext::ceil((m_input_planes_eff + op.padding_top_z() + op.padding_bottom_z() - m_patch_planes_eff + 1.f) / static_cast<float>(m_plane_strides));
      m_outputRows = numext::ceil((m_input_rows_eff + op.padding_top() + op.padding_bottom() - m_patch_rows_eff + 1.f) / static_cast<float>(m_row_strides));
      m_outputCols = numext::ceil((m_input_cols_eff + op.padding_left() + op.padding_right() - m_patch_cols_eff + 1.f) / static_cast<float>(m_col_strides));
      m_planePaddingTop = op.padding_top_z();
      m_rowPaddingTop = op.padding_top();
      m_colPaddingLeft = op.padding_left();
    } else {
      // Compute the padding from the padding type.
      switch (op.padding_type()) {
        case PADDING_VALID:
          m_outputPlanes = numext::ceil((m_input_planes_eff - m_patch_planes_eff + 1.f) / static_cast<float>(m_plane_strides));
          m_outputRows = numext::ceil((m_input_rows_eff - m_patch_rows_eff + 1.f) / static_cast<float>(m_row_strides));
          m_outputCols = numext::ceil((m_input_cols_eff - m_patch_cols_eff + 1.f) / static_cast<float>(m_col_strides));
          m_planePaddingTop = 0;
          m_rowPaddingTop = 0;
          m_colPaddingLeft = 0;
          break;
        case PADDING_SAME: {
          m_outputPlanes = numext::ceil(m_input_planes_eff / static_cast<float>(m_plane_strides));
          m_outputRows = numext::ceil(m_input_rows_eff / static_cast<float>(m_row_strides));
          m_outputCols = numext::ceil(m_input_cols_eff / static_cast<float>(m_col_strides));
          const Index dz = (m_outputPlanes - 1) * m_plane_strides + m_patch_planes_eff - m_input_planes_eff;
          const Index dy = (m_outputRows - 1) * m_row_strides + m_patch_rows_eff - m_input_rows_eff;
          const Index dx = (m_outputCols - 1) * m_col_strides + m_patch_cols_eff - m_input_cols_eff;
          m_planePaddingTop = dz / 2;
          m_rowPaddingTop = dy / 2;
          m_colPaddingLeft = dx / 2;
          break;
        }
        default:
          eigen_assert(false && "unexpected padding");
      }
    }
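
    // Worked example (illustrative): for an effective input of 10 planes, an
    // effective patch of 3 planes, and a plane stride of 2:
    //   PADDING_VALID: m_outputPlanes = ceil((10 - 3 + 1) / 2.f) = 4;
    //   PADDING_SAME:  m_outputPlanes = ceil(10 / 2.f) = 5, with
    //                  dz = (5 - 1) * 2 + 3 - 10 = 1, so m_planePaddingTop = 0
    //                  (the single padded plane ends up at the bottom).
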
    eigen_assert(m_outputRows > 0);
    eigen_assert(m_outputCols > 0);
    eigen_assert(m_outputPlanes > 0);

    // Dimensions for the result of the extraction.
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      // ColMajor
      // 0: depth
      // 1: patch_planes
      // 2: patch_rows
      // 3: patch_cols
      // 4: number of patches
      // 5 and beyond: anything else (such as batch).
      m_dimensions[0] = input_dims[0];
      m_dimensions[1] = op.patch_planes();
      m_dimensions[2] = op.patch_rows();
      m_dimensions[3] = op.patch_cols();
      m_dimensions[4] = m_outputPlanes * m_outputRows * m_outputCols;
      for (int i = 5; i < NumDims; ++i) {
        m_dimensions[i] = input_dims[i-1];
      }
    } else {
      // RowMajor
      // NumDims-1: depth
      // NumDims-2: patch_planes
      // NumDims-3: patch_rows
      // NumDims-4: patch_cols
      // NumDims-5: number of patches
      // NumDims-6 and beyond: anything else (such as batch).
      m_dimensions[NumDims-1] = input_dims[NumInputDims-1];
      m_dimensions[NumDims-2] = op.patch_planes();
      m_dimensions[NumDims-3] = op.patch_rows();
      m_dimensions[NumDims-4] = op.patch_cols();
      m_dimensions[NumDims-5] = m_outputPlanes * m_outputRows * m_outputCols;
      for (int i = NumDims-6; i >= 0; --i) {
        m_dimensions[i] = input_dims[i];
      }
    }
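
    // Example (illustrative): a ColMajor input of shape (4, 8, 16, 16, 2)
    // (depth, planes, rows, cols, batch) with 2x3x3 patches, PADDING_VALID,
    // and unit strides gives 7 * 14 * 14 = 1372 patch positions, so
    // m_dimensions becomes (4, 2, 3, 3, 1372, 2).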

    // Strides for the output tensor.
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_rowStride = m_dimensions[1];
      m_colStride = m_dimensions[2] * m_rowStride;
      m_patchStride = m_colStride * m_dimensions[3] * m_dimensions[0];
      m_otherStride = m_patchStride * m_dimensions[4];
    } else {
      m_rowStride = m_dimensions[NumDims-2];
      m_colStride = m_dimensions[NumDims-3] * m_rowStride;
      m_patchStride = m_colStride * m_dimensions[NumDims-4] * m_dimensions[NumDims-1];
      m_otherStride = m_patchStride * m_dimensions[NumDims-5];
    }
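
    // Stride sketch (illustrative, continuing the (4, 2, 3, 3, 1372, 2)
    // example above): m_rowStride = 2 and m_colStride = 3 * 2 = 6 index the
    // depth-normalized offset within a patch, while m_patchStride =
    // 6 * 3 * 4 = 72 and m_otherStride = 72 * 1372 = 98784 also include the
    // depth dimension, matching how coeff() decomposes a linear index below.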

    // Strides for navigating through the input tensor.
    m_planeInputStride = m_inputDepth;
    m_rowInputStride = m_inputDepth * m_inputPlanes;
    m_colInputStride = m_inputDepth * m_inputRows * m_inputPlanes;
    m_otherInputStride = m_inputDepth * m_inputRows * m_inputCols * m_inputPlanes;

    m_outputPlanesRows = m_outputPlanes * m_outputRows;

    // Precompute fast integer divisors (TensorIntDivisor) for the divisions
    // performed on every coefficient access.
    m_fastOtherStride = internal::TensorIntDivisor<Index>(m_otherStride);
    m_fastPatchStride = internal::TensorIntDivisor<Index>(m_patchStride);
    m_fastColStride = internal::TensorIntDivisor<Index>(m_colStride);
    m_fastRowStride = internal::TensorIntDivisor<Index>(m_rowStride);
    m_fastInputRowStride = internal::TensorIntDivisor<Index>(m_row_inflate_strides);
    m_fastInputColStride = internal::TensorIntDivisor<Index>(m_col_inflate_strides);
    m_fastInputPlaneStride = internal::TensorIntDivisor<Index>(m_plane_inflate_strides);
    m_fastInputColsEff = internal::TensorIntDivisor<Index>(m_input_cols_eff);
    m_fastOutputPlanes = internal::TensorIntDivisor<Index>(m_outputPlanes);
    m_fastOutputPlanesRows = internal::TensorIntDivisor<Index>(m_outputPlanesRows);

    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_fastOutputDepth = internal::TensorIntDivisor<Index>(m_dimensions[0]);
    } else {
      m_fastOutputDepth = internal::TensorIntDivisor<Index>(m_dimensions[NumDims-1]);
    }
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }

  EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    // Patch index corresponding to the passed in index.
    const Index patchIndex = index / m_fastPatchStride;

    // Spatial offset within the patch. This has to be translated into 3D
    // coordinates within the patch.
    const Index patchOffset = (index - patchIndex * m_patchStride) / m_fastOutputDepth;

    // Batch, etc.
    const Index otherIndex = (NumDims == 5) ? 0 : index / m_fastOtherStride;
    const Index patch3DIndex = (NumDims == 5) ? patchIndex : (index - otherIndex * m_otherStride) / m_fastPatchStride;

    // Calculate the column index in the original input tensor.
    const Index colIndex = patch3DIndex / m_fastOutputPlanesRows;
    const Index colOffset = patchOffset / m_fastColStride;
    const Index inputCol = colIndex * m_col_strides + colOffset * m_in_col_strides - m_colPaddingLeft;
    const Index origInputCol = (m_col_inflate_strides == 1) ? inputCol : ((inputCol >= 0) ? (inputCol / m_fastInputColStride) : 0);
    if (inputCol < 0 || inputCol >= m_input_cols_eff ||
        ((m_col_inflate_strides != 1) && (inputCol != origInputCol * m_col_inflate_strides))) {
      return Scalar(m_paddingValue);
    }

    // Calculate the row index in the original input tensor.
    const Index rowIndex = (patch3DIndex - colIndex * m_outputPlanesRows) / m_fastOutputPlanes;
    const Index rowOffset = (patchOffset - colOffset * m_colStride) / m_fastRowStride;
    const Index inputRow = rowIndex * m_row_strides + rowOffset * m_in_row_strides - m_rowPaddingTop;
    const Index origInputRow = (m_row_inflate_strides == 1) ? inputRow : ((inputRow >= 0) ? (inputRow / m_fastInputRowStride) : 0);
    if (inputRow < 0 || inputRow >= m_input_rows_eff ||
        ((m_row_inflate_strides != 1) && (inputRow != origInputRow * m_row_inflate_strides))) {
      return Scalar(m_paddingValue);
    }

    // Calculate the plane index in the original input tensor.
    const Index planeIndex = (patch3DIndex - m_outputPlanes * (colIndex * m_outputRows + rowIndex));
    const Index planeOffset = patchOffset - colOffset * m_colStride - rowOffset * m_rowStride;
    const Index inputPlane = planeIndex * m_plane_strides + planeOffset * m_in_plane_strides - m_planePaddingTop;
    const Index origInputPlane = (m_plane_inflate_strides == 1) ? inputPlane : ((inputPlane >= 0) ? (inputPlane / m_fastInputPlaneStride) : 0);
    if (inputPlane < 0 || inputPlane >= m_input_planes_eff ||
        ((m_plane_inflate_strides != 1) && (inputPlane != origInputPlane * m_plane_inflate_strides))) {
      return Scalar(m_paddingValue);
    }

    const int depth_index = static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 0 : NumDims - 1;
    const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index];

    const Index inputIndex = depth +
        origInputRow * m_rowInputStride +
        origInputCol * m_colInputStride +
        origInputPlane * m_planeInputStride +
        otherIndex * m_otherInputStride;

    return m_impl.coeff(inputIndex);
  }
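
  // Decomposition sketch (illustrative, reusing the strides from the example
  // above: depth 4, m_patchStride 72, m_otherStride 98784). For index = 500:
  //   patchIndex  = 500 / 72           = 6
  //   patchOffset = (500 - 6 * 72) / 4 = 17
  //   otherIndex  = 500 / 98784        = 0   (batch 0)
  // and 17 then splits via m_fastColStride (6) and m_fastRowStride (2) into
  // colOffset = 2, rowOffset = 2, planeOffset = 1, i.e. 17 = 2*6 + 2*2 + 1.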

  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    eigen_assert(index+PacketSize-1 < dimensions().TotalSize());

    if (m_in_row_strides != 1 || m_in_col_strides != 1 || m_row_inflate_strides != 1 || m_col_inflate_strides != 1 ||
        m_in_plane_strides != 1 || m_plane_inflate_strides != 1) {
      return packetWithPossibleZero(index);
    }

    const Index indices[2] = {index, index + PacketSize - 1};
    const Index patchIndex = indices[0] / m_fastPatchStride;
    if (patchIndex != indices[1] / m_fastPatchStride) {
      return packetWithPossibleZero(index);
    }
    const Index otherIndex = (NumDims == 5) ? 0 : indices[0] / m_fastOtherStride;
    eigen_assert(otherIndex == indices[1] / m_fastOtherStride);

    // Find the offset of the element w.r.t. the location of the first element.
    const Index patchOffsets[2] = {(indices[0] - patchIndex * m_patchStride) / m_fastOutputDepth,
                                   (indices[1] - patchIndex * m_patchStride) / m_fastOutputDepth};

    const Index patch3DIndex = (NumDims == 5) ? patchIndex : (indices[0] - otherIndex * m_otherStride) / m_fastPatchStride;
    eigen_assert(patch3DIndex == (indices[1] - otherIndex * m_otherStride) / m_fastPatchStride);

    const Index colIndex = patch3DIndex / m_fastOutputPlanesRows;
    const Index colOffsets[2] = {
        patchOffsets[0] / m_fastColStride,
        patchOffsets[1] / m_fastColStride};

    // Calculate the col indices in the original input tensor.
    const Index inputCols[2] = {
        colIndex * m_col_strides + colOffsets[0] - m_colPaddingLeft,
        colIndex * m_col_strides + colOffsets[1] - m_colPaddingLeft};
    if (inputCols[1] < 0 || inputCols[0] >= m_inputCols) {
      return internal::pset1<PacketReturnType>(Scalar(m_paddingValue));
    }

    if (inputCols[0] != inputCols[1]) {
      return packetWithPossibleZero(index);
    }

    const Index rowIndex = (patch3DIndex - colIndex * m_outputPlanesRows) / m_fastOutputPlanes;
    const Index rowOffsets[2] = {
        (patchOffsets[0] - colOffsets[0] * m_colStride) / m_fastRowStride,
        (patchOffsets[1] - colOffsets[1] * m_colStride) / m_fastRowStride};
    eigen_assert(rowOffsets[0] <= rowOffsets[1]);
    // Calculate the row indices in the original input tensor.
    const Index inputRows[2] = {
        rowIndex * m_row_strides + rowOffsets[0] - m_rowPaddingTop,
        rowIndex * m_row_strides + rowOffsets[1] - m_rowPaddingTop};

    if (inputRows[1] < 0 || inputRows[0] >= m_inputRows) {
      return internal::pset1<PacketReturnType>(Scalar(m_paddingValue));
    }

    if (inputRows[0] != inputRows[1]) {
      return packetWithPossibleZero(index);
    }

    const Index planeIndex = (patch3DIndex - m_outputPlanes * (colIndex * m_outputRows + rowIndex));
    const Index planeOffsets[2] = {
        patchOffsets[0] - colOffsets[0] * m_colStride - rowOffsets[0] * m_rowStride,
        patchOffsets[1] - colOffsets[1] * m_colStride - rowOffsets[1] * m_rowStride};
    eigen_assert(planeOffsets[0] <= planeOffsets[1]);
    const Index inputPlanes[2] = {
        planeIndex * m_plane_strides + planeOffsets[0] - m_planePaddingTop,
        planeIndex * m_plane_strides + planeOffsets[1] - m_planePaddingTop};

    if (inputPlanes[1] < 0 || inputPlanes[0] >= m_inputPlanes) {
      return internal::pset1<PacketReturnType>(Scalar(m_paddingValue));
    }

    if (inputPlanes[0] >= 0 && inputPlanes[1] < m_inputPlanes) {
      // No padding: read the whole packet from the input with one contiguous load.
      const int depth_index = static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 0 : NumDims - 1;
      const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index];
      const Index inputIndex = depth +
          inputRows[0] * m_rowInputStride +
          inputCols[0] * m_colInputStride +
          m_planeInputStride * inputPlanes[0] +
          otherIndex * m_otherInputStride;
      return m_impl.template packet<Unaligned>(inputIndex);
    }

    return packetWithPossibleZero(index);
  }
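
  // Fast-path sketch (illustrative): with a depth of 4 and PacketSize = 4, a
  // packet starting at index 0 spans indices 0..3, which all lie in the depth
  // dimension of a single patch position. Assuming that position needs no
  // padding (e.g. PADDING_VALID), every early-out above is skipped and the
  // packet is fetched with the single contiguous load at the end.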

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
  costPerCoeff(bool vectorized) const {
    const double compute_cost =
        10 * TensorOpCost::DivCost<Index>() + 21 * TensorOpCost::MulCost<Index>() +
        8 * TensorOpCost::AddCost<Index>();
    return TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
  }

  EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; }

  const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index planePaddingTop() const { return m_planePaddingTop; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowPaddingTop() const { return m_rowPaddingTop; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colPaddingLeft() const { return m_colPaddingLeft; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index outputPlanes() const { return m_outputPlanes; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index outputRows() const { return m_outputRows; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index outputCols() const { return m_outputCols; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userPlaneStride() const { return m_plane_strides; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userRowStride() const { return m_row_strides; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userColStride() const { return m_col_strides; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userInPlaneStride() const { return m_in_plane_strides; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userInRowStride() const { return m_in_row_strides; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userInColStride() const { return m_in_col_strides; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index planeInflateStride() const { return m_plane_inflate_strides; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowInflateStride() const { return m_row_inflate_strides; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colInflateStride() const { return m_col_inflate_strides; }

  protected:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetWithPossibleZero(Index index) const
  {
    EIGEN_ALIGN_MAX std::remove_const_t<CoeffReturnType> values[PacketSize];
    EIGEN_UNROLL_LOOP
    for (int i = 0; i < PacketSize; ++i) {
      values[i] = coeff(index+i);
    }
    PacketReturnType rslt = internal::pload<PacketReturnType>(values);
    return rslt;
  }

  Dimensions m_dimensions;

  // Parameters passed to the constructor.
  Index m_plane_strides, m_row_strides, m_col_strides;

  Index m_outputPlanes, m_outputRows, m_outputCols;

  Index m_planePaddingTop, m_rowPaddingTop, m_colPaddingLeft;

  Index m_in_plane_strides, m_in_row_strides, m_in_col_strides;

  Index m_plane_inflate_strides, m_row_inflate_strides, m_col_inflate_strides;

  // Cached input size.
  Index m_inputDepth, m_inputPlanes, m_inputRows, m_inputCols;

  // Other cached variables.
  Index m_outputPlanesRows;

  // Effective input/patch post-inflation size.
  Index m_input_planes_eff, m_input_rows_eff, m_input_cols_eff;
  Index m_patch_planes_eff, m_patch_rows_eff, m_patch_cols_eff;

  // Strides for the output tensor.
  Index m_otherStride, m_patchStride, m_rowStride, m_colStride;

  // Strides for the input tensor.
  Index m_planeInputStride, m_rowInputStride, m_colInputStride, m_otherInputStride;

  internal::TensorIntDivisor<Index> m_fastOtherStride;
  internal::TensorIntDivisor<Index> m_fastPatchStride;
  internal::TensorIntDivisor<Index> m_fastColStride;
  internal::TensorIntDivisor<Index> m_fastRowStride;
  internal::TensorIntDivisor<Index> m_fastInputPlaneStride;
  internal::TensorIntDivisor<Index> m_fastInputRowStride;
  internal::TensorIntDivisor<Index> m_fastInputColStride;
  internal::TensorIntDivisor<Index> m_fastInputColsEff;
  internal::TensorIntDivisor<Index> m_fastOutputPlanesRows;
  internal::TensorIntDivisor<Index> m_fastOutputPlanes;
  internal::TensorIntDivisor<Index> m_fastOutputDepth;

  Scalar m_paddingValue;

  TensorEvaluator<ArgType, Device> m_impl;
};


} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_VOLUME_PATCH_H