TensorPadding.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_PADDING_H
#define EIGEN_CXX11_TENSOR_TENSOR_PADDING_H

#include "./InternalHeaderCheck.h"

namespace Eigen {
/** \class TensorPadding
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Tensor padding class.
  * At the moment only padding with a constant value is supported.
  *
  */
namespace internal {
template<typename PaddingDimensions, typename XprType>
struct traits<TensorPaddingOp<PaddingDimensions, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef std::remove_reference_t<Nested> Nested_;
  static constexpr int NumDimensions = XprTraits::NumDimensions;
  static constexpr int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;
};

template<typename PaddingDimensions, typename XprType>
struct eval<TensorPaddingOp<PaddingDimensions, XprType>, Eigen::Dense>
{
  typedef const TensorPaddingOp<PaddingDimensions, XprType>& type;
};

template<typename PaddingDimensions, typename XprType>
struct nested<TensorPaddingOp<PaddingDimensions, XprType>, 1, typename eval<TensorPaddingOp<PaddingDimensions, XprType> >::type>
{
  typedef TensorPaddingOp<PaddingDimensions, XprType> type;
};

}  // end namespace internal


template<typename PaddingDimensions, typename XprType>
class TensorPaddingOp : public TensorBase<TensorPaddingOp<PaddingDimensions, XprType>, ReadOnlyAccessors>
{
  public:
  typedef typename Eigen::internal::traits<TensorPaddingOp>::Scalar Scalar;
  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename Eigen::internal::nested<TensorPaddingOp>::type Nested;
  typedef typename Eigen::internal::traits<TensorPaddingOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorPaddingOp>::Index Index;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorPaddingOp(const XprType& expr, const PaddingDimensions& padding_dims, const Scalar padding_value)
      : m_xpr(expr), m_padding_dims(padding_dims), m_padding_value(padding_value) {}

  EIGEN_DEVICE_FUNC
  const PaddingDimensions& padding() const { return m_padding_dims; }
  EIGEN_DEVICE_FUNC
  Scalar padding_value() const { return m_padding_value; }

  EIGEN_DEVICE_FUNC
  const internal::remove_all_t<typename XprType::Nested>&
  expression() const { return m_xpr; }

  protected:
  typename XprType::Nested m_xpr;
  const PaddingDimensions m_padding_dims;
  const Scalar m_padding_value;
};


// Eval as rvalue
template<typename PaddingDimensions, typename ArgType, typename Device>
struct TensorEvaluator<const TensorPaddingOp<PaddingDimensions, ArgType>, Device>
{
  typedef TensorPaddingOp<PaddingDimensions, ArgType> XprType;
  typedef typename XprType::Index Index;
  static constexpr int NumDims = internal::array_size<PaddingDimensions>::value;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static constexpr int PacketSize = PacketType<CoeffReturnType, Device>::size;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  static constexpr int Layout = TensorEvaluator<ArgType, Device>::Layout;
  enum {
    IsAligned         = true,
    PacketAccess      = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess       = TensorEvaluator<ArgType, Device>::RawAccess,
    PreferBlockAccess = true,
    CoordAccess       = true,
    RawAccess         = false
  };

  typedef std::remove_const_t<Scalar> ScalarNoConst;

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
  typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;

  typedef typename internal::TensorMaterializedBlock<ScalarNoConst, NumDims,
                                                     Layout, Index>
      TensorBlock;
  //===--------------------------------------------------------------------===//

  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device), m_padding(op.padding()), m_paddingValue(op.padding_value()), m_device(device)
  {
    // The padding op doesn't change the rank of the tensor. Directly padding a scalar would lead
    // to a vector, which doesn't make sense. Instead one should reshape the scalar into a vector
    // of 1 element first and then pad.
    EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);

    // Compute dimensions
    m_dimensions = m_impl.dimensions();
    for (int i = 0; i < NumDims; ++i) {
      m_dimensions[i] += m_padding[i].first + m_padding[i].second;
    }
    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_inputStrides[0] = 1;
      m_outputStrides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1];
        m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1];
      }
      m_outputStrides[NumDims] = m_outputStrides[NumDims-1] * m_dimensions[NumDims-1];
    } else {
      m_inputStrides[NumDims - 1] = 1;
      m_outputStrides[NumDims] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1];
        m_outputStrides[i+1] = m_outputStrides[i+2] * m_dimensions[i+1];
      }
      m_outputStrides[0] = m_outputStrides[1] * m_dimensions[0];
    }
  }
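  // Illustrative note (not in the original source): for a ColMajor 2x3 input
  // padded by {1,1} in both dimensions the output is 4x5, so the constructor
  // above produces m_inputStrides = {1, 2} and m_outputStrides = {1, 4, 20},
  // where the extra entry m_outputStrides[NumDims] is the total output size.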

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }

#ifdef EIGEN_USE_THREADS
  template <typename EvalSubExprsCallback>
  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
      EvaluatorPointerType, EvalSubExprsCallback done) {
    m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); });
  }
#endif  // EIGEN_USE_THREADS

  EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    eigen_assert(index < dimensions().TotalSize());
    Index inputIndex = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      EIGEN_UNROLL_LOOP
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx = index / m_outputStrides[i];
        if (isPaddingAtIndexForDim(idx, i)) {
          return m_paddingValue;
        }
        inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      }
      if (isPaddingAtIndexForDim(index, 0)) {
        return m_paddingValue;
      }
      inputIndex += (index - m_padding[0].first);
    } else {
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx = index / m_outputStrides[i+1];
        if (isPaddingAtIndexForDim(idx, i)) {
          return m_paddingValue;
        }
        inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
        index -= idx * m_outputStrides[i+1];
      }
      if (isPaddingAtIndexForDim(index, NumDims-1)) {
        return m_paddingValue;
      }
      inputIndex += (index - m_padding[NumDims-1].first);
    }
    return m_impl.coeff(inputIndex);
  }
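  // Illustrative note (not in the original source): continuing the 4x5 ColMajor
  // example, coeff(6) decomposes as idx = 6 / 4 = 1 in dimension 1 (not padding,
  // since 1 >= m_padding[1].first and 1 < 5 - m_padding[1].second), leaving
  // index 2 in dimension 0 (also not padding), so inputIndex =
  // (1 - 1) * 2 + (2 - 1) = 1 and the call returns m_impl.coeff(1), i.e.
  // input(1, 0). Any coordinate that falls in a padded band returns
  // m_paddingValue instead.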

  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      return packetColMajor(index);
    }
    return packetRowMajor(index);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    TensorOpCost cost = m_impl.costPerCoeff(vectorized);
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < NumDims; ++i)
        updateCostPerDimension(cost, i, i == 0);
    } else {
      EIGEN_UNROLL_LOOP
      for (int i = NumDims - 1; i >= 0; --i)
        updateCostPerDimension(cost, i, i == NumDims - 1);
    }
    return cost;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  internal::TensorBlockResourceRequirements getResourceRequirements() const {
    const size_t target_size = m_device.lastLevelCacheSize();
    return internal::TensorBlockResourceRequirements::merge(
        internal::TensorBlockResourceRequirements::skewed<Scalar>(target_size),
        m_impl.getResourceRequirements());
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock
  block(TensorBlockDesc& desc, TensorBlockScratch& scratch,
        bool /*root_of_expr_ast*/ = false) const {
    // If one of the dimensions is zero, return empty block view.
    if (desc.size() == 0) {
      return TensorBlock(internal::TensorBlockKind::kView, NULL,
                         desc.dimensions());
    }

    static const bool IsColMajor = Layout == static_cast<int>(ColMajor);
    const int inner_dim_idx = IsColMajor ? 0 : NumDims - 1;

    Index offset = desc.offset();

    // Compute offsets in the output tensor corresponding to the desc.offset().
    DSizes<Index, NumDims> output_offsets;
    for (int i = NumDims - 1; i > 0; --i) {
      const int dim = IsColMajor ? i : NumDims - i - 1;
      const int stride_dim = IsColMajor ? dim : dim + 1;
      output_offsets[dim] = offset / m_outputStrides[stride_dim];
      offset -= output_offsets[dim] * m_outputStrides[stride_dim];
    }
    output_offsets[inner_dim_idx] = offset;

    // Offsets in the input corresponding to output offsets.
    DSizes<Index, NumDims> input_offsets = output_offsets;
    for (int i = 0; i < NumDims; ++i) {
      const int dim = IsColMajor ? i : NumDims - i - 1;
      input_offsets[dim] = input_offsets[dim] - m_padding[dim].first;
    }

    // Compute the offset in the input buffer. At this point it might be invalid
    // and point outside of the input buffer, because we don't check for negative
    // offsets; it will be corrected in the block iteration loop below.
    Index input_offset = 0;
    for (int i = 0; i < NumDims; ++i) {
      const int dim = IsColMajor ? i : NumDims - i - 1;
      input_offset += input_offsets[dim] * m_inputStrides[dim];
    }

    // Destination buffer and scratch buffer are both indexed from 0 and have the
    // same dimensions as the requested block (for the destination buffer this
    // property is guaranteed by `desc.destination()`).
    Index output_offset = 0;
    const DSizes<Index, NumDims> output_strides =
        internal::strides<Layout>(desc.dimensions());

    // NOTE(ezhulenev): We initialize block iteration state for `NumDims - 1`
    // dimensions, skipping the innermost dimension. In theory it should be
    // possible to squeeze matching innermost dimensions, however in practice
    // that did not show any improvements in benchmarks. Also in practice the
    // first outer dimension usually has padding, which will prevent squeezing.

    // Initialize output block iterator state. Dimensions in this array are
    // always in inner_most -> outer_most order (col major layout).
    array<BlockIteratorState, NumDims - 1> it;
    for (int i = 0; i < NumDims - 1; ++i) {
      const int dim = IsColMajor ? i + 1 : NumDims - i - 2;
      it[i].count = 0;
      it[i].size = desc.dimension(dim);

      it[i].input_stride = m_inputStrides[dim];
      it[i].input_span = it[i].input_stride * (it[i].size - 1);

      it[i].output_stride = output_strides[dim];
      it[i].output_span = it[i].output_stride * (it[i].size - 1);
    }

    const Index input_inner_dim_size =
        static_cast<Index>(m_impl.dimensions()[inner_dim_idx]);

    // Total output size.
    const Index output_size = desc.size();

    // We will fill the inner dimension of this size in the output. It might be
    // larger than the inner dimension in the input, so we might have to pad
    // before/after we copy values from the input inner dimension.
    const Index output_inner_dim_size = desc.dimension(inner_dim_idx);

    // How many values to fill with padding BEFORE reading from the input inner
    // dimension.
    const Index output_inner_pad_before_size =
        input_offsets[inner_dim_idx] < 0
            ? numext::mini(numext::abs(input_offsets[inner_dim_idx]),
                           output_inner_dim_size)
            : 0;

    // How many values we can actually copy from the input inner dimension.
    const Index output_inner_copy_size = numext::mini(
        // Want to copy from input.
        (output_inner_dim_size - output_inner_pad_before_size),
        // Can copy from input.
        numext::maxi(input_inner_dim_size - (input_offsets[inner_dim_idx] +
                                             output_inner_pad_before_size),
                     Index(0)));

    eigen_assert(output_inner_copy_size >= 0);

    // How many values to fill with padding AFTER reading from the input inner
    // dimension.
    const Index output_inner_pad_after_size =
        (output_inner_dim_size - output_inner_copy_size -
         output_inner_pad_before_size);

    // Sanity check: the sum of all sizes must be equal to the output size.
    eigen_assert(output_inner_dim_size ==
                 (output_inner_pad_before_size + output_inner_copy_size +
                  output_inner_pad_after_size));
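    // Illustrative note (not in the original source): for the 4x5 ColMajor
    // example with a block covering the whole output, inner_dim_idx = 0 and
    // input_offsets[0] = -1, so output_inner_pad_before_size = 1,
    // output_inner_copy_size = 2 and output_inner_pad_after_size = 1, which
    // indeed sum to the output inner dimension size of 4.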

    // Keep track of current coordinates and padding in the output.
    DSizes<Index, NumDims> output_coord = output_offsets;
    DSizes<Index, NumDims> output_padded;
    for (int i = 0; i < NumDims; ++i) {
      const int dim = IsColMajor ? i : NumDims - i - 1;
      output_padded[dim] = isPaddingAtIndexForDim(output_coord[dim], dim);
    }

    typedef internal::StridedLinearBufferCopy<ScalarNoConst, Index> LinCopy;

    // Prepare storage for the materialized padding result.
    const typename TensorBlock::Storage block_storage =
        TensorBlock::prepareStorage(desc, scratch);

    // TODO(ezhulenev): Squeeze multiple non-padded inner dimensions into a
    // single logical inner dimension.

    // When possible we squeeze writes for the innermost (only if non-padded)
    // dimension with the first padded dimension. This allows us to reduce the
    // number of calls to LinCopy and better utilize vector instructions.
    const bool squeeze_writes =
        NumDims > 1 &&
        // inner dimension is not padded
        (input_inner_dim_size == m_dimensions[inner_dim_idx]) &&
        // and equal to the block inner dimension
        (input_inner_dim_size == output_inner_dim_size);

    const int squeeze_dim = IsColMajor ? inner_dim_idx + 1 : inner_dim_idx - 1;

    // Maximum coordinate on a squeeze dimension that we can write to.
    const Index squeeze_max_coord =
        squeeze_writes ? numext::mini(
                             // max non-padded element in the input
                             static_cast<Index>(m_dimensions[squeeze_dim] -
                                                m_padding[squeeze_dim].second),
                             // max element in the output buffer
                             static_cast<Index>(output_offsets[squeeze_dim] +
                                                desc.dimension(squeeze_dim)))
                       : static_cast<Index>(0);

    // Iterate copying data from `m_impl.data()` to the output buffer.
    for (Index size = 0; size < output_size;) {
      // Detect if we are in the padded region (exclude innermost dimension).
      bool is_padded = false;
      for (int j = 1; j < NumDims; ++j) {
        const int dim = IsColMajor ? j : NumDims - j - 1;
        is_padded = output_padded[dim];
        if (is_padded) break;
      }

      if (is_padded) {
        // Fill the single innermost dimension with the padding value.
        size += output_inner_dim_size;

        LinCopy::template Run<LinCopy::Kind::FillLinear>(
            typename LinCopy::Dst(output_offset, 1, block_storage.data()),
            typename LinCopy::Src(0, 0, &m_paddingValue),
            output_inner_dim_size);

      } else if (squeeze_writes) {
        // Squeeze multiple reads from innermost dimensions.
        const Index squeeze_num = squeeze_max_coord - output_coord[squeeze_dim];
        size += output_inner_dim_size * squeeze_num;

        // Copy `squeeze_num` inner dimensions from input to output.
        LinCopy::template Run<LinCopy::Kind::Linear>(
            typename LinCopy::Dst(output_offset, 1, block_storage.data()),
            typename LinCopy::Src(input_offset, 1, m_impl.data()),
            output_inner_dim_size * squeeze_num);

        // Update iteration state for only `squeeze_num - 1` processed inner
        // dimensions, because we have another iteration state update at the end
        // of the loop that will update iteration state for the last inner
        // processed dimension.
        it[0].count += (squeeze_num - 1);
        input_offset += it[0].input_stride * (squeeze_num - 1);
        output_offset += it[0].output_stride * (squeeze_num - 1);
        output_coord[squeeze_dim] += (squeeze_num - 1);

      } else {
        // Single read from the innermost dimension.
        size += output_inner_dim_size;

        { // Fill with padding before copying from the input inner dimension.
          const Index out = output_offset;

          LinCopy::template Run<LinCopy::Kind::FillLinear>(
              typename LinCopy::Dst(out, 1, block_storage.data()),
              typename LinCopy::Src(0, 0, &m_paddingValue),
              output_inner_pad_before_size);
        }

        { // Copy data from the input inner dimension.
          const Index out = output_offset + output_inner_pad_before_size;
          const Index in = input_offset + output_inner_pad_before_size;

          eigen_assert(output_inner_copy_size == 0 || m_impl.data() != NULL);

          LinCopy::template Run<LinCopy::Kind::Linear>(
              typename LinCopy::Dst(out, 1, block_storage.data()),
              typename LinCopy::Src(in, 1, m_impl.data()),
              output_inner_copy_size);
        }

        { // Fill with padding after copying from the input inner dimension.
          const Index out = output_offset + output_inner_pad_before_size +
                            output_inner_copy_size;

          LinCopy::template Run<LinCopy::Kind::FillLinear>(
              typename LinCopy::Dst(out, 1, block_storage.data()),
              typename LinCopy::Src(0, 0, &m_paddingValue),
              output_inner_pad_after_size);
        }
      }

      for (int j = 0; j < NumDims - 1; ++j) {
        const int dim = IsColMajor ? j + 1 : NumDims - j - 2;

        if (++it[j].count < it[j].size) {
          input_offset += it[j].input_stride;
          output_offset += it[j].output_stride;
          output_coord[dim] += 1;
          output_padded[dim] = isPaddingAtIndexForDim(output_coord[dim], dim);
          break;
        }
        it[j].count = 0;
        input_offset -= it[j].input_span;
        output_offset -= it[j].output_span;
        output_coord[dim] -= it[j].size - 1;
        output_padded[dim] = isPaddingAtIndexForDim(output_coord[dim], dim);
      }
    }

    return block_storage.AsTensorMaterializedBlock();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvaluatorPointerType data() const { return NULL; }

 private:
  struct BlockIteratorState {
    BlockIteratorState()
        : count(0),
          size(0),
          input_stride(0),
          input_span(0),
          output_stride(0),
          output_span(0) {}

    Index count;
    Index size;
    Index input_stride;
    Index input_span;
    Index output_stride;
    Index output_span;
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_ALWAYS_INLINE bool isPaddingAtIndexForDim(
      Index index, int dim_index) const {
    return (!internal::index_pair_first_statically_eq<PaddingDimensions>(dim_index, 0) &&
            index < m_padding[dim_index].first) ||
           (!internal::index_pair_second_statically_eq<PaddingDimensions>(dim_index, 0) &&
            index >= m_dimensions[dim_index] - m_padding[dim_index].second);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_ALWAYS_INLINE bool isLeftPaddingCompileTimeZero(
      int dim_index) const {
    return internal::index_pair_first_statically_eq<PaddingDimensions>(dim_index, 0);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_ALWAYS_INLINE bool isRightPaddingCompileTimeZero(
      int dim_index) const {
    return internal::index_pair_second_statically_eq<PaddingDimensions>(dim_index, 0);
  }


  void updateCostPerDimension(TensorOpCost& cost, int i, bool first) const {
    const double in = static_cast<double>(m_impl.dimensions()[i]);
    const double out = in + m_padding[i].first + m_padding[i].second;
    if (out == 0)
      return;
    const double reduction = in / out;
    cost *= reduction;
    if (first) {
      cost += TensorOpCost(0, 0, 2 * TensorOpCost::AddCost<Index>() +
                                 reduction * (1 * TensorOpCost::AddCost<Index>()));
    } else {
      cost += TensorOpCost(0, 0, 2 * TensorOpCost::AddCost<Index>() +
                                 2 * TensorOpCost::MulCost<Index>() +
                                 reduction * (2 * TensorOpCost::MulCost<Index>() +
                                              1 * TensorOpCost::DivCost<Index>()));
    }
  }
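  // Illustrative note (not in the original source): `reduction` is the fraction
  // of output coefficients that actually come from the input. For a dimension
  // of size 2 padded to 4, reduction = 0.5, so the wrapped evaluator's cost is
  // halved before the bookkeeping cost of the index arithmetic is added.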

 protected:

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetColMajor(Index index) const
  {
    eigen_assert(index+PacketSize-1 < dimensions().TotalSize());

    const Index initialIndex = index;
    Index inputIndex = 0;
    EIGEN_UNROLL_LOOP
    for (int i = NumDims - 1; i > 0; --i) {
      const Index firstIdx = index;
      const Index lastIdx = index + PacketSize - 1;
      const Index lastPaddedLeft = m_padding[i].first * m_outputStrides[i];
      const Index firstPaddedRight = (m_dimensions[i] - m_padding[i].second) * m_outputStrides[i];
      const Index lastPaddedRight = m_outputStrides[i+1];

      if (!isLeftPaddingCompileTimeZero(i) && lastIdx < lastPaddedLeft) {
        // all the coefficients are in the padding zone.
        return internal::pset1<PacketReturnType>(m_paddingValue);
      }
      else if (!isRightPaddingCompileTimeZero(i) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) {
        // all the coefficients are in the padding zone.
        return internal::pset1<PacketReturnType>(m_paddingValue);
      }
      else if ((isLeftPaddingCompileTimeZero(i) && isRightPaddingCompileTimeZero(i)) || (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) {
        // all the coefficients are between the two padding zones.
        const Index idx = index / m_outputStrides[i];
        inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      }
      else {
        // Every other case
        return packetWithPossibleZero(initialIndex);
      }
    }

    const Index lastIdx = index + PacketSize - 1;
    const Index firstIdx = index;
    const Index lastPaddedLeft = m_padding[0].first;
    const Index firstPaddedRight = (m_dimensions[0] - m_padding[0].second);
    const Index lastPaddedRight = m_outputStrides[1];

    if (!isLeftPaddingCompileTimeZero(0) && lastIdx < lastPaddedLeft) {
      // all the coefficients are in the padding zone.
      return internal::pset1<PacketReturnType>(m_paddingValue);
    }
    else if (!isRightPaddingCompileTimeZero(0) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) {
      // all the coefficients are in the padding zone.
      return internal::pset1<PacketReturnType>(m_paddingValue);
    }
    else if ((isLeftPaddingCompileTimeZero(0) && isRightPaddingCompileTimeZero(0)) || (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) {
      // all the coefficients are between the two padding zones.
      inputIndex += (index - m_padding[0].first);
      return m_impl.template packet<Unaligned>(inputIndex);
    }
    // Every other case
    return packetWithPossibleZero(initialIndex);
  }
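  // Illustrative note (not in the original source): in the 4x5 float example,
  // a packet of 4 coefficients loaded at linear index 0 covers exactly the
  // first output column, which lies entirely in the left padding of dimension
  // 1, so packetColMajor() returns pset1(m_paddingValue). A packet that
  // straddles a padding boundary falls through to packetWithPossibleZero(),
  // which gathers the coefficients one by one.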

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetRowMajor(Index index) const
  {
    eigen_assert(index+PacketSize-1 < dimensions().TotalSize());

    const Index initialIndex = index;
    Index inputIndex = 0;
    EIGEN_UNROLL_LOOP
    for (int i = 0; i < NumDims - 1; ++i) {
      const Index firstIdx = index;
      const Index lastIdx = index + PacketSize - 1;
      const Index lastPaddedLeft = m_padding[i].first * m_outputStrides[i+1];
      const Index firstPaddedRight = (m_dimensions[i] - m_padding[i].second) * m_outputStrides[i+1];
      const Index lastPaddedRight = m_outputStrides[i];

      if (!isLeftPaddingCompileTimeZero(i) && lastIdx < lastPaddedLeft) {
        // all the coefficients are in the padding zone.
        return internal::pset1<PacketReturnType>(m_paddingValue);
      }
      else if (!isRightPaddingCompileTimeZero(i) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) {
        // all the coefficients are in the padding zone.
        return internal::pset1<PacketReturnType>(m_paddingValue);
      }
      else if ((isLeftPaddingCompileTimeZero(i) && isRightPaddingCompileTimeZero(i)) || (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) {
        // all the coefficients are between the two padding zones.
        const Index idx = index / m_outputStrides[i+1];
        inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
        index -= idx * m_outputStrides[i+1];
      }
      else {
        // Every other case
        return packetWithPossibleZero(initialIndex);
      }
    }

    const Index lastIdx = index + PacketSize - 1;
    const Index firstIdx = index;
    const Index lastPaddedLeft = m_padding[NumDims-1].first;
    const Index firstPaddedRight = (m_dimensions[NumDims-1] - m_padding[NumDims-1].second);
    const Index lastPaddedRight = m_outputStrides[NumDims-1];

    if (!isLeftPaddingCompileTimeZero(NumDims-1) && lastIdx < lastPaddedLeft) {
      // all the coefficients are in the padding zone.
      return internal::pset1<PacketReturnType>(m_paddingValue);
    }
    else if (!isRightPaddingCompileTimeZero(NumDims-1) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) {
      // all the coefficients are in the padding zone.
      return internal::pset1<PacketReturnType>(m_paddingValue);
    }
    else if ((isLeftPaddingCompileTimeZero(NumDims-1) && isRightPaddingCompileTimeZero(NumDims-1)) || (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) {
      // all the coefficients are between the two padding zones.
      inputIndex += (index - m_padding[NumDims-1].first);
      return m_impl.template packet<Unaligned>(inputIndex);
    }
    // Every other case
    return packetWithPossibleZero(initialIndex);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetWithPossibleZero(Index index) const
  {
    EIGEN_ALIGN_MAX std::remove_const_t<CoeffReturnType> values[PacketSize];
    EIGEN_UNROLL_LOOP
    for (int i = 0; i < PacketSize; ++i) {
      values[i] = coeff(index+i);
    }
    PacketReturnType rslt = internal::pload<PacketReturnType>(values);
    return rslt;
  }

  Dimensions m_dimensions;
  array<Index, NumDims+1> m_outputStrides;
  array<Index, NumDims> m_inputStrides;
  TensorEvaluator<ArgType, Device> m_impl;
  PaddingDimensions m_padding;

  Scalar m_paddingValue;

  const Device EIGEN_DEVICE_REF m_device;
};



} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_PADDING_H
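
For context, a minimal usage sketch (not part of this header, assuming the unsupported Tensor module is available): TensorPaddingOp expressions are normally created through TensorBase::pad(), and evaluating the expression materializes the padded result.

// Usage sketch (illustrative): pad a 2x3 float tensor with one zero on each
// side of every dimension, producing a 4x5 result.
#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>
#include <utility>

int main() {
  Eigen::Tensor<float, 2> input(2, 3);
  input.setConstant(1.0f);

  // One coefficient of padding before and after each dimension.
  Eigen::array<std::pair<int, int>, 2> paddings;
  paddings[0] = std::make_pair(1, 1);
  paddings[1] = std::make_pair(1, 1);

  // pad() builds a TensorPaddingOp; assigning it to a Tensor evaluates it.
  Eigen::Tensor<float, 2> padded = input.pad(paddings, 0.0f);

  std::cout << padded << std::endl;  // 4x5: ones surrounded by zeros
  return 0;
}

Omitting the second argument to pad() uses a padding value of zero.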