CoreEvaluators.h
Go to the documentation of this file.
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>
5 // Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
6 // Copyright (C) 2011-2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
7 //
8 // This Source Code Form is subject to the terms of the Mozilla
9 // Public License v. 2.0. If a copy of the MPL was not distributed
10 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
11 
12 
13 #ifndef EIGEN_COREEVALUATORS_H
14 #define EIGEN_COREEVALUATORS_H
15 
16 #include "./InternalHeaderCheck.h"
17 
18 namespace Eigen {
19 
20 namespace internal {
21 
22 // This class returns the evaluator kind from the expression storage kind.
23 // Default assumes index based accessors
24 template<typename StorageKind>
25 struct storage_kind_to_evaluator_kind {
26  typedef IndexBased Kind;
27 };
28 
29 // This class returns the evaluator shape from the expression storage kind.
30 // It can be Dense, Sparse, Triangular, Diagonal, SelfAdjoint, Band, etc.
31 template<typename StorageKind> struct storage_kind_to_shape;
32 
33 template<> struct storage_kind_to_shape<Dense> { typedef DenseShape Shape; };
34 template<> struct storage_kind_to_shape<SolverStorage> { typedef SolverShape Shape; };
35 template<> struct storage_kind_to_shape<PermutationStorage> { typedef PermutationShape Shape; };
36 template<> struct storage_kind_to_shape<TranspositionsStorage> { typedef TranspositionsShape Shape; };
37 
38 // Evaluators have to be specialized with respect to various criteria such as:
39 // - storage/structure/shape
40 // - scalar type
41 // - etc.
42 // Therefore, we need specialization of evaluator providing additional template arguments for each kind of evaluators.
43 // We currently distinguish the following kind of evaluators:
// - unary_evaluator for expressions taking only one argument (CwiseUnaryOp, CwiseUnaryView, Transpose, MatrixWrapper, ArrayWrapper, Reverse, Replicate)
// - binary_evaluator for expressions taking two arguments (CwiseBinaryOp)
// - ternary_evaluator for expressions taking three arguments (CwiseTernaryOp)
47 // - product_evaluator for linear algebra products (Product); special case of binary_evaluator because it requires additional tags for dispatching.
48 // - mapbase_evaluator for Map, Block, Ref
49 // - block_evaluator for Block (special dispatching to a mapbase_evaluator or unary_evaluator)
50 
// Evaluator for expressions taking three arguments (e.g. CwiseTernaryOp).
// The trailing Kind/Scalar template parameters allow specializing on the
// evaluator kind and scalar type of each of the three arguments.
template< typename T,
          typename Arg1Kind   = typename evaluator_traits<typename T::Arg1>::Kind,
          typename Arg2Kind   = typename evaluator_traits<typename T::Arg2>::Kind,
          typename Arg3Kind   = typename evaluator_traits<typename T::Arg3>::Kind,
          typename Arg1Scalar = typename traits<typename T::Arg1>::Scalar,
          typename Arg2Scalar = typename traits<typename T::Arg2>::Scalar,
          typename Arg3Scalar = typename traits<typename T::Arg3>::Scalar> struct ternary_evaluator;
58 
// Evaluator for expressions taking two arguments (e.g. CwiseBinaryOp).
// Specializable on the evaluator kind and scalar type of both sides.
template< typename T,
          typename LhsKind   = typename evaluator_traits<typename T::Lhs>::Kind,
          typename RhsKind   = typename evaluator_traits<typename T::Rhs>::Kind,
          typename LhsScalar = typename traits<typename T::Lhs>::Scalar,
          typename RhsScalar = typename traits<typename T::Rhs>::Scalar> struct binary_evaluator;
64 
// Evaluator for expressions taking a single argument
// (CwiseUnaryOp, Transpose, wrappers, ...), specializable on the
// nested expression's evaluator kind and the expression's scalar type.
template< typename T,
          typename Kind   = typename evaluator_traits<typename T::NestedExpression>::Kind,
          typename Scalar = typename T::Scalar> struct unary_evaluator;
68 
69 // evaluator_traits<T> contains traits for evaluator<T>
70 
71 template<typename T>
72 struct evaluator_traits_base
73 {
74  // by default, get evaluator kind and shape from storage
75  typedef typename storage_kind_to_evaluator_kind<typename traits<T>::StorageKind>::Kind Kind;
76  typedef typename storage_kind_to_shape<typename traits<T>::StorageKind>::Shape Shape;
77 };
78 
// Default evaluator traits: simply inherit the storage-kind based defaults.
// Expressions needing custom evaluator traits specialize this class.
template<typename T>
struct evaluator_traits : public evaluator_traits_base<T>
{
};
84 
// By default no aliasing is assumed for the evaluator of T (value==false);
// specializations (selected on the evaluator shape) may override this.
template<typename T, typename Shape = typename evaluator_traits<T>::Shape >
struct evaluator_assume_aliasing {
  static const bool value = false;
};
89 
90 // By default, we assume a unary expression:
91 template<typename T>
92 struct evaluator : public unary_evaluator<T>
93 {
94  typedef unary_evaluator<T> Base;
95  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
96  explicit evaluator(const T& xpr) : Base(xpr) {}
97 };
98 
99 
// TODO: Think about const-correctness
// Evaluating a const-qualified expression type is the same as evaluating
// the non-const one; simply forward to evaluator<T>.
template<typename T>
struct evaluator<const T>
  : evaluator<T>
{
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  explicit evaluator(const T& xpr) : evaluator<T>(xpr) {}
};
108 
109 // ---------- base class for all evaluators ----------
110 
111 template<typename ExpressionType>
112 struct evaluator_base
113 {
114  // TODO that's not very nice to have to propagate all these traits. They are currently only needed to handle outer,inner indices.
115  typedef traits<ExpressionType> ExpressionTraits;
116 
117  enum {
118  Alignment = 0
119  };
120  // noncopyable:
121  // Don't make this class inherit noncopyable as this kills EBO (Empty Base Optimization)
122  // and make complex evaluator much larger than then should do.
123  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE evaluator_base() {}
124  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ~evaluator_base() {}
125 private:
126  EIGEN_DEVICE_FUNC evaluator_base(const evaluator_base&);
127  EIGEN_DEVICE_FUNC const evaluator_base& operator=(const evaluator_base&);
128 };
129 
130 // -------------------- Matrix and Array --------------------
131 //
132 // evaluator<PlainObjectBase> is a common base class for the
133 // Matrix and Array evaluators.
134 // Here we directly specialize evaluator. This is not really a unary expression, and it is, by definition, dense,
135 // so no need for more sophisticated dispatching.
136 
// This helper permits to completely eliminate m_outerStride if it is known at
// compile time: the generic version stores only the data pointer and returns
// the compile-time OuterStride.
template<typename Scalar,int OuterStride> class plainobjectbase_evaluator_data {
public:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr)
  {
#ifndef EIGEN_INTERNAL_DEBUGGING
    // Without internal debugging the assert below compiles away, leaving the
    // parameter unused; silence the warning.
    EIGEN_UNUSED_VARIABLE(outerStride);
#endif
    // The runtime stride must agree with the compile-time one.
    eigen_internal_assert(outerStride==OuterStride);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
  Index outerStride() const EIGEN_NOEXCEPT { return OuterStride; }
  const Scalar *data;
};
152 
153 template<typename Scalar> class plainobjectbase_evaluator_data<Scalar,Dynamic> {
154 public:
155  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
156  plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr), m_outerStride(outerStride) {}
157  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
158  Index outerStride() const { return m_outerStride; }
159  const Scalar *data;
160 protected:
161  Index m_outerStride;
162 };
163 
164 template<typename Derived>
165 struct evaluator<PlainObjectBase<Derived> >
166  : evaluator_base<Derived>
167 {
168  typedef PlainObjectBase<Derived> PlainObjectType;
169  typedef typename PlainObjectType::Scalar Scalar;
170  typedef typename PlainObjectType::CoeffReturnType CoeffReturnType;
171 
172  enum {
173  IsRowMajor = PlainObjectType::IsRowMajor,
174  IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime,
175  RowsAtCompileTime = PlainObjectType::RowsAtCompileTime,
176  ColsAtCompileTime = PlainObjectType::ColsAtCompileTime,
177 
178  CoeffReadCost = NumTraits<Scalar>::ReadCost,
179  Flags = traits<Derived>::EvaluatorFlags,
180  Alignment = traits<Derived>::Alignment
181  };
182  enum {
183  // We do not need to know the outer stride for vectors
184  OuterStrideAtCompileTime = IsVectorAtCompileTime ? 0
185  : int(IsRowMajor) ? ColsAtCompileTime
186  : RowsAtCompileTime
187  };
188 
189  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
190  evaluator()
191  : m_d(0,OuterStrideAtCompileTime)
192  {
193  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
194  }
195 
196  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
197  explicit evaluator(const PlainObjectType& m)
198  : m_d(m.data(),IsVectorAtCompileTime ? 0 : m.outerStride())
199  {
200  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
201  }
202 
203  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
204  CoeffReturnType coeff(Index row, Index col) const
205  {
206  if (IsRowMajor)
207  return m_d.data[row * m_d.outerStride() + col];
208  else
209  return m_d.data[row + col * m_d.outerStride()];
210  }
211 
212  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
213  CoeffReturnType coeff(Index index) const
214  {
215  return m_d.data[index];
216  }
217 
218  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
219  Scalar& coeffRef(Index row, Index col)
220  {
221  if (IsRowMajor)
222  return const_cast<Scalar*>(m_d.data)[row * m_d.outerStride() + col];
223  else
224  return const_cast<Scalar*>(m_d.data)[row + col * m_d.outerStride()];
225  }
226 
227  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
228  Scalar& coeffRef(Index index)
229  {
230  return const_cast<Scalar*>(m_d.data)[index];
231  }
232 
233  template<int LoadMode, typename PacketType>
234  EIGEN_STRONG_INLINE
235  PacketType packet(Index row, Index col) const
236  {
237  if (IsRowMajor)
238  return ploadt<PacketType, LoadMode>(m_d.data + row * m_d.outerStride() + col);
239  else
240  return ploadt<PacketType, LoadMode>(m_d.data + row + col * m_d.outerStride());
241  }
242 
243  template<int LoadMode, typename PacketType>
244  EIGEN_STRONG_INLINE
245  PacketType packet(Index index) const
246  {
247  return ploadt<PacketType, LoadMode>(m_d.data + index);
248  }
249 
250  template<int StoreMode,typename PacketType>
251  EIGEN_STRONG_INLINE
252  void writePacket(Index row, Index col, const PacketType& x)
253  {
254  if (IsRowMajor)
255  return pstoret<Scalar, PacketType, StoreMode>
256  (const_cast<Scalar*>(m_d.data) + row * m_d.outerStride() + col, x);
257  else
258  return pstoret<Scalar, PacketType, StoreMode>
259  (const_cast<Scalar*>(m_d.data) + row + col * m_d.outerStride(), x);
260  }
261 
262  template<int StoreMode, typename PacketType>
263  EIGEN_STRONG_INLINE
264  void writePacket(Index index, const PacketType& x)
265  {
266  return pstoret<Scalar, PacketType, StoreMode>(const_cast<Scalar*>(m_d.data) + index, x);
267  }
268 
269 protected:
270 
271  plainobjectbase_evaluator_data<Scalar,OuterStrideAtCompileTime> m_d;
272 };
273 
274 template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
275 struct evaluator<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
276  : evaluator<PlainObjectBase<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > >
277 {
278  typedef Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType;
279 
280  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
281  evaluator() {}
282 
283  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
284  explicit evaluator(const XprType& m)
285  : evaluator<PlainObjectBase<XprType> >(m)
286  { }
287 };
288 
289 template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
290 struct evaluator<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
291  : evaluator<PlainObjectBase<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > >
292 {
293  typedef Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType;
294 
295  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
296  evaluator() {}
297 
298  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
299  explicit evaluator(const XprType& m)
300  : evaluator<PlainObjectBase<XprType> >(m)
301  { }
302 };
303 
304 // -------------------- Transpose --------------------
305 
306 template<typename ArgType>
307 struct unary_evaluator<Transpose<ArgType>, IndexBased>
308  : evaluator_base<Transpose<ArgType> >
309 {
310  typedef Transpose<ArgType> XprType;
311 
312  enum {
313  CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
314  Flags = evaluator<ArgType>::Flags ^ RowMajorBit,
315  Alignment = evaluator<ArgType>::Alignment
316  };
317 
318  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
319  explicit unary_evaluator(const XprType& t) : m_argImpl(t.nestedExpression()) {}
320 
321  typedef typename XprType::Scalar Scalar;
322  typedef typename XprType::CoeffReturnType CoeffReturnType;
323 
324  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
325  CoeffReturnType coeff(Index row, Index col) const
326  {
327  return m_argImpl.coeff(col, row);
328  }
329 
330  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
331  CoeffReturnType coeff(Index index) const
332  {
333  return m_argImpl.coeff(index);
334  }
335 
336  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
337  Scalar& coeffRef(Index row, Index col)
338  {
339  return m_argImpl.coeffRef(col, row);
340  }
341 
342  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
343  typename XprType::Scalar& coeffRef(Index index)
344  {
345  return m_argImpl.coeffRef(index);
346  }
347 
348  template<int LoadMode, typename PacketType>
349  EIGEN_STRONG_INLINE
350  PacketType packet(Index row, Index col) const
351  {
352  return m_argImpl.template packet<LoadMode,PacketType>(col, row);
353  }
354 
355  template<int LoadMode, typename PacketType>
356  EIGEN_STRONG_INLINE
357  PacketType packet(Index index) const
358  {
359  return m_argImpl.template packet<LoadMode,PacketType>(index);
360  }
361 
362  template<int StoreMode, typename PacketType>
363  EIGEN_STRONG_INLINE
364  void writePacket(Index row, Index col, const PacketType& x)
365  {
366  m_argImpl.template writePacket<StoreMode,PacketType>(col, row, x);
367  }
368 
369  template<int StoreMode, typename PacketType>
370  EIGEN_STRONG_INLINE
371  void writePacket(Index index, const PacketType& x)
372  {
373  m_argImpl.template writePacket<StoreMode,PacketType>(index, x);
374  }
375 
376 protected:
377  evaluator<ArgType> m_argImpl;
378 };
379 
380 // -------------------- CwiseNullaryOp --------------------
381 // Like Matrix and Array, this is not really a unary expression, so we directly specialize evaluator.
// Likewise, there is no need for more sophisticated dispatching here.
383 
// nullary_wrapper adapts the functor of a CwiseNullaryOp to a uniform
// (i,j) / (i) coefficient and packet interface. The three boolean flags,
// detected via has_*_operator, tell which call operators the functor
// actually implements; the specializations below forward, collapse, or
// ignore the indices accordingly.
// Generic case: the functor provides everything, forward calls verbatim.
template<typename Scalar,typename NullaryOp,
         bool has_nullary = has_nullary_operator<NullaryOp>::value,
         bool has_unary   = has_unary_operator<NullaryOp>::value,
         bool has_binary  = has_binary_operator<NullaryOp>::value>
struct nullary_wrapper
{
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const { return op(i,j); }
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); }

  template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const { return op.template packetOp<T>(i,j); }
  template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp<T>(i); }
};
398 
// The functor only provides a nullary operator()/packetOp(): the indices are
// irrelevant and silently dropped (the unnamed defaulted parameters let this
// single overload serve both the (i,j) and (i) call forms).
template<typename Scalar,typename NullaryOp>
struct nullary_wrapper<Scalar,NullaryOp,true,false,false>
{
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType=0, IndexType=0) const { return op(); }
  template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType=0, IndexType=0) const { return op.template packetOp<T>(); }
};
406 
// The functor only provides the binary (i,j) operator: single-index calls
// fall back to (i,0) through the defaulted second parameter.
template<typename Scalar,typename NullaryOp>
struct nullary_wrapper<Scalar,NullaryOp,false,false,true>
{
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j=0) const { return op(i,j); }
  template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j=0) const { return op.template packetOp<T>(i,j); }
};
414 
// We need the following specialization for vector-only functors assigned to a runtime vector,
// for instance, using linspace and assigning a RowVectorXd to a MatrixXd or even a row of a MatrixXd.
// In this case, i==0 and j is used for the actual iteration.
template<typename Scalar,typename NullaryOp>
struct nullary_wrapper<Scalar,NullaryOp,false,true,false>
{
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const {
    // At least one index must be zero, so i+j selects the iterating one.
    eigen_assert(i==0 || j==0);
    return op(i+j);
  }
  template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const {
    eigen_assert(i==0 || j==0);
    return op.template packetOp<T>(i+j);
  }

  // Linear access forwards directly to the unary operator.
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); }
  template <typename T, typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp<T>(i); }
};
436 
// The functor implements none of the expected call operators: leave the
// wrapper empty so any attempt to use it fails at compile time.
template<typename Scalar,typename NullaryOp>
struct nullary_wrapper<Scalar,NullaryOp,false,false,false> {};
439 
#if 0 && EIGEN_COMP_MSVC>0
// Disable this ugly workaround. This is now handled in traits<Ref>::match,
// but this piece of code might still become handy if some other weird compilation
// errors pop up again.

// MSVC exhibits a weird compilation error when
// compiling:
//    Eigen::MatrixXf A = MatrixXf::Random(3,3);
//    Ref<const MatrixXf> R = 2.f*A;
// and that has_*ary_operator<scalar_constant_op<float>> have not been instantiated yet.
// The "problem" is that evaluator<2.f*A> is instantiated by traits<Ref>::match<2.f*A>
// and at that time has_*ary_operator<T> returns true regardless of T.
// Then nullary_wrapper is badly instantiated as nullary_wrapper<.,.,true,true,true>.
// The trick is thus to defer the proper instantiation of nullary_wrapper when coeff(),
// and packet() are really instantiated as implemented below:

// This is a simple wrapper around Index to enforce the re-instantiation of
// has_*ary_operator when needed.
template<typename T> struct nullary_wrapper_workaround_msvc {
  nullary_wrapper_workaround_msvc(const T&);
  operator T()const;
};

template<typename Scalar,typename NullaryOp>
struct nullary_wrapper<Scalar,NullaryOp,true,true,true>
{
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const {
    return nullary_wrapper<Scalar,NullaryOp,
    has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
    has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
    has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().operator()(op,i,j);
  }
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const {
    return nullary_wrapper<Scalar,NullaryOp,
    has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
    has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
    has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().operator()(op,i);
  }

  template <typename T, typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const {
    return nullary_wrapper<Scalar,NullaryOp,
    has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
    has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
    has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().template packetOp<T>(op,i,j);
  }
  template <typename T, typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const {
    return nullary_wrapper<Scalar,NullaryOp,
    has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
    has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
    has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().template packetOp<T>(op,i);
  }
};
#endif // MSVC workaround
497 
// Evaluator for CwiseNullaryOp. Coefficient and packet accessors are routed
// through nullary_wrapper, which adapts them to whichever call operators the
// functor actually provides.
template<typename NullaryOp, typename PlainObjectType>
struct evaluator<CwiseNullaryOp<NullaryOp,PlainObjectType> >
  : evaluator_base<CwiseNullaryOp<NullaryOp,PlainObjectType> >
{
  typedef CwiseNullaryOp<NullaryOp,PlainObjectType> XprType;
  typedef internal::remove_all_t<PlainObjectType> PlainObjectTypeCleaned;

  enum {
    CoeffReadCost = internal::functor_traits<NullaryOp>::Cost,

    // Keep only the flags the functor can honor: linear and packet access are
    // advertised only if the functor supports them; non-repeatable functors
    // additionally get EvalBeforeNestingBit so they are evaluated into a
    // temporary before being nested into a larger expression.
    Flags = (evaluator<PlainObjectTypeCleaned>::Flags
          &  (  HereditaryBits
              | (functor_has_linear_access<NullaryOp>::ret  ? LinearAccessBit : 0)
              | (functor_traits<NullaryOp>::PacketAccess    ? PacketAccessBit : 0)))
          |  (functor_traits<NullaryOp>::IsRepeatable ? 0 : EvalBeforeNestingBit),
    Alignment = AlignedMax
  };

  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& n)
    : m_functor(n.functor()), m_wrapper()
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  typedef typename XprType::CoeffReturnType CoeffReturnType;

  // Accessors are templated on IndexType rather than taking Index directly;
  // the wrapper decides how the indices are forwarded to the functor.
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(IndexType row, IndexType col) const
  {
    return m_wrapper(m_functor, row, col);
  }

  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(IndexType index) const
  {
    return m_wrapper(m_functor,index);
  }

  template<int LoadMode, typename PacketType, typename IndexType>
  EIGEN_STRONG_INLINE
  PacketType packet(IndexType row, IndexType col) const
  {
    return m_wrapper.template packetOp<PacketType>(m_functor, row, col);
  }

  template<int LoadMode, typename PacketType, typename IndexType>
  EIGEN_STRONG_INLINE
  PacketType packet(IndexType index) const
  {
    return m_wrapper.template packetOp<PacketType>(m_functor, index);
  }

protected:
  const NullaryOp m_functor;
  const internal::nullary_wrapper<CoeffReturnType,NullaryOp> m_wrapper;
};
556 
557 // -------------------- CwiseUnaryOp --------------------
558 
559 template<typename UnaryOp, typename ArgType>
560 struct unary_evaluator<CwiseUnaryOp<UnaryOp, ArgType>, IndexBased >
561  : evaluator_base<CwiseUnaryOp<UnaryOp, ArgType> >
562 {
563  typedef CwiseUnaryOp<UnaryOp, ArgType> XprType;
564 
565  enum {
566  CoeffReadCost = int(evaluator<ArgType>::CoeffReadCost) + int(functor_traits<UnaryOp>::Cost),
567 
568  Flags = evaluator<ArgType>::Flags
569  & (HereditaryBits | LinearAccessBit | (functor_traits<UnaryOp>::PacketAccess ? PacketAccessBit : 0)),
570  Alignment = evaluator<ArgType>::Alignment
571  };
572 
573  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
574  explicit unary_evaluator(const XprType& op) : m_d(op)
575  {
576  EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost);
577  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
578  }
579 
580  typedef typename XprType::CoeffReturnType CoeffReturnType;
581 
582  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
583  CoeffReturnType coeff(Index row, Index col) const
584  {
585  return m_d.func()(m_d.argImpl.coeff(row, col));
586  }
587 
588  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
589  CoeffReturnType coeff(Index index) const
590  {
591  return m_d.func()(m_d.argImpl.coeff(index));
592  }
593 
594  template<int LoadMode, typename PacketType>
595  EIGEN_STRONG_INLINE
596  PacketType packet(Index row, Index col) const
597  {
598  return m_d.func().packetOp(m_d.argImpl.template packet<LoadMode, PacketType>(row, col));
599  }
600 
601  template<int LoadMode, typename PacketType>
602  EIGEN_STRONG_INLINE
603  PacketType packet(Index index) const
604  {
605  return m_d.func().packetOp(m_d.argImpl.template packet<LoadMode, PacketType>(index));
606  }
607 
608 protected:
609 
610  // this helper permits to completely eliminate the functor if it is empty
611  struct Data
612  {
613  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
614  Data(const XprType& xpr) : op(xpr.functor()), argImpl(xpr.nestedExpression()) {}
615  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
616  const UnaryOp& func() const { return op; }
617  UnaryOp op;
618  evaluator<ArgType> argImpl;
619  };
620 
621  Data m_d;
622 };
623 
624 // ----------------------- Casting ---------------------
625 
626 template <typename SrcType, typename DstType, typename ArgType>
627 struct unary_evaluator<CwiseUnaryOp<core_cast_op<SrcType, DstType>, ArgType>, IndexBased> {
628  using CastOp = core_cast_op<SrcType, DstType>;
629  using XprType = CwiseUnaryOp<CastOp, ArgType>;
630 
631  // Use the largest packet type by default
632  using SrcPacketType = typename packet_traits<SrcType>::type;
633  static constexpr int SrcPacketSize = unpacket_traits<SrcPacketType>::size;
634  static constexpr int SrcPacketBytes = SrcPacketSize * sizeof(SrcType);
635 
636  enum {
637  CoeffReadCost = int(evaluator<ArgType>::CoeffReadCost) + int(functor_traits<CastOp>::Cost),
638  PacketAccess = functor_traits<CastOp>::PacketAccess,
639  ActualPacketAccessBit = PacketAccess ? PacketAccessBit : 0,
640  Flags = evaluator<ArgType>::Flags & (HereditaryBits | LinearAccessBit | ActualPacketAccessBit),
641  IsRowMajor = (evaluator<ArgType>::Flags & RowMajorBit),
642  Alignment = evaluator<ArgType>::Alignment
643  };
644 
645  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit unary_evaluator(const XprType& xpr)
646  : m_argImpl(xpr.nestedExpression()), m_rows(xpr.rows()), m_cols(xpr.cols()) {
647  EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<CastOp>::Cost);
648  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
649  }
650 
651  template <typename DstPacketType>
652  using AltSrcScalarOp = std::enable_if_t<(unpacket_traits<DstPacketType>::size < SrcPacketSize && !find_packet_by_size<SrcType, unpacket_traits<DstPacketType>::size>::value), bool>;
653  template <typename DstPacketType>
654  using SrcPacketArgs1 = std::enable_if_t<(find_packet_by_size<SrcType, unpacket_traits<DstPacketType>::size>::value), bool>;
655  template <typename DstPacketType>
656  using SrcPacketArgs2 = std::enable_if_t<(unpacket_traits<DstPacketType>::size) == (2 * SrcPacketSize), bool>;
657  template <typename DstPacketType>
658  using SrcPacketArgs4 = std::enable_if_t<(unpacket_traits<DstPacketType>::size) == (4 * SrcPacketSize), bool>;
659  template <typename DstPacketType>
660  using SrcPacketArgs8 = std::enable_if_t<(unpacket_traits<DstPacketType>::size) == (8 * SrcPacketSize), bool>;
661 
662  template <bool UseRowMajor = IsRowMajor, std::enable_if_t<UseRowMajor, bool> = true>
663  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool check_array_bounds(Index, Index col, Index packetSize) const {
664  return col + packetSize <= cols();
665  }
666  template <bool UseRowMajor = IsRowMajor, std::enable_if_t<!UseRowMajor, bool> = true>
667  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool check_array_bounds(Index row, Index, Index packetSize) const {
668  return row + packetSize <= rows();
669  }
670  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool check_array_bounds(Index index, Index packetSize) const {
671  return index + packetSize <= size();
672  }
673 
674  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE SrcType srcCoeff(Index row, Index col, Index offset) const {
675  Index actualRow = IsRowMajor ? row : row + offset;
676  Index actualCol = IsRowMajor ? col + offset : col;
677  return m_argImpl.coeff(actualRow, actualCol);
678  }
679  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE SrcType srcCoeff(Index index, Index offset) const {
680  Index actualIndex = index + offset;
681  return m_argImpl.coeff(actualIndex);
682  }
683 
684  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstType coeff(Index row, Index col) const {
685  return cast<SrcType, DstType>(srcCoeff(row, col, 0));
686  }
687  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstType coeff(Index index) const { return cast<SrcType, DstType>(srcCoeff(index, 0)); }
688 
689  template <int LoadMode, typename PacketType = SrcPacketType>
690  EIGEN_STRONG_INLINE PacketType srcPacket(Index row, Index col, Index offset) const {
691  constexpr int PacketSize = unpacket_traits<PacketType>::size;
692  Index actualRow = IsRowMajor ? row : row + (offset * PacketSize);
693  Index actualCol = IsRowMajor ? col + (offset * PacketSize) : col;
694  eigen_assert(check_array_bounds(actualRow, actualCol, PacketSize) && "Array index out of bounds");
695  return m_argImpl.template packet<LoadMode, PacketType>(actualRow, actualCol);
696  }
697  template <int LoadMode, typename PacketType = SrcPacketType>
698  EIGEN_STRONG_INLINE PacketType srcPacket(Index index, Index offset) const {
699  constexpr int PacketSize = unpacket_traits<PacketType>::size;
700  Index actualIndex = index + (offset * PacketSize);
701  eigen_assert(check_array_bounds(actualIndex, PacketSize) && "Array index out of bounds");
702  return m_argImpl.template packet<LoadMode, PacketType>(actualIndex);
703  }
704 
705  // There is no source packet type with equal or fewer elements than DstPacketType.
706  // This is problematic as the evaluation loop may attempt to access data outside the bounds of the array.
707  // For example, consider the cast utilizing pcast<Packet4f,Packet2d> with an array of size 4: {0.0f,1.0f,2.0f,3.0f}.
// The first iteration of the evaluation loop will load 16 bytes: {0.0f,1.0f,2.0f,3.0f} and cast to {0.0,1.0}, which is acceptable.
709  // The second iteration will load 16 bytes: {2.0f,3.0f,?,?}, which is outside the bounds of the array.
710 
  // Instead, perform runtime check to determine if the load would access data outside the bounds of the array.
  // If not, perform full load. Otherwise, revert to a scalar loop to perform a partial load.
  // In either case, perform a vectorized cast of the source packet.
  template <int LoadMode, typename DstPacketType, AltSrcScalarOp<DstPacketType> = true>
  EIGEN_STRONG_INLINE DstPacketType packet(Index row, Index col) const {
    constexpr int DstPacketSize = unpacket_traits<DstPacketType>::size;
    // Number of source bytes actually consumed by one destination packet.
    constexpr int SrcBytesIncrement = DstPacketSize * sizeof(SrcType);
    // Never request stronger alignment than the caller's LoadMode allows.
    constexpr int SrcLoadMode = plain_enum_min(SrcBytesIncrement, LoadMode);
    SrcPacketType src;
    if (EIGEN_PREDICT_TRUE(check_array_bounds(row, col, SrcPacketSize))) {
      // Fast path: a full SrcPacketType load stays within the array bounds.
      src = srcPacket<SrcLoadMode>(row, col, 0);
    } else {
      // Slow path: gather only the DstPacketSize coefficients that are needed,
      // zero-pad the remainder of the packet, then do the vectorized cast.
      Array<SrcType, SrcPacketSize, 1> srcArray;
      for (size_t k = 0; k < DstPacketSize; k++) srcArray[k] = srcCoeff(row, col, k);
      for (size_t k = DstPacketSize; k < SrcPacketSize; k++) srcArray[k] = SrcType(0);
      src = pload<SrcPacketType>(srcArray.data());
    }
    return pcast<SrcPacketType, DstPacketType>(src);
  }
  // Use the source packet type with the same size as DstPacketType, if it exists
  template <int LoadMode, typename DstPacketType, SrcPacketArgs1<DstPacketType> = true>
  EIGEN_STRONG_INLINE DstPacketType packet(Index row, Index col) const {
    constexpr int DstPacketSize = unpacket_traits<DstPacketType>::size;
    using SizedSrcPacketType = typename find_packet_by_size<SrcType, DstPacketSize>::type;
    constexpr int SrcBytesIncrement = DstPacketSize * sizeof(SrcType);
    constexpr int SrcLoadMode = plain_enum_min(SrcBytesIncrement, LoadMode);
    return pcast<SizedSrcPacketType, DstPacketType>(
        srcPacket<SrcLoadMode, SizedSrcPacketType>(row, col, 0));
  }
  // unpacket_traits<DstPacketType>::size == 2 * SrcPacketSize:
  // cast two consecutive source packets into one destination packet.
  template <int LoadMode, typename DstPacketType, SrcPacketArgs2<DstPacketType> = true>
  EIGEN_STRONG_INLINE DstPacketType packet(Index row, Index col) const {
    constexpr int SrcLoadMode = plain_enum_min(SrcPacketBytes, LoadMode);
    return pcast<SrcPacketType, DstPacketType>(
        srcPacket<SrcLoadMode>(row, col, 0), srcPacket<SrcLoadMode>(row, col, 1));
  }
  // unpacket_traits<DstPacketType>::size == 4 * SrcPacketSize
  template <int LoadMode, typename DstPacketType, SrcPacketArgs4<DstPacketType> = true>
  EIGEN_STRONG_INLINE DstPacketType packet(Index row, Index col) const {
    constexpr int SrcLoadMode = plain_enum_min(SrcPacketBytes, LoadMode);
    return pcast<SrcPacketType, DstPacketType>(
        srcPacket<SrcLoadMode>(row, col, 0), srcPacket<SrcLoadMode>(row, col, 1),
        srcPacket<SrcLoadMode>(row, col, 2), srcPacket<SrcLoadMode>(row, col, 3));
  }
  // unpacket_traits<DstPacketType>::size == 8 * SrcPacketSize
  template <int LoadMode, typename DstPacketType, SrcPacketArgs8<DstPacketType> = true>
  EIGEN_STRONG_INLINE DstPacketType packet(Index row, Index col) const {
    constexpr int SrcLoadMode = plain_enum_min(SrcPacketBytes, LoadMode);
    return pcast<SrcPacketType, DstPacketType>(
        srcPacket<SrcLoadMode>(row, col, 0), srcPacket<SrcLoadMode>(row, col, 1),
        srcPacket<SrcLoadMode>(row, col, 2), srcPacket<SrcLoadMode>(row, col, 3),
        srcPacket<SrcLoadMode>(row, col, 4), srcPacket<SrcLoadMode>(row, col, 5),
        srcPacket<SrcLoadMode>(row, col, 6), srcPacket<SrcLoadMode>(row, col, 7));
  }
765 
  // Analogous routines for linear (single-index) access.
  template <int LoadMode, typename DstPacketType, AltSrcScalarOp<DstPacketType> = true>
  EIGEN_STRONG_INLINE DstPacketType packet(Index index) const {
    constexpr int DstPacketSize = unpacket_traits<DstPacketType>::size;
    // Number of source bytes actually consumed by one destination packet.
    constexpr int SrcBytesIncrement = DstPacketSize * sizeof(SrcType);
    // Never request stronger alignment than the caller's LoadMode allows.
    constexpr int SrcLoadMode = plain_enum_min(SrcBytesIncrement, LoadMode);
    SrcPacketType src;
    if (EIGEN_PREDICT_TRUE(check_array_bounds(index, SrcPacketSize))) {
      // Fast path: a full SrcPacketType load stays within the array bounds.
      src = srcPacket<SrcLoadMode>(index, 0);
    } else {
      // Slow path: gather the needed coefficients one by one and zero-pad the
      // rest of the packet before the vectorized cast.
      Array<SrcType, SrcPacketSize, 1> srcArray;
      for (size_t k = 0; k < DstPacketSize; k++) srcArray[k] = srcCoeff(index, k);
      for (size_t k = DstPacketSize; k < SrcPacketSize; k++) srcArray[k] = SrcType(0);
      src = pload<SrcPacketType>(srcArray.data());
    }
    return pcast<SrcPacketType, DstPacketType>(src);
  }
  // Use the source packet type with the same size as DstPacketType, if it exists.
  template <int LoadMode, typename DstPacketType, SrcPacketArgs1<DstPacketType> = true>
  EIGEN_STRONG_INLINE DstPacketType packet(Index index) const {
    constexpr int DstPacketSize = unpacket_traits<DstPacketType>::size;
    using SizedSrcPacketType = typename find_packet_by_size<SrcType, DstPacketSize>::type;
    constexpr int SrcBytesIncrement = DstPacketSize * sizeof(SrcType);
    constexpr int SrcLoadMode = plain_enum_min(SrcBytesIncrement, LoadMode);
    return pcast<SizedSrcPacketType, DstPacketType>(
        srcPacket<SrcLoadMode, SizedSrcPacketType>(index, 0));
  }
  // unpacket_traits<DstPacketType>::size == 2 * SrcPacketSize
  template <int LoadMode, typename DstPacketType, SrcPacketArgs2<DstPacketType> = true>
  EIGEN_STRONG_INLINE DstPacketType packet(Index index) const {
    constexpr int SrcLoadMode = plain_enum_min(SrcPacketBytes, LoadMode);
    return pcast<SrcPacketType, DstPacketType>(
        srcPacket<SrcLoadMode>(index, 0), srcPacket<SrcLoadMode>(index, 1));
  }
  // unpacket_traits<DstPacketType>::size == 4 * SrcPacketSize
  template <int LoadMode, typename DstPacketType, SrcPacketArgs4<DstPacketType> = true>
  EIGEN_STRONG_INLINE DstPacketType packet(Index index) const {
    constexpr int SrcLoadMode = plain_enum_min(SrcPacketBytes, LoadMode);
    return pcast<SrcPacketType, DstPacketType>(
        srcPacket<SrcLoadMode>(index, 0), srcPacket<SrcLoadMode>(index, 1),
        srcPacket<SrcLoadMode>(index, 2), srcPacket<SrcLoadMode>(index, 3));
  }
  // unpacket_traits<DstPacketType>::size == 8 * SrcPacketSize
  template <int LoadMode, typename DstPacketType, SrcPacketArgs8<DstPacketType> = true>
  EIGEN_STRONG_INLINE DstPacketType packet(Index index) const {
    constexpr int SrcLoadMode = plain_enum_min(SrcPacketBytes, LoadMode);
    return pcast<SrcPacketType, DstPacketType>(
        srcPacket<SrcLoadMode>(index, 0), srcPacket<SrcLoadMode>(index, 1),
        srcPacket<SrcLoadMode>(index, 2), srcPacket<SrcLoadMode>(index, 3),
        srcPacket<SrcLoadMode>(index, 4), srcPacket<SrcLoadMode>(index, 5),
        srcPacket<SrcLoadMode>(index, 6), srcPacket<SrcLoadMode>(index, 7));
  }
814 
  // Dimensions of the expression; compile-time constants fold to literals.
  constexpr EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rows() const { return m_rows; }
  constexpr EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index cols() const { return m_cols; }
  constexpr EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return m_rows * m_cols; }

 protected:
  const evaluator<ArgType> m_argImpl;
  // variable_if_dynamic stores a runtime value only when the compile-time size
  // is Dynamic; it converts implicitly to Index (see the accessors above).
  const variable_if_dynamic<Index, XprType::RowsAtCompileTime> m_rows;
  const variable_if_dynamic<Index, XprType::ColsAtCompileTime> m_cols;
};
824 
825 // -------------------- CwiseTernaryOp --------------------
826 
827 // this is a ternary expression
828 template<typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>
829 struct evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
830  : public ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
831 {
832  typedef CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> XprType;
833  typedef ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> > Base;
834 
835  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {}
836 };
837 
838 template<typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>
839 struct ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>, IndexBased, IndexBased>
840  : evaluator_base<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
841 {
842  typedef CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> XprType;
843 
844  enum {
845  CoeffReadCost = int(evaluator<Arg1>::CoeffReadCost) + int(evaluator<Arg2>::CoeffReadCost) + int(evaluator<Arg3>::CoeffReadCost) + int(functor_traits<TernaryOp>::Cost),
846 
847  Arg1Flags = evaluator<Arg1>::Flags,
848  Arg2Flags = evaluator<Arg2>::Flags,
849  Arg3Flags = evaluator<Arg3>::Flags,
850  SameType = is_same<typename Arg1::Scalar,typename Arg2::Scalar>::value && is_same<typename Arg1::Scalar,typename Arg3::Scalar>::value,
851  StorageOrdersAgree = (int(Arg1Flags)&RowMajorBit)==(int(Arg2Flags)&RowMajorBit) && (int(Arg1Flags)&RowMajorBit)==(int(Arg3Flags)&RowMajorBit),
852  Flags0 = (int(Arg1Flags) | int(Arg2Flags) | int(Arg3Flags)) & (
854  | (int(Arg1Flags) & int(Arg2Flags) & int(Arg3Flags) &
855  ( (StorageOrdersAgree ? LinearAccessBit : 0)
856  | (functor_traits<TernaryOp>::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0)
857  )
858  )
859  ),
860  Flags = (Flags0 & ~RowMajorBit) | (Arg1Flags & RowMajorBit),
861  Alignment = plain_enum_min(
862  plain_enum_min(evaluator<Arg1>::Alignment, evaluator<Arg2>::Alignment),
863  evaluator<Arg3>::Alignment)
864  };
865 
866  EIGEN_DEVICE_FUNC explicit ternary_evaluator(const XprType& xpr) : m_d(xpr)
867  {
868  EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<TernaryOp>::Cost);
869  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
870  }
871 
872  typedef typename XprType::CoeffReturnType CoeffReturnType;
873 
874  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
875  CoeffReturnType coeff(Index row, Index col) const
876  {
877  return m_d.func()(m_d.arg1Impl.coeff(row, col), m_d.arg2Impl.coeff(row, col), m_d.arg3Impl.coeff(row, col));
878  }
879 
880  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
881  CoeffReturnType coeff(Index index) const
882  {
883  return m_d.func()(m_d.arg1Impl.coeff(index), m_d.arg2Impl.coeff(index), m_d.arg3Impl.coeff(index));
884  }
885 
886  template<int LoadMode, typename PacketType>
887  EIGEN_STRONG_INLINE
888  PacketType packet(Index row, Index col) const
889  {
890  return m_d.func().packetOp(m_d.arg1Impl.template packet<LoadMode,PacketType>(row, col),
891  m_d.arg2Impl.template packet<LoadMode,PacketType>(row, col),
892  m_d.arg3Impl.template packet<LoadMode,PacketType>(row, col));
893  }
894 
895  template<int LoadMode, typename PacketType>
896  EIGEN_STRONG_INLINE
897  PacketType packet(Index index) const
898  {
899  return m_d.func().packetOp(m_d.arg1Impl.template packet<LoadMode,PacketType>(index),
900  m_d.arg2Impl.template packet<LoadMode,PacketType>(index),
901  m_d.arg3Impl.template packet<LoadMode,PacketType>(index));
902  }
903 
904 protected:
905  // this helper permits to completely eliminate the functor if it is empty
906  struct Data
907  {
908  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
909  Data(const XprType& xpr) : op(xpr.functor()), arg1Impl(xpr.arg1()), arg2Impl(xpr.arg2()), arg3Impl(xpr.arg3()) {}
910  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
911  const TernaryOp& func() const { return op; }
912  TernaryOp op;
913  evaluator<Arg1> arg1Impl;
914  evaluator<Arg2> arg2Impl;
915  evaluator<Arg3> arg3Impl;
916  };
917 
918  Data m_d;
919 };
920 
921 // -------------------- CwiseBinaryOp --------------------
922 
923 // this is a binary expression
924 template<typename BinaryOp, typename Lhs, typename Rhs>
925 struct evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
926  : public binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
927 {
928  typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
929  typedef binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> > Base;
930 
931  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
932  explicit evaluator(const XprType& xpr) : Base(xpr) {}
933 };
934 
935 template<typename BinaryOp, typename Lhs, typename Rhs>
936 struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IndexBased, IndexBased>
937  : evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
938 {
939  typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
940 
941  enum {
942  CoeffReadCost = int(evaluator<Lhs>::CoeffReadCost) + int(evaluator<Rhs>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
943 
944  LhsFlags = evaluator<Lhs>::Flags,
945  RhsFlags = evaluator<Rhs>::Flags,
946  SameType = is_same<typename Lhs::Scalar,typename Rhs::Scalar>::value,
947  StorageOrdersAgree = (int(LhsFlags)&RowMajorBit)==(int(RhsFlags)&RowMajorBit),
948  Flags0 = (int(LhsFlags) | int(RhsFlags)) & (
950  | (int(LhsFlags) & int(RhsFlags) &
951  ( (StorageOrdersAgree ? LinearAccessBit : 0)
952  | (functor_traits<BinaryOp>::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0)
953  )
954  )
955  ),
956  Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit),
957  Alignment = plain_enum_min(evaluator<Lhs>::Alignment, evaluator<Rhs>::Alignment)
958  };
959 
960  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
961  explicit binary_evaluator(const XprType& xpr) : m_d(xpr)
962  {
963  EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
964  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
965  }
966 
967  typedef typename XprType::CoeffReturnType CoeffReturnType;
968 
969  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
970  CoeffReturnType coeff(Index row, Index col) const
971  {
972  return m_d.func()(m_d.lhsImpl.coeff(row, col), m_d.rhsImpl.coeff(row, col));
973  }
974 
975  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
976  CoeffReturnType coeff(Index index) const
977  {
978  return m_d.func()(m_d.lhsImpl.coeff(index), m_d.rhsImpl.coeff(index));
979  }
980 
981  template<int LoadMode, typename PacketType>
982  EIGEN_STRONG_INLINE
983  PacketType packet(Index row, Index col) const
984  {
985  return m_d.func().packetOp(m_d.lhsImpl.template packet<LoadMode,PacketType>(row, col),
986  m_d.rhsImpl.template packet<LoadMode,PacketType>(row, col));
987  }
988 
989  template<int LoadMode, typename PacketType>
990  EIGEN_STRONG_INLINE
991  PacketType packet(Index index) const
992  {
993  return m_d.func().packetOp(m_d.lhsImpl.template packet<LoadMode,PacketType>(index),
994  m_d.rhsImpl.template packet<LoadMode,PacketType>(index));
995  }
996 
997 protected:
998 
999  // this helper permits to completely eliminate the functor if it is empty
1000  struct Data
1001  {
1002  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1003  Data(const XprType& xpr) : op(xpr.functor()), lhsImpl(xpr.lhs()), rhsImpl(xpr.rhs()) {}
1004  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1005  const BinaryOp& func() const { return op; }
1006  BinaryOp op;
1007  evaluator<Lhs> lhsImpl;
1008  evaluator<Rhs> rhsImpl;
1009  };
1010 
1011  Data m_d;
1012 };
1013 
// -------------------- CwiseUnaryView --------------------

// Index-based evaluator for CwiseUnaryView: applies a view functor to each
// coefficient of the nested expression. For coeffRef() to be valid, the functor
// must yield an lvalue reference into the argument's storage (the result is
// returned as Scalar&), which makes the view writable.
template<typename UnaryOp, typename ArgType, typename StrideType>
struct unary_evaluator<CwiseUnaryView<UnaryOp, ArgType, StrideType>, IndexBased>
  : evaluator_base<CwiseUnaryView<UnaryOp, ArgType, StrideType> >
{
  typedef CwiseUnaryView<UnaryOp, ArgType, StrideType> XprType;

  enum {
    CoeffReadCost = int(evaluator<ArgType>::CoeffReadCost) + int(functor_traits<UnaryOp>::Cost),

    // No PacketAccessBit: the view is evaluated coefficient by coefficient.
    Flags = (evaluator<ArgType>::Flags & (HereditaryBits | LinearAccessBit | DirectAccessBit)),

    Alignment = 0 // FIXME it is not very clear why alignment is necessarily lost...
  };

  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& op) : m_d(op)
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost);
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index row, Index col) const
  {
    return m_d.func()(m_d.argImpl.coeff(row, col));
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index index) const
  {
    return m_d.func()(m_d.argImpl.coeff(index));
  }

  // Writable access: forwards a mutable reference through the view functor.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index row, Index col)
  {
    return m_d.func()(m_d.argImpl.coeffRef(row, col));
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index index)
  {
    return m_d.func()(m_d.argImpl.coeffRef(index));
  }

protected:

  // this helper permits to completely eliminate the functor if it is empty
  struct Data
  {
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    Data(const XprType& xpr) : op(xpr.functor()), argImpl(xpr.nestedExpression()) {}
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const UnaryOp& func() const { return op; }
    UnaryOp op;
    evaluator<ArgType> argImpl;
  };

  Data m_d;
};
1078 
// -------------------- Map --------------------

// FIXME perhaps the PlainObjectType could be provided by Derived::PlainObject ?
// but that might complicate template specialization
template<typename Derived, typename PlainObjectType>
struct mapbase_evaluator;

// Common evaluator base for Map-like expressions (Map, Ref, direct-access
// Block): coefficients are read and written directly through a raw pointer
// plus inner/outer strides that may be known at compile time.
template<typename Derived, typename PlainObjectType>
struct mapbase_evaluator : evaluator_base<Derived>
{
  typedef Derived XprType;
  typedef typename XprType::PointerType PointerType;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;

  enum {
    // NOTE(review): IsRowMajor is initialized from RowsAtCompileTime, which
    // looks like a leftover; the stride helpers below consult
    // XprType::IsRowMajor directly, so this enum value appears unused —
    // confirm before relying on it.
    IsRowMajor = XprType::RowsAtCompileTime,
    ColsAtCompileTime = XprType::ColsAtCompileTime,
    CoeffReadCost = NumTraits<Scalar>::ReadCost
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  explicit mapbase_evaluator(const XprType& map)
    : m_data(const_cast<PointerType>(map.data())),
      m_innerStride(map.innerStride()),
      m_outerStride(map.outerStride())
  {
    // Vectorized (packet) access is only allowed when consecutive inner
    // coefficients are contiguous in memory (inner stride fixed to 1).
    EIGEN_STATIC_ASSERT(check_implication((evaluator<Derived>::Flags & PacketAccessBit) != 0,
                                          internal::inner_stride_at_compile_time<Derived>::ret == 1),
                        PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1);
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index row, Index col) const
  {
    return m_data[col * colStride() + row * rowStride()];
  }

  // Linear access: addresses coefficients using the inner stride only.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index index) const
  {
    return m_data[index * m_innerStride.value()];
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index row, Index col)
  {
    return m_data[col * colStride() + row * rowStride()];
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index index)
  {
    return m_data[index * m_innerStride.value()];
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index row, Index col) const
  {
    PointerType ptr = m_data + row * rowStride() + col * colStride();
    return internal::ploadt<PacketType, LoadMode>(ptr);
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index index) const
  {
    return internal::ploadt<PacketType, LoadMode>(m_data + index * m_innerStride.value());
  }

  template<int StoreMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index row, Index col, const PacketType& x)
  {
    PointerType ptr = m_data + row * rowStride() + col * colStride();
    return internal::pstoret<Scalar, PacketType, StoreMode>(ptr, x);
  }

  template<int StoreMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketType& x)
  {
    internal::pstoret<Scalar, PacketType, StoreMode>(m_data + index * m_innerStride.value(), x);
  }
protected:
  // Map (row, col) onto memory strides according to the expression's
  // compile-time storage order.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
  Index rowStride() const EIGEN_NOEXCEPT {
    return XprType::IsRowMajor ? m_outerStride.value() : m_innerStride.value();
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
  Index colStride() const EIGEN_NOEXCEPT {
    return XprType::IsRowMajor ? m_innerStride.value() : m_outerStride.value();
  }

  PointerType m_data;
  // Strides are stored at runtime only when not fixed at compile time.
  const internal::variable_if_dynamic<Index, XprType::InnerStrideAtCompileTime> m_innerStride;
  const internal::variable_if_dynamic<Index, XprType::OuterStrideAtCompileTime> m_outerStride;
};
1179 
// Evaluator for Map: inherits the pointer+stride machinery from
// mapbase_evaluator and computes which access flags survive the strides.
template<typename PlainObjectType, int MapOptions, typename StrideType>
struct evaluator<Map<PlainObjectType, MapOptions, StrideType> >
  : public mapbase_evaluator<Map<PlainObjectType, MapOptions, StrideType>, PlainObjectType>
{
  typedef Map<PlainObjectType, MapOptions, StrideType> XprType;
  typedef typename XprType::Scalar Scalar;
  // TODO: should check for smaller packet types once we can handle multi-sized packet types
  typedef typename packet_traits<Scalar>::type PacketScalar;

  enum {
    // A StrideType value of 0 means "use the plain object's default stride".
    InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0
                             ? int(PlainObjectType::InnerStrideAtCompileTime)
                             : int(StrideType::InnerStrideAtCompileTime),
    OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0
                             ? int(PlainObjectType::OuterStrideAtCompileTime)
                             : int(StrideType::OuterStrideAtCompileTime),
    HasNoInnerStride = InnerStrideAtCompileTime == 1,
    HasNoOuterStride = StrideType::OuterStrideAtCompileTime == 0,
    HasNoStride = HasNoInnerStride && HasNoOuterStride,
    IsDynamicSize = PlainObjectType::SizeAtCompileTime==Dynamic,

    // Packet access requires unit inner stride; linear access requires either
    // no custom stride at all or a vector expression.
    PacketAccessMask = bool(HasNoInnerStride) ? ~int(0) : ~int(PacketAccessBit),
    LinearAccessMask = bool(HasNoStride) || bool(PlainObjectType::IsVectorAtCompileTime) ? ~int(0) : ~int(LinearAccessBit),
    Flags = int( evaluator<PlainObjectType>::Flags) & (LinearAccessMask&PacketAccessMask),

    // Alignment is whatever the Map's options promise about the data pointer.
    Alignment = int(MapOptions)&int(AlignedMask)
  };

  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& map)
    : mapbase_evaluator<XprType, PlainObjectType>(map)
  { }
};
1212 
1213 // -------------------- Ref --------------------
1214 
1215 template<typename PlainObjectType, int RefOptions, typename StrideType>
1216 struct evaluator<Ref<PlainObjectType, RefOptions, StrideType> >
1217  : public mapbase_evaluator<Ref<PlainObjectType, RefOptions, StrideType>, PlainObjectType>
1218 {
1219  typedef Ref<PlainObjectType, RefOptions, StrideType> XprType;
1220 
1221  enum {
1222  Flags = evaluator<Map<PlainObjectType, RefOptions, StrideType> >::Flags,
1223  Alignment = evaluator<Map<PlainObjectType, RefOptions, StrideType> >::Alignment
1224  };
1225 
1226  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1227  explicit evaluator(const XprType& ref)
1228  : mapbase_evaluator<XprType, PlainObjectType>(ref)
1229  { }
1230 };
1231 
// -------------------- Block --------------------

template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel,
         bool HasDirectAccess = internal::has_direct_access<ArgType>::ret> struct block_evaluator;

// Evaluator for Block: computes the block's own traits, then dispatches to a
// direct-access (pointer-based) or indirect (index-based) implementation via
// block_evaluator.
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
  : block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel>
{
  typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
  typedef typename XprType::Scalar Scalar;
  // TODO: should check for smaller packet types once we can handle multi-sized packet types
  typedef typename packet_traits<Scalar>::type PacketScalar;

  enum {
    CoeffReadCost = evaluator<ArgType>::CoeffReadCost,

    RowsAtCompileTime = traits<XprType>::RowsAtCompileTime,
    ColsAtCompileTime = traits<XprType>::ColsAtCompileTime,
    MaxRowsAtCompileTime = traits<XprType>::MaxRowsAtCompileTime,
    MaxColsAtCompileTime = traits<XprType>::MaxColsAtCompileTime,

    ArgTypeIsRowMajor = (int(evaluator<ArgType>::Flags)&RowMajorBit) != 0,
    // A one-row block is treated as row-major, a one-column block as
    // column-major; otherwise the block follows its argument's storage order.
    IsRowMajor = (MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1) ? 1
               : (MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1) ? 0
               : ArgTypeIsRowMajor,
    HasSameStorageOrderAsArgType = (IsRowMajor == ArgTypeIsRowMajor),
    InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
    // When the block's storage order differs from the argument's, inner and
    // outer strides swap roles.
    InnerStrideAtCompileTime = HasSameStorageOrderAsArgType
                             ? int(inner_stride_at_compile_time<ArgType>::ret)
                             : int(outer_stride_at_compile_time<ArgType>::ret),
    OuterStrideAtCompileTime = HasSameStorageOrderAsArgType
                             ? int(outer_stride_at_compile_time<ArgType>::ret)
                             : int(inner_stride_at_compile_time<ArgType>::ret),
    MaskPacketAccessBit = (InnerStrideAtCompileTime == 1 || HasSameStorageOrderAsArgType) ? PacketAccessBit : 0,

    // Vectors are always linearly addressable; an inner panel is too, provided
    // the argument itself supports linear access.
    FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1 || (InnerPanel && (evaluator<ArgType>::Flags&LinearAccessBit))) ? LinearAccessBit : 0,
    FlagsRowMajorBit = XprType::Flags&RowMajorBit,
    Flags0 = evaluator<ArgType>::Flags & ( (HereditaryBits & ~RowMajorBit) |
                                           DirectAccessBit |
                                           MaskPacketAccessBit),
    Flags = Flags0 | FlagsLinearAccessBit | FlagsRowMajorBit,

    PacketAlignment = unpacket_traits<PacketScalar>::alignment,
    // An inner panel with a fixed outer stride that is a multiple of the packet
    // alignment preserves the argument's alignment across rows/columns.
    Alignment0 = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic)
                             && (OuterStrideAtCompileTime!=0)
                             && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % int(PacketAlignment)) == 0)) ? int(PacketAlignment) : 0,
    Alignment = plain_enum_min(evaluator<ArgType>::Alignment, Alignment0)
  };
  typedef block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel> block_evaluator_type;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  explicit evaluator(const XprType& block) : block_evaluator_type(block)
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }
};
1288 
1289 // no direct-access => dispatch to a unary evaluator
1290 template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
1291 struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /*HasDirectAccess*/ false>
1292  : unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
1293 {
1294  typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
1295 
1296  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1297  explicit block_evaluator(const XprType& block)
1298  : unary_evaluator<XprType>(block)
1299  {}
1300 };
1301 
// Index-based Block evaluator: shifts every (row, col) access by the block's
// start offsets. When the block and its argument share a storage order and the
// argument is linearly addressable (ForwardLinearAccess), linear indices are
// forwarded directly with a precomputed offset instead of being re-mapped to
// (row, col).
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>, IndexBased>
  : evaluator_base<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
{
  typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  explicit unary_evaluator(const XprType& block)
    : m_argImpl(block.nestedExpression()),
      m_startRow(block.startRow()),
      m_startCol(block.startCol()),
      // Linear offset of the block's first coefficient inside the nested
      // expression; only meaningful (non-zero) when ForwardLinearAccess holds.
      m_linear_offset(ForwardLinearAccess?(ArgType::IsRowMajor ? block.startRow()*block.nestedExpression().cols() + block.startCol() : block.startCol()*block.nestedExpression().rows() + block.startRow()):0)
  { }

  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;

  enum {
    RowsAtCompileTime = XprType::RowsAtCompileTime,
    // Linear indices can be forwarded to the argument when the block is an
    // inner panel or shares the argument's storage order, and the argument
    // itself supports linear access.
    ForwardLinearAccess = (InnerPanel || int(XprType::IsRowMajor)==int(ArgType::IsRowMajor)) && bool(evaluator<ArgType>::Flags&LinearAccessBit)
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index row, Index col) const
  {
    return m_argImpl.coeff(m_startRow.value() + row, m_startCol.value() + col);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index index) const
  {
    return linear_coeff_impl(index, bool_constant<ForwardLinearAccess>());
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index row, Index col)
  {
    return m_argImpl.coeffRef(m_startRow.value() + row, m_startCol.value() + col);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index index)
  {
    return linear_coeffRef_impl(index, bool_constant<ForwardLinearAccess>());
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index row, Index col) const
  {
    return m_argImpl.template packet<LoadMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col);
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index index) const
  {
    if (ForwardLinearAccess)
      return m_argImpl.template packet<LoadMode,PacketType>(m_linear_offset.value() + index);
    else
      // Fall back to 2D access: the block must be a vector for linear indexing.
      return packet<LoadMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
                                         RowsAtCompileTime == 1 ? index : 0);
  }

  template<int StoreMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index row, Index col, const PacketType& x)
  {
    return m_argImpl.template writePacket<StoreMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col, x);
  }

  template<int StoreMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketType& x)
  {
    if (ForwardLinearAccess)
      return m_argImpl.template writePacket<StoreMode,PacketType>(m_linear_offset.value() + index, x);
    else
      return writePacket<StoreMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
                                               RowsAtCompileTime == 1 ? index : 0,
                                               x);
  }

protected:
  // Tag-dispatched linear access: either forward the shifted linear index, or
  // translate it into (row, col) for vector-shaped blocks.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType linear_coeff_impl(Index index, internal::true_type /* ForwardLinearAccess */) const
  {
    return m_argImpl.coeff(m_linear_offset.value() + index);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType linear_coeff_impl(Index index, internal::false_type /* not ForwardLinearAccess */) const
  {
    return coeff(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& linear_coeffRef_impl(Index index, internal::true_type /* ForwardLinearAccess */)
  {
    return m_argImpl.coeffRef(m_linear_offset.value() + index);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& linear_coeffRef_impl(Index index, internal::false_type /* not ForwardLinearAccess */)
  {
    return coeffRef(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
  }

  evaluator<ArgType> m_argImpl;
  // Start offsets collapse to compile-time 0 when the argument is a vector
  // along the matching dimension.
  const variable_if_dynamic<Index, (ArgType::RowsAtCompileTime == 1 && BlockRows==1) ? 0 : Dynamic> m_startRow;
  const variable_if_dynamic<Index, (ArgType::ColsAtCompileTime == 1 && BlockCols==1) ? 0 : Dynamic> m_startCol;
  const variable_if_dynamic<Index, ForwardLinearAccess ? Dynamic : 0> m_linear_offset;
};
1413 
// TODO: This evaluator does not actually use the child evaluator;
// all action is via the data() as returned by the Block expression.

// Direct-access Block: evaluated exactly like a Map over the block's data
// pointer, with strides taken from the block.
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /* HasDirectAccess */ true>
  : mapbase_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>,
                      typename Block<ArgType, BlockRows, BlockCols, InnerPanel>::PlainObject>
{
  typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
  typedef typename XprType::Scalar Scalar;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  explicit block_evaluator(const XprType& block)
    : mapbase_evaluator<XprType, typename XprType::PlainObject>(block)
  {
    // Verify the block's data pointer satisfies the alignment this evaluator
    // advertises (the check is skipped in constant-evaluated contexts, where
    // pointer values cannot be inspected).
    eigen_internal_assert((internal::is_constant_evaluated() || (std::uintptr_t(block.data()) % plain_enum_max(1,evaluator<XprType>::Alignment)) == 0) \
          && "data is not aligned");
  }
};
1433 
1434 
1435 // -------------------- Select --------------------
1436 // NOTE shall we introduce a ternary_evaluator?
1437 
1438 // TODO enable vectorization for Select
1439 template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>
1440 struct evaluator<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
1441  : evaluator_base<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
1442 {
1443  typedef Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> XprType;
1444  enum {
1445  CoeffReadCost = evaluator<ConditionMatrixType>::CoeffReadCost
1446  + plain_enum_max(evaluator<ThenMatrixType>::CoeffReadCost,
1447  evaluator<ElseMatrixType>::CoeffReadCost),
1448 
1449  Flags = (unsigned int)evaluator<ThenMatrixType>::Flags & evaluator<ElseMatrixType>::Flags & HereditaryBits,
1450 
1451  Alignment = plain_enum_min(evaluator<ThenMatrixType>::Alignment, evaluator<ElseMatrixType>::Alignment)
1452  };
1453 
1454  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1455  explicit evaluator(const XprType& select)
1456  : m_conditionImpl(select.conditionMatrix()),
1457  m_thenImpl(select.thenMatrix()),
1458  m_elseImpl(select.elseMatrix())
1459  {
1460  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
1461  }
1462 
1463  typedef typename XprType::CoeffReturnType CoeffReturnType;
1464 
1465  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1466  CoeffReturnType coeff(Index row, Index col) const
1467  {
1468  if (m_conditionImpl.coeff(row, col))
1469  return m_thenImpl.coeff(row, col);
1470  else
1471  return m_elseImpl.coeff(row, col);
1472  }
1473 
1474  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1475  CoeffReturnType coeff(Index index) const
1476  {
1477  if (m_conditionImpl.coeff(index))
1478  return m_thenImpl.coeff(index);
1479  else
1480  return m_elseImpl.coeff(index);
1481  }
1482 
1483 protected:
1484  evaluator<ConditionMatrixType> m_conditionImpl;
1485  evaluator<ThenMatrixType> m_thenImpl;
1486  evaluator<ElseMatrixType> m_elseImpl;
1487 };
1488 
1489 
1490 // -------------------- Replicate --------------------
1491 
1492 template<typename ArgType, int RowFactor, int ColFactor>
1493 struct unary_evaluator<Replicate<ArgType, RowFactor, ColFactor> >
1494  : evaluator_base<Replicate<ArgType, RowFactor, ColFactor> >
1495 {
1496  typedef Replicate<ArgType, RowFactor, ColFactor> XprType;
1497  typedef typename XprType::CoeffReturnType CoeffReturnType;
1498  enum {
1499  Factor = (RowFactor==Dynamic || ColFactor==Dynamic) ? Dynamic : RowFactor*ColFactor
1500  };
1501  typedef typename internal::nested_eval<ArgType,Factor>::type ArgTypeNested;
1502  typedef internal::remove_all_t<ArgTypeNested> ArgTypeNestedCleaned;
1503 
1504  enum {
1505  CoeffReadCost = evaluator<ArgTypeNestedCleaned>::CoeffReadCost,
1506  LinearAccessMask = XprType::IsVectorAtCompileTime ? LinearAccessBit : 0,
1507  Flags = (evaluator<ArgTypeNestedCleaned>::Flags & (HereditaryBits|LinearAccessMask) & ~RowMajorBit) | (traits<XprType>::Flags & RowMajorBit),
1508 
1509  Alignment = evaluator<ArgTypeNestedCleaned>::Alignment
1510  };
1511 
1512  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1513  explicit unary_evaluator(const XprType& replicate)
1514  : m_arg(replicate.nestedExpression()),
1515  m_argImpl(m_arg),
1516  m_rows(replicate.nestedExpression().rows()),
1517  m_cols(replicate.nestedExpression().cols())
1518  {}
1519 
1520  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1521  CoeffReturnType coeff(Index row, Index col) const
1522  {
1523  // try to avoid using modulo; this is a pure optimization strategy
1524  const Index actual_row = internal::traits<XprType>::RowsAtCompileTime==1 ? 0
1525  : RowFactor==1 ? row
1526  : row % m_rows.value();
1527  const Index actual_col = internal::traits<XprType>::ColsAtCompileTime==1 ? 0
1528  : ColFactor==1 ? col
1529  : col % m_cols.value();
1530 
1531  return m_argImpl.coeff(actual_row, actual_col);
1532  }
1533 
1534  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1535  CoeffReturnType coeff(Index index) const
1536  {
1537  // try to avoid using modulo; this is a pure optimization strategy
1538  const Index actual_index = internal::traits<XprType>::RowsAtCompileTime==1
1539  ? (ColFactor==1 ? index : index%m_cols.value())
1540  : (RowFactor==1 ? index : index%m_rows.value());
1541 
1542  return m_argImpl.coeff(actual_index);
1543  }
1544 
1545  template<int LoadMode, typename PacketType>
1546  EIGEN_STRONG_INLINE
1547  PacketType packet(Index row, Index col) const
1548  {
1549  const Index actual_row = internal::traits<XprType>::RowsAtCompileTime==1 ? 0
1550  : RowFactor==1 ? row
1551  : row % m_rows.value();
1552  const Index actual_col = internal::traits<XprType>::ColsAtCompileTime==1 ? 0
1553  : ColFactor==1 ? col
1554  : col % m_cols.value();
1555 
1556  return m_argImpl.template packet<LoadMode,PacketType>(actual_row, actual_col);
1557  }
1558 
1559  template<int LoadMode, typename PacketType>
1560  EIGEN_STRONG_INLINE
1561  PacketType packet(Index index) const
1562  {
1563  const Index actual_index = internal::traits<XprType>::RowsAtCompileTime==1
1564  ? (ColFactor==1 ? index : index%m_cols.value())
1565  : (RowFactor==1 ? index : index%m_rows.value());
1566 
1567  return m_argImpl.template packet<LoadMode,PacketType>(actual_index);
1568  }
1569 
1570 protected:
1571  const ArgTypeNested m_arg;
1572  evaluator<ArgTypeNestedCleaned> m_argImpl;
1573  const variable_if_dynamic<Index, ArgType::RowsAtCompileTime> m_rows;
1574  const variable_if_dynamic<Index, ArgType::ColsAtCompileTime> m_cols;
1575 };
1576 
1577 // -------------------- MatrixWrapper and ArrayWrapper --------------------
1578 //
1579 // evaluator_wrapper_base<T> is a common base class for the
1580 // MatrixWrapper and ArrayWrapper evaluators.
1581 
// Common base for the MatrixWrapper/ArrayWrapper evaluators: every accessor
// simply forwards to the evaluator of the wrapped (nested) expression.
template<typename XprType>
struct evaluator_wrapper_base
  : evaluator_base<XprType>
{
  typedef remove_all_t<typename XprType::NestedExpressionType> ArgType;
  enum {
    // Wrapping changes only the expression's kind (matrix vs. array), so all
    // cost/flag/alignment properties are taken verbatim from the argument.
    CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
    Flags = evaluator<ArgType>::Flags,
    Alignment = evaluator<ArgType>::Alignment
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  explicit evaluator_wrapper_base(const ArgType& arg) : m_argImpl(arg) {}

  typedef typename ArgType::Scalar Scalar;
  typedef typename ArgType::CoeffReturnType CoeffReturnType;

  // Read access, 2D and linear forms.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index row, Index col) const
  {
    return m_argImpl.coeff(row, col);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index index) const
  {
    return m_argImpl.coeff(index);
  }

  // Write access, 2D and linear forms.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index row, Index col)
  {
    return m_argImpl.coeffRef(row, col);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index index)
  {
    return m_argImpl.coeffRef(index);
  }

  // Vectorized loads, forwarded untouched.
  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index row, Index col) const
  {
    return m_argImpl.template packet<LoadMode,PacketType>(row, col);
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index index) const
  {
    return m_argImpl.template packet<LoadMode,PacketType>(index);
  }

  // Vectorized stores, forwarded untouched.
  template<int StoreMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index row, Index col, const PacketType& x)
  {
    m_argImpl.template writePacket<StoreMode>(row, col, x);
  }

  template<int StoreMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketType& x)
  {
    m_argImpl.template writePacket<StoreMode>(index, x);
  }

protected:
  // Evaluator of the wrapped expression; all calls above delegate to it.
  evaluator<ArgType> m_argImpl;
};
1654 
1655 template<typename TArgType>
1656 struct unary_evaluator<MatrixWrapper<TArgType> >
1657  : evaluator_wrapper_base<MatrixWrapper<TArgType> >
1658 {
1659  typedef MatrixWrapper<TArgType> XprType;
1660 
1661  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1662  explicit unary_evaluator(const XprType& wrapper)
1663  : evaluator_wrapper_base<MatrixWrapper<TArgType> >(wrapper.nestedExpression())
1664  { }
1665 };
1666 
1667 template<typename TArgType>
1668 struct unary_evaluator<ArrayWrapper<TArgType> >
1669  : evaluator_wrapper_base<ArrayWrapper<TArgType> >
1670 {
1671  typedef ArrayWrapper<TArgType> XprType;
1672 
1673  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1674  explicit unary_evaluator(const XprType& wrapper)
1675  : evaluator_wrapper_base<ArrayWrapper<TArgType> >(wrapper.nestedExpression())
1676  { }
1677 };
1678 
1679 
1680 // -------------------- Reverse --------------------
1681 
1682 // defined in Reverse.h:
1683 template<typename PacketType, bool ReversePacket> struct reverse_packet_cond;
1684 
template<typename ArgType, int Direction>
struct unary_evaluator<Reverse<ArgType, Direction> >
  : evaluator_base<Reverse<ArgType, Direction> >
{
  typedef Reverse<ArgType, Direction> XprType;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;

  enum {
    IsRowMajor = XprType::IsRowMajor,
    IsColMajor = !IsRowMajor,
    // Which coordinate(s) get mirrored, derived from the requested Direction.
    ReverseRow = (Direction == Vertical) || (Direction == BothDirections),
    ReverseCol = (Direction == Horizontal) || (Direction == BothDirections),
    // A loaded packet must itself be reversed in registers whenever the
    // reversed dimension coincides with the storage (inner) dimension;
    // otherwise only the load address changes.
    ReversePacket = (Direction == BothDirections)
                    || ((Direction == Vertical) && IsColMajor)
                    || ((Direction == Horizontal) && IsRowMajor),

    CoeffReadCost = evaluator<ArgType>::CoeffReadCost,

    // let's enable LinearAccess only with vectorization because of the product overhead
    // FIXME enable DirectAccess with negative strides?
    Flags0 = evaluator<ArgType>::Flags,
    LinearAccess = ( (Direction==BothDirections) && (int(Flags0)&PacketAccessBit) )
                  || ((ReverseRow && XprType::ColsAtCompileTime==1) || (ReverseCol && XprType::RowsAtCompileTime==1))
                 ? LinearAccessBit : 0,

    Flags = int(Flags0) & (HereditaryBits | PacketAccessBit | LinearAccess),

    Alignment = 0 // FIXME in some rare cases, Alignment could be preserved, like a Vector4f.
  };

  // m_rows/m_cols are forced to 1 along non-reversed dimensions (see the
  // member declarations below) so the linear-index formulas
  // (m_rows*m_cols - index - ...) remain correct for vectors.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  explicit unary_evaluator(const XprType& reverse)
    : m_argImpl(reverse.nestedExpression()),
      m_rows(ReverseRow ? reverse.nestedExpression().rows() : 1),
      m_cols(ReverseCol ? reverse.nestedExpression().cols() : 1)
  { }

  // Mirror each reversed coordinate: i -> size - i - 1.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index row, Index col) const
  {
    return m_argImpl.coeff(ReverseRow ? m_rows.value() - row - 1 : row,
                           ReverseCol ? m_cols.value() - col - 1 : col);
  }

  // Linear access: mirror the flattened index over the full size.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index index) const
  {
    return m_argImpl.coeff(m_rows.value() * m_cols.value() - index - 1);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index row, Index col)
  {
    return m_argImpl.coeffRef(ReverseRow ? m_rows.value() - row - 1 : row,
                              ReverseCol ? m_cols.value() - col - 1 : col);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index index)
  {
    return m_argImpl.coeffRef(m_rows.value() * m_cols.value() - index - 1);
  }

  // Along the packet axis the offset is PacketSize (not 1) so that the loaded
  // packet covers the correct PacketSize coefficients ending at the mirrored
  // position; the packet content is then reversed iff ReversePacket.
  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index row, Index col) const
  {
    enum {
      PacketSize = unpacket_traits<PacketType>::size,
      OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1,
      OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1
    };
    typedef internal::reverse_packet_cond<PacketType,ReversePacket> reverse_packet;
    return reverse_packet::run(m_argImpl.template packet<LoadMode,PacketType>(
                                  ReverseRow ? m_rows.value() - row - OffsetRow : row,
                                  ReverseCol ? m_cols.value() - col - OffsetCol : col));
  }

  // Linear packet load: mirrored address shifted by PacketSize, content always
  // reversed (linear access implies the packet spans the reversed direction).
  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index index) const
  {
    enum { PacketSize = unpacket_traits<PacketType>::size };
    return preverse(m_argImpl.template packet<LoadMode,PacketType>(m_rows.value() * m_cols.value() - index - PacketSize));
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index row, Index col, const PacketType& x)
  {
    // FIXME we could factorize some code with packet(i,j)
    enum {
      PacketSize = unpacket_traits<PacketType>::size,
      OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1,
      OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1
    };
    typedef internal::reverse_packet_cond<PacketType,ReversePacket> reverse_packet;
    m_argImpl.template writePacket<LoadMode>(
                                  ReverseRow ? m_rows.value() - row - OffsetRow : row,
                                  ReverseCol ? m_cols.value() - col - OffsetCol : col,
                                  reverse_packet::run(x));
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketType& x)
  {
    enum { PacketSize = unpacket_traits<PacketType>::size };
    m_argImpl.template writePacket<LoadMode>
      (m_rows.value() * m_cols.value() - index - PacketSize, preverse(x));
  }

protected:
  evaluator<ArgType> m_argImpl;

  // If we do not reverse rows, then we do not need to know the number of rows; same for columns
  // Nonetheless, in this case it is important to set to 1 such that the coeff(index) method works fine for vectors.
  const variable_if_dynamic<Index, ReverseRow ? ArgType::RowsAtCompileTime : 1> m_rows;
  const variable_if_dynamic<Index, ReverseCol ? ArgType::ColsAtCompileTime : 1> m_cols;
};
1806 
1807 
1808 // -------------------- Diagonal --------------------
1809 
1810 template<typename ArgType, int DiagIndex>
1811 struct evaluator<Diagonal<ArgType, DiagIndex> >
1812  : evaluator_base<Diagonal<ArgType, DiagIndex> >
1813 {
1814  typedef Diagonal<ArgType, DiagIndex> XprType;
1815 
1816  enum {
1817  CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
1818 
1819  Flags = (unsigned int)(evaluator<ArgType>::Flags & (HereditaryBits | DirectAccessBit) & ~RowMajorBit) | LinearAccessBit,
1820 
1821  Alignment = 0
1822  };
1823 
1824  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1825  explicit evaluator(const XprType& diagonal)
1826  : m_argImpl(diagonal.nestedExpression()),
1827  m_index(diagonal.index())
1828  { }
1829 
1830  typedef typename XprType::Scalar Scalar;
1831  typedef typename XprType::CoeffReturnType CoeffReturnType;
1832 
1833  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1834  CoeffReturnType coeff(Index row, Index) const
1835  {
1836  return m_argImpl.coeff(row + rowOffset(), row + colOffset());
1837  }
1838 
1839  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1840  CoeffReturnType coeff(Index index) const
1841  {
1842  return m_argImpl.coeff(index + rowOffset(), index + colOffset());
1843  }
1844 
1845  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1846  Scalar& coeffRef(Index row, Index)
1847  {
1848  return m_argImpl.coeffRef(row + rowOffset(), row + colOffset());
1849  }
1850 
1851  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1852  Scalar& coeffRef(Index index)
1853  {
1854  return m_argImpl.coeffRef(index + rowOffset(), index + colOffset());
1855  }
1856 
1857 protected:
1858  evaluator<ArgType> m_argImpl;
1859  const internal::variable_if_dynamicindex<Index, XprType::DiagIndex> m_index;
1860 
1861 private:
1862  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
1863  Index rowOffset() const { return m_index.value() > 0 ? 0 : -m_index.value(); }
1864  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
1865  Index colOffset() const { return m_index.value() > 0 ? m_index.value() : 0; }
1866 };
1867 
1868 
1869 //----------------------------------------------------------------------
1870 // deprecated code
1871 //----------------------------------------------------------------------
1872 
1873 // -------------------- EvalToTemp --------------------
1874 
1875 // expression class for evaluating nested expression to a temporary
1876 
template<typename ArgType> class EvalToTemp;

// EvalToTemp is a transparent wrapper: it inherits all the expression traits
// of its argument unchanged.
template<typename ArgType>
struct traits<EvalToTemp<ArgType> >
  : public traits<ArgType>
{ };
1883 
1884 template<typename ArgType>
1885 class EvalToTemp
1886  : public dense_xpr_base<EvalToTemp<ArgType> >::type
1887 {
1888  public:
1889 
1890  typedef typename dense_xpr_base<EvalToTemp>::type Base;
1891  EIGEN_GENERIC_PUBLIC_INTERFACE(EvalToTemp)
1892 
1893  explicit EvalToTemp(const ArgType& arg)
1894  : m_arg(arg)
1895  { }
1896 
1897  const ArgType& arg() const
1898  {
1899  return m_arg;
1900  }
1901 
1903  {
1904  return m_arg.rows();
1905  }
1906 
1908  {
1909  return m_arg.cols();
1910  }
1911 
1912  private:
1913  const ArgType& m_arg;
1914 };
1915 
// Evaluator for EvalToTemp: evaluates the wrapped expression into the
// m_result temporary, then behaves exactly like the evaluator of that
// plain object (via the base class).
template<typename ArgType>
struct evaluator<EvalToTemp<ArgType> >
  : public evaluator<typename ArgType::PlainObject>
{
  typedef EvalToTemp<ArgType> XprType;
  typedef typename ArgType::PlainObject PlainObject;
  typedef evaluator<PlainObject> Base;

  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr)
    : m_result(xpr.arg())
  {
    // m_result is only constructed after the base subobject, so the base
    // evaluator is re-initialized in place here, once m_result exists.
    internal::construct_at<Base>(this, m_result);
  }

  // This constructor is used when nesting an EvalTo evaluator in another evaluator
  EIGEN_DEVICE_FUNC evaluator(const ArgType& arg)
    : m_result(arg)
  {
    // Same placement re-construction trick as above.
    internal::construct_at<Base>(this, m_result);
  }

protected:
  // The temporary holding the evaluated expression; the base evaluator
  // points into it.
  PlainObject m_result;
};
1940 
1941 } // namespace internal
1942 
1943 } // end namespace Eigen
1944 
1945 #endif // EIGEN_COREEVALUATORS_H
Matrix3f m
int n
EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL FixedBlockXpr<...,... >::Type block(Index startRow, Index startCol, NRowsType blockRows, NColsType blockCols)
Definition: BlockMethods.h:96
RowXpr row(Index i)
This is the const version of row(). *‍/.
ColXpr col(Index i)
This is the const version of col().
IndexedView_or_Block operator()(const RowIndices &rowIndices, const ColIndices &colIndices)
#define EIGEN_PREDICT_TRUE(x)
Definition: Macros.h:1178
#define EIGEN_GENERIC_PUBLIC_INTERFACE(Derived)
Definition: Macros.h:1149
#define eigen_internal_assert(x)
Definition: Macros.h:908
#define EIGEN_NOEXCEPT
Definition: Macros.h:1260
#define EIGEN_CONSTEXPR
Definition: Macros.h:747
#define EIGEN_UNUSED_VARIABLE(var)
Definition: Macros.h:957
#define EIGEN_DEVICE_FUNC
Definition: Macros.h:883
#define eigen_assert(x)
Definition: Macros.h:902
int data[]
#define EIGEN_STATIC_ASSERT(X, MSG)
Definition: StaticAssert.h:26
#define EIGEN_INTERNAL_CHECK_COST_VALUE(C)
Definition: StaticAssert.h:112
Eigen::Triplet< double > T
@ AlignedMask
Definition: Constants.h:241
@ AlignedMax
Definition: Constants.h:254
@ BothDirections
Definition: Constants.h:272
@ Horizontal
Definition: Constants.h:269
@ Vertical
Definition: Constants.h:266
const unsigned int ActualPacketAccessBit
Definition: Constants.h:107
const unsigned int PacketAccessBit
Definition: Constants.h:96
const unsigned int LinearAccessBit
Definition: Constants.h:132
const unsigned int EvalBeforeNestingBit
Definition: Constants.h:72
const unsigned int DirectAccessBit
Definition: Constants.h:157
const unsigned int RowMajorBit
Definition: Constants.h:68
constexpr int plain_enum_min(A a, B b)
Definition: Meta.h:516
constexpr int plain_enum_max(A a, B b)
Definition: Meta.h:524
constexpr bool check_implication(bool a, bool b)
Definition: Meta.h:579
Packet2cf preverse(const Packet2cf &a)
constexpr bool is_constant_evaluated()
Definition: Meta.h:587
: InteropHeaders
Definition: Core:139
const unsigned int HereditaryBits
Definition: Constants.h:197
EIGEN_DEFAULT_DENSE_INDEX_TYPE Index
The Index type as used for the API.
Definition: Meta.h:82
const Eigen::CwiseUnaryOp< Eigen::internal::scalar_arg_op< typename Derived::Scalar >, const Derived > arg(const Eigen::ArrayBase< Derived > &x)
const int Dynamic
Definition: Constants.h:24
std::ptrdiff_t j