GeneralProduct.h
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
5 // Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
6 //
7 // This Source Code Form is subject to the terms of the Mozilla
8 // Public License v. 2.0. If a copy of the MPL was not distributed
9 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10 
11 #ifndef EIGEN_GENERAL_PRODUCT_H
12 #define EIGEN_GENERAL_PRODUCT_H
13 
14 #include "./InternalHeaderCheck.h"
15 
16 namespace Eigen {
17 
18 enum {
19  Large = 2,
20  Small = 3
21 };
22 
23 // Define the threshold value to fall back from the generic matrix-matrix product
24 // implementation (heavy) to the lightweight coeff-based product one.
25 // See generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
26 // in products/GeneralMatrixMatrix.h for more details.
27 // TODO This threshold should also be used in the compile-time selector below.
28 #ifndef EIGEN_GEMM_TO_COEFFBASED_THRESHOLD
29 // This default value has been obtained on a Haswell architecture.
30 #define EIGEN_GEMM_TO_COEFFBASED_THRESHOLD 20
31 #endif
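// A minimal usage sketch (not part of this header): since the macro is only defined
// when not already set, a project may tune this fallback threshold by defining it
// before including any Eigen header, e.g.
//
//   #define EIGEN_GEMM_TO_COEFFBASED_THRESHOLD 32
//   #include <Eigen/Dense>
//
// Products whose dimensions are small with respect to this value are then evaluated
// with the lightweight coeff-based kernel instead of the full GEMM path.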
32 
33 namespace internal {
34 
35 template<int Rows, int Cols, int Depth> struct product_type_selector;
36 
37 template<int Size, int MaxSize> struct product_size_category
38 {
39  enum {
40  #ifndef EIGEN_GPU_COMPILE_PHASE
41  is_large = MaxSize == Dynamic ||
42  Size >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD ||
43  (Size==Dynamic && MaxSize>=EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD),
44  #else
45  is_large = 0,
46  #endif
47  value = is_large ? Large
48  : Size == 1 ? 1
49  : Small
50  };
51 };
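// Illustrative classification (assuming the default cache-friendly threshold, so a
// small fixed size such as 4 counts as Small and a Dynamic size as Large):
//
//   product_size_category<1, 1>::value             == 1      (unit dimension)
//   product_size_category<4, 4>::value             == Small  (small fixed size)
//   product_size_category<Dynamic, Dynamic>::value == Large  (runtime size, assumed large)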
52 
53 template<typename Lhs, typename Rhs> struct product_type
54 {
55  typedef remove_all_t<Lhs> Lhs_;
56  typedef remove_all_t<Rhs> Rhs_;
57  enum {
58  MaxRows = traits<Lhs_>::MaxRowsAtCompileTime,
59  Rows = traits<Lhs_>::RowsAtCompileTime,
60  MaxCols = traits<Rhs_>::MaxColsAtCompileTime,
61  Cols = traits<Rhs_>::ColsAtCompileTime,
62  MaxDepth = min_size_prefer_fixed(traits<Lhs_>::MaxColsAtCompileTime,
63  traits<Rhs_>::MaxRowsAtCompileTime),
64  Depth = min_size_prefer_fixed(traits<Lhs_>::ColsAtCompileTime,
65  traits<Rhs_>::RowsAtCompileTime)
66  };
67 
68  // the splitting into different lines of code here, introducing the _select enums and the typedef below,
69  // is to work around an internal compiler error with gcc 4.1 and 4.2.
70 private:
71  enum {
72  rows_select = product_size_category<Rows,MaxRows>::value,
73  cols_select = product_size_category<Cols,MaxCols>::value,
74  depth_select = product_size_category<Depth,MaxDepth>::value
75  };
76  typedef product_type_selector<rows_select, cols_select, depth_select> selector;
77 
78 public:
79  enum {
80  value = selector::ret,
81  ret = selector::ret
82  };
83 #ifdef EIGEN_DEBUG_PRODUCT
84  static void debug()
85  {
86  EIGEN_DEBUG_VAR(Rows);
87  EIGEN_DEBUG_VAR(Cols);
88  EIGEN_DEBUG_VAR(Depth);
89  EIGEN_DEBUG_VAR(rows_select);
90  EIGEN_DEBUG_VAR(cols_select);
91  EIGEN_DEBUG_VAR(depth_select);
92  EIGEN_DEBUG_VAR(value);
93  }
94 #endif
95 };
96 
97 /* The following allows selecting the kind of product at compile time
98  * based on the three dimensions of the product.
99  * This is a compile-time mapping from {1,Small,Large}^3 -> {product types} */
100 // FIXME I'm not sure the current mapping is the ideal one.
101 template<int M, int N> struct product_type_selector<M,N,1> { enum { ret = OuterProduct }; };
102 template<int M> struct product_type_selector<M, 1, 1> { enum { ret = LazyCoeffBasedProductMode }; };
103 template<int N> struct product_type_selector<1, N, 1> { enum { ret = LazyCoeffBasedProductMode }; };
104 template<int Depth> struct product_type_selector<1, 1, Depth> { enum { ret = InnerProduct }; };
105 template<> struct product_type_selector<1, 1, 1> { enum { ret = InnerProduct }; };
106 template<> struct product_type_selector<Small,1, Small> { enum { ret = CoeffBasedProductMode }; };
107 template<> struct product_type_selector<1, Small,Small> { enum { ret = CoeffBasedProductMode }; };
108 template<> struct product_type_selector<Small,Small,Small> { enum { ret = CoeffBasedProductMode }; };
109 template<> struct product_type_selector<Small, Small, 1> { enum { ret = LazyCoeffBasedProductMode }; };
110 template<> struct product_type_selector<Small, Large, 1> { enum { ret = LazyCoeffBasedProductMode }; };
111 template<> struct product_type_selector<Large, Small, 1> { enum { ret = LazyCoeffBasedProductMode }; };
112 template<> struct product_type_selector<1, Large,Small> { enum { ret = CoeffBasedProductMode }; };
113 template<> struct product_type_selector<1, Large,Large> { enum { ret = GemvProduct }; };
114 template<> struct product_type_selector<1, Small,Large> { enum { ret = CoeffBasedProductMode }; };
115 template<> struct product_type_selector<Large,1, Small> { enum { ret = CoeffBasedProductMode }; };
116 template<> struct product_type_selector<Large,1, Large> { enum { ret = GemvProduct }; };
117 template<> struct product_type_selector<Small,1, Large> { enum { ret = CoeffBasedProductMode }; };
118 template<> struct product_type_selector<Small,Small,Large> { enum { ret = GemmProduct }; };
119 template<> struct product_type_selector<Large,Small,Large> { enum { ret = GemmProduct }; };
120 template<> struct product_type_selector<Small,Large,Large> { enum { ret = GemmProduct }; };
121 template<> struct product_type_selector<Large,Large,Large> { enum { ret = GemmProduct }; };
122 template<> struct product_type_selector<Large,Small,Small> { enum { ret = CoeffBasedProductMode }; };
123 template<> struct product_type_selector<Small,Large,Small> { enum { ret = CoeffBasedProductMode }; };
124 template<> struct product_type_selector<Large,Large,Small> { enum { ret = GemmProduct }; };
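// Illustrative examples of this mapping, assuming the default thresholds so that a
// fixed dimension of 4 is classified as Small and a Dynamic dimension as Large
// (the exact dispatch may differ across Eigen versions):
//
//   Matrix4f    * Matrix4f     -> {Small,Small,Small} -> CoeffBasedProductMode
//   MatrixXf    * MatrixXf     -> {Large,Large,Large} -> GemmProduct
//   MatrixXf    * VectorXf     -> {Large,1,Large}     -> GemvProduct
//   VectorXf    * RowVectorXf  -> depth == 1          -> OuterProduct
//   RowVectorXf * VectorXf     -> 1x1 result          -> InnerProduct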
125 
126 } // end namespace internal
127 
128 
129 /***********************************************************************
130 *  Implementation of Inner Vector Vector Product
131 ***********************************************************************/
132 // FIXME : maybe the "inner product" could return a Scalar
133 // instead of a 1x1 matrix ??
134 // Pro: more natural for the user
135 // Cons: this could be a problem if in a meta unrolled algorithm a matrix-matrix
136 // product ends up being a row-vector times col-vector product... To tackle this use
137 // case, we could have a specialization for Block<MatrixType,1,1> with: operator=(Scalar x);
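// For reference, the current user-visible behaviour looks like this (illustrative sketch):
//
//   Eigen::Vector3f a, b;
//   Eigen::Matrix<float,1,1> s = a.transpose() * b;  // inner product yields a 1x1 matrix
//   float x = (a.transpose() * b).value();           // extract the unique coefficient
//   float y = a.dot(b);                              // or use dot() to get a Scalar directly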
138 
139 
140 /***********************************************************************
141 *  Implementation of Outer Vector Vector Product
142 ***********************************************************************/
143 
144 /***********************************************************************
145 *  Implementation of General Matrix Vector Product
146 ***********************************************************************/
147 /* According to the shape/flags of the matrix we have to distinguish 3 different cases:
148  * 1 - the matrix is col-major, BLAS compatible and M is large => call fast BLAS-like colmajor routine
149  * 2 - the matrix is row-major, BLAS compatible and N is large => call fast BLAS-like rowmajor routine
150  * 3 - all other cases are handled using a simple loop along the outer-storage direction.
151  * Therefore we need a lower level meta selector.
152  * Furthermore, if the matrix is the rhs, then the product has to be transposed.
153  */
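// Rough user-level sketch of which expressions end up in each case (assuming the
// default column-major storage; the actual dispatch is made by the selector below):
//
//   Eigen::MatrixXf A;  Eigen::VectorXf x, y;
//   y.noalias() = A * x;              // case 1: col-major matrix, col-major kernel
//   y.noalias() = A.transpose() * x;  // case 2: seen as row-major, row-major kernel
//   // matrices without direct (BLAS-compatible) storage fall into case 3.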
154 namespace internal {
155 
156 template<int Side, int StorageOrder, bool BlasCompatible>
157 struct gemv_dense_selector;
158 
159 } // end namespace internal
160 
161 namespace internal {
162 
163 template<typename Scalar,int Size,int MaxSize,bool Cond> struct gemv_static_vector_if;
164 
165 template<typename Scalar,int Size,int MaxSize>
166 struct gemv_static_vector_if<Scalar,Size,MaxSize,false>
167 {
168  EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Scalar* data() { eigen_internal_assert(false && "should never be called"); return 0; }
169 };
170 
171 template<typename Scalar,int Size>
172 struct gemv_static_vector_if<Scalar,Size,Dynamic,true>
173 {
174  EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Scalar* data() { return 0; }
175 };
176 
177 template<typename Scalar,int Size,int MaxSize>
178 struct gemv_static_vector_if<Scalar,Size,MaxSize,true>
179 {
180  enum {
181  ForceAlignment = internal::packet_traits<Scalar>::Vectorizable,
182  PacketSize = internal::packet_traits<Scalar>::size
183  };
184  #if EIGEN_MAX_STATIC_ALIGN_BYTES!=0
185  internal::plain_array<Scalar, internal::min_size_prefer_fixed(Size, MaxSize), 0,
186  internal::plain_enum_min(AlignedMax, PacketSize)> m_data;
187  EIGEN_STRONG_INLINE Scalar* data() { return m_data.array; }
188  #else
189  // Some architectures cannot align on the stack,
190  // => let's manually enforce alignment by allocating more data and returning the address of the first aligned element.
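 // For instance, assuming EIGEN_MAX_ALIGN_BYTES == 16 and a buffer starting at
 // address 0x1004 (illustrative values), the expression below yields
 // (0x1004 & ~0xF) + 0x10 == 0x1010, the first 16-byte aligned address
 // inside the over-allocated buffer.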
191  internal::plain_array<Scalar, internal::min_size_prefer_fixed(Size, MaxSize)+(ForceAlignment?EIGEN_MAX_ALIGN_BYTES:0),0> m_data;
192  EIGEN_STRONG_INLINE Scalar* data() {
193  return ForceAlignment
194  ? reinterpret_cast<Scalar*>((std::uintptr_t(m_data.array) & ~(std::size_t(EIGEN_MAX_ALIGN_BYTES-1))) + EIGEN_MAX_ALIGN_BYTES)
195  : m_data.array;
196  }
197  #endif
198 };
199 
200 // The vector is on the left => transposition
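// (i.e. dest = lhs * rhs is evaluated as dest^T = rhs^T * lhs^T, which reuses the
// OnTheRight kernels with the opposite storage order; illustrative note.)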
201 template<int StorageOrder, bool BlasCompatible>
202 struct gemv_dense_selector<OnTheLeft,StorageOrder,BlasCompatible>
203 {
204  template<typename Lhs, typename Rhs, typename Dest>
205  static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha)
206  {
207  Transpose<Dest> destT(dest);
208  enum { OtherStorageOrder = StorageOrder == RowMajor ? ColMajor : RowMajor };
209  gemv_dense_selector<OnTheRight,OtherStorageOrder,BlasCompatible>
210  ::run(rhs.transpose(), lhs.transpose(), destT, alpha);
211  }
212 };
213 
214 template<> struct gemv_dense_selector<OnTheRight,ColMajor,true>
215 {
216  template<typename Lhs, typename Rhs, typename Dest>
217  static inline void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha)
218  {
219  typedef typename Lhs::Scalar LhsScalar;
220  typedef typename Rhs::Scalar RhsScalar;
221  typedef typename Dest::Scalar ResScalar;
222 
223  typedef internal::blas_traits<Lhs> LhsBlasTraits;
224  typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
225  typedef internal::blas_traits<Rhs> RhsBlasTraits;
226  typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
227 
228  typedef Map<Matrix<ResScalar,Dynamic,1>, plain_enum_min(AlignedMax, internal::packet_traits<ResScalar>::size)> MappedDest;
229 
230  ActualLhsType actualLhs = LhsBlasTraits::extract(lhs);
231  ActualRhsType actualRhs = RhsBlasTraits::extract(rhs);
232 
233  ResScalar actualAlpha = combine_scalar_factors(alpha, lhs, rhs);
234 
235  // make sure Dest is a compile-time vector type (bug 1166)
236  typedef std::conditional_t<Dest::IsVectorAtCompileTime, Dest, typename Dest::ColXpr> ActualDest;
237 
238  enum {
239  // FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1
240  // on the other hand, it is good for the cache to pack the vector anyway...
241  EvalToDestAtCompileTime = (ActualDest::InnerStrideAtCompileTime==1),
242  ComplexByReal = (NumTraits<LhsScalar>::IsComplex) && (!NumTraits<RhsScalar>::IsComplex),
243  MightCannotUseDest = ((!EvalToDestAtCompileTime) || ComplexByReal) && (ActualDest::MaxSizeAtCompileTime!=0)
244  };
245 
246  typedef const_blas_data_mapper<LhsScalar,Index,ColMajor> LhsMapper;
247  typedef const_blas_data_mapper<RhsScalar,Index,RowMajor> RhsMapper;
248  RhsScalar compatibleAlpha = get_factor<ResScalar,RhsScalar>::run(actualAlpha);
249 
250  if(!MightCannotUseDest)
251  {
252  // shortcut if we are sure to be able to use dest directly,
253  // this helps the compiler generate cleaner and more optimized code for the most common cases
254  general_matrix_vector_product
255  <Index,LhsScalar,LhsMapper,ColMajor,LhsBlasTraits::NeedToConjugate,RhsScalar,RhsMapper,RhsBlasTraits::NeedToConjugate>::run(
256  actualLhs.rows(), actualLhs.cols(),
257  LhsMapper(actualLhs.data(), actualLhs.outerStride()),
258  RhsMapper(actualRhs.data(), actualRhs.innerStride()),
259  dest.data(), 1,
260  compatibleAlpha);
261  }
262  else
263  {
264  gemv_static_vector_if<ResScalar,ActualDest::SizeAtCompileTime,ActualDest::MaxSizeAtCompileTime,MightCannotUseDest> static_dest;
265 
266  const bool alphaIsCompatible = (!ComplexByReal) || (numext::is_exactly_zero(numext::imag(actualAlpha)));
267  const bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible;
268 
269  ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(),
270  evalToDest ? dest.data() : static_dest.data());
271 
272  if(!evalToDest)
273  {
274  #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
275  Index size = dest.size();
276  EIGEN_DENSE_STORAGE_CTOR_PLUGIN
277  #endif
278  if(!alphaIsCompatible)
279  {
280  MappedDest(actualDestPtr, dest.size()).setZero();
281  compatibleAlpha = RhsScalar(1);
282  }
283  else
284  MappedDest(actualDestPtr, dest.size()) = dest;
285  }
286 
287  general_matrix_vector_product
288  <Index,LhsScalar,LhsMapper,ColMajor,LhsBlasTraits::NeedToConjugate,RhsScalar,RhsMapper,RhsBlasTraits::NeedToConjugate>::run(
289  actualLhs.rows(), actualLhs.cols(),
290  LhsMapper(actualLhs.data(), actualLhs.outerStride()),
291  RhsMapper(actualRhs.data(), actualRhs.innerStride()),
292  actualDestPtr, 1,
293  compatibleAlpha);
294 
295  if (!evalToDest)
296  {
297  if(!alphaIsCompatible)
298  dest.matrix() += actualAlpha * MappedDest(actualDestPtr, dest.size());
299  else
300  dest = MappedDest(actualDestPtr, dest.size());
301  }
302  }
303  }
304 };
305 
306 template<> struct gemv_dense_selector<OnTheRight,RowMajor,true>
307 {
308  template<typename Lhs, typename Rhs, typename Dest>
309  static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha)
310  {
311  typedef typename Lhs::Scalar LhsScalar;
312  typedef typename Rhs::Scalar RhsScalar;
313  typedef typename Dest::Scalar ResScalar;
314 
315  typedef internal::blas_traits<Lhs> LhsBlasTraits;
316  typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
317  typedef internal::blas_traits<Rhs> RhsBlasTraits;
318  typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
319  typedef internal::remove_all_t<ActualRhsType> ActualRhsTypeCleaned;
320 
321  std::add_const_t<ActualLhsType> actualLhs = LhsBlasTraits::extract(lhs);
322  std::add_const_t<ActualRhsType> actualRhs = RhsBlasTraits::extract(rhs);
323 
324  ResScalar actualAlpha = combine_scalar_factors(alpha, lhs, rhs);
325 
326  enum {
327  // FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1
328  // on the other hand, it is good for the cache to pack the vector anyway...
329  DirectlyUseRhs = ActualRhsTypeCleaned::InnerStrideAtCompileTime==1 || ActualRhsTypeCleaned::MaxSizeAtCompileTime==0
330  };
331 
332  gemv_static_vector_if<RhsScalar,ActualRhsTypeCleaned::SizeAtCompileTime,ActualRhsTypeCleaned::MaxSizeAtCompileTime,!DirectlyUseRhs> static_rhs;
333 
334  ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,actualRhs.size(),
335  DirectlyUseRhs ? const_cast<RhsScalar*>(actualRhs.data()) : static_rhs.data());
336 
337  if(!DirectlyUseRhs)
338  {
339  #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
340  Index size = actualRhs.size();
341  EIGEN_DENSE_STORAGE_CTOR_PLUGIN
342  #endif
343  Map<typename ActualRhsTypeCleaned::PlainObject>(actualRhsPtr, actualRhs.size()) = actualRhs;
344  }
345 
346  typedef const_blas_data_mapper<LhsScalar,Index,RowMajor> LhsMapper;
347  typedef const_blas_data_mapper<RhsScalar,Index,ColMajor> RhsMapper;
348  general_matrix_vector_product
349  <Index,LhsScalar,LhsMapper,RowMajor,LhsBlasTraits::NeedToConjugate,RhsScalar,RhsMapper,RhsBlasTraits::NeedToConjugate>::run(
350  actualLhs.rows(), actualLhs.cols(),
351  LhsMapper(actualLhs.data(), actualLhs.outerStride()),
352  RhsMapper(actualRhsPtr, 1),
353  dest.data(), dest.col(0).innerStride(), //NOTE if dest is not a vector at compile-time, then dest.innerStride() might be wrong. (bug 1166)
354  actualAlpha);
355  }
356 };
357 
358 template<> struct gemv_dense_selector<OnTheRight,ColMajor,false>
359 {
360  template<typename Lhs, typename Rhs, typename Dest>
361  static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha)
362  {
363  EIGEN_STATIC_ASSERT((!nested_eval<Lhs,1>::Evaluate),EIGEN_INTERNAL_COMPILATION_ERROR_OR_YOU_MADE_A_PROGRAMMING_MISTAKE);
364  // TODO if rhs is large enough it might be beneficial to make sure that dest is sequentially stored in memory, otherwise use a temp
365  typename nested_eval<Rhs,1>::type actual_rhs(rhs);
366  const Index size = rhs.rows();
367  for(Index k=0; k<size; ++k)
368  dest += (alpha*actual_rhs.coeff(k)) * lhs.col(k);
369  }
370 };
371 
372 template<> struct gemv_dense_selector<OnTheRight,RowMajor,false>
373 {
374  template<typename Lhs, typename Rhs, typename Dest>
375  static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha)
376  {
377  EIGEN_STATIC_ASSERT((!nested_eval<Lhs,1>::Evaluate),EIGEN_INTERNAL_COMPILATION_ERROR_OR_YOU_MADE_A_PROGRAMMING_MISTAKE);
378  typename nested_eval<Rhs,Lhs::RowsAtCompileTime>::type actual_rhs(rhs);
379  const Index rows = dest.rows();
380  for(Index i=0; i<rows; ++i)
381  dest.coeffRef(i) += alpha * (lhs.row(i).cwiseProduct(actual_rhs.transpose())).sum();
382  }
383 };
384 
385 } // end namespace internal
386 
387 
397 template<typename Derived>
398 template<typename OtherDerived>
399 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
400 const Product<Derived, OtherDerived>
401 MatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
402 {
403  // A note regarding the function declaration: in MSVC, this function will sometimes
404  // not be inlined because DenseStorage is an unwindable object for dynamic
405  // matrices, and product types hold a member to store the result.
406  // Thus, tagging this function with EIGEN_STRONG_INLINE does not help.
407  enum {
408  ProductIsValid = Derived::ColsAtCompileTime==Dynamic
409  || OtherDerived::RowsAtCompileTime==Dynamic
410  || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime),
411  AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime,
412  SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived)
413  };
414  // note to the lost user:
415  // * for a dot product use: v1.dot(v2)
416  // * for a coeff-wise product use: v1.cwiseProduct(v2)
417  EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),
418  INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
419  EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
420  INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
421  EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
422 #ifdef EIGEN_DEBUG_PRODUCT
423  internal::product_type<Derived,OtherDerived>::debug();
424 #endif
425 
426  return Product<Derived, OtherDerived>(derived(), other.derived());
427 }
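// Typical usage of operator* (illustrative sketch, not part of this header):
//
//   Eigen::Matrix3f A, B;  Eigen::Vector3f v;
//   Eigen::Matrix3f C = A * B;              // matrix-matrix product
//   Eigen::Vector3f w = A * v;              // matrix-vector product
//   float d = v.dot(w);                     // vector-vector: use dot() ...
//   Eigen::Vector3f p = v.cwiseProduct(w);  // ... or cwiseProduct(), as the asserts above suggest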
428 
440 template<typename Derived>
441 template<typename OtherDerived>
442 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
443 const Product<Derived,OtherDerived,LazyProduct>
444 MatrixBase<Derived>::lazyProduct(const MatrixBase<OtherDerived> &other) const
445 {
446  enum {
447  ProductIsValid = Derived::ColsAtCompileTime==Dynamic
448  || OtherDerived::RowsAtCompileTime==Dynamic
449  || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime),
450  AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime,
451  SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived)
452  };
453  // note to the lost user:
454  // * for a dot product use: v1.dot(v2)
455  // * for a coeff-wise product use: v1.cwiseProduct(v2)
456  EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),
457  INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
458  EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
459  INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
460  EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
461 
462  return Product<Derived,OtherDerived,LazyProduct>(derived(), other.derived());
463 }
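// Typical usage of lazyProduct (illustrative sketch): force coefficient-based
// evaluation and avoid the temporary that a plain product assignment may create:
//
//   Eigen::MatrixXf A, B, C;
//   C.noalias() = A.lazyProduct(B);  // coeff-based evaluation, no GEMM dispatch
//   C.noalias() = A * B;             // for comparison: may use the cache-friendly GEMM kernel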
464 
465 } // end namespace Eigen
466 
467 #endif // EIGEN_GENERAL_PRODUCT_H