TriangularMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_TRIANGULAR_MATRIX_MATRIX_H
#define EIGEN_TRIANGULAR_MATRIX_MATRIX_H

#include "../InternalHeaderCheck.h"

namespace Eigen {

namespace internal {

// template<typename Scalar, int mr, int StorageOrder, bool Conjugate, int Mode>
// struct gemm_pack_lhs_triangular
// {
//   Matrix<Scalar,mr,mr,
//   void operator()(Scalar* blockA, const EIGEN_RESTRICT Scalar* lhs_, int lhsStride, int depth, int rows)
//   {
//     conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
//     const_blas_data_mapper<Scalar, StorageOrder> lhs(lhs_,lhsStride);
//     int count = 0;
//     const int peeled_mc = (rows/mr)*mr;
//     for(int i=0; i<peeled_mc; i+=mr)
//     {
//       for(int k=0; k<depth; k++)
//         for(int w=0; w<mr; w++)
//           blockA[count++] = cj(lhs(i+w, k));
//     }
//     for(int i=peeled_mc; i<rows; i++)
//     {
//       for(int k=0; k<depth; k++)
//         blockA[count++] = cj(lhs(i, k));
//     }
//   }
// };

/* Optimized triangular matrix * matrix (_TRMM++) product built on top of
 * the general matrix matrix product.
 */
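//
// For orientation only, a sketch of a typical way to reach this kernel from user code
// (A, B, C and alpha below are arbitrary user-side matrices and a scalar, not names
// defined in this file): a triangular-times-dense expression such as
//
//   C.noalias() += alpha * A.triangularView<Lower>() * B;
//
// is evaluated through triangular_product_impl (defined at the end of this file) and
// eventually dispatched to one of the product_triangular_matrix_matrix specializations
// below.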
template <typename Scalar, typename Index,
          int Mode, bool LhsIsTriangular,
          int LhsStorageOrder, bool ConjugateLhs,
          int RhsStorageOrder, bool ConjugateRhs,
          int ResStorageOrder, int ResInnerStride,
          int Version = Specialized>
struct product_triangular_matrix_matrix;

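// A row-major destination is handled by transposing the whole product: since
// (A*B)^T = B^T*A^T, the two operands are swapped, their storage orders, conjugation
// flags and the Lower/Upper side are exchanged accordingly, and the column-major
// specializations below do the actual work.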
template <typename Scalar, typename Index,
          int Mode, bool LhsIsTriangular,
          int LhsStorageOrder, bool ConjugateLhs,
          int RhsStorageOrder, bool ConjugateRhs,
          int ResInnerStride, int Version>
struct product_triangular_matrix_matrix<Scalar,Index,Mode,LhsIsTriangular,
                                        LhsStorageOrder,ConjugateLhs,
                                        RhsStorageOrder,ConjugateRhs,RowMajor,ResInnerStride,Version>
{
  static EIGEN_STRONG_INLINE void run(
    Index rows, Index cols, Index depth,
    const Scalar* lhs, Index lhsStride,
    const Scalar* rhs, Index rhsStride,
    Scalar* res, Index resIncr, Index resStride,
    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
  {
    product_triangular_matrix_matrix<Scalar, Index,
      (Mode&(UnitDiag|ZeroDiag)) | ((Mode&Upper) ? Lower : Upper),
      (!LhsIsTriangular),
      RhsStorageOrder==RowMajor ? ColMajor : RowMajor,
      ConjugateRhs,
      LhsStorageOrder==RowMajor ? ColMajor : RowMajor,
      ConjugateLhs,
      ColMajor, ResInnerStride>
      ::run(cols, rows, depth, rhs, rhsStride, lhs, lhsStride, res, resIncr, resStride, alpha, blocking);
  }
};

// implements col-major += alpha * op(triangular) * op(general)
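// Here the triangular factor is the lhs. The depth dimension is traversed by blocks of
// at most kc entries; for each block the rhs panel is packed once, while the lhs panel
// is processed in the three parts described inside run() (zero part, diagonal block,
// dense part).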
template <typename Scalar, typename Index, int Mode,
          int LhsStorageOrder, bool ConjugateLhs,
          int RhsStorageOrder, bool ConjugateRhs,
          int ResInnerStride, int Version>
struct product_triangular_matrix_matrix<Scalar,Index,Mode,true,
                                        LhsStorageOrder,ConjugateLhs,
                                        RhsStorageOrder,ConjugateRhs,ColMajor,ResInnerStride,Version>
{

  typedef gebp_traits<Scalar,Scalar> Traits;
  enum {
    SmallPanelWidth = 2 * plain_enum_max(Traits::mr, Traits::nr),
    IsLower = (Mode&Lower) == Lower,
    SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1
  };

  static EIGEN_DONT_INLINE void run(
    Index _rows, Index _cols, Index _depth,
    const Scalar* lhs_, Index lhsStride,
    const Scalar* rhs_, Index rhsStride,
    Scalar* res, Index resIncr, Index resStride,
    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking);
};

template <typename Scalar, typename Index, int Mode,
          int LhsStorageOrder, bool ConjugateLhs,
          int RhsStorageOrder, bool ConjugateRhs,
          int ResInnerStride, int Version>
EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,true,
                                                        LhsStorageOrder,ConjugateLhs,
                                                        RhsStorageOrder,ConjugateRhs,ColMajor,ResInnerStride,Version>::run(
    Index _rows, Index _cols, Index _depth,
    const Scalar* lhs_, Index lhsStride,
    const Scalar* rhs_, Index rhsStride,
    Scalar* res_, Index resIncr, Index resStride,
    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
  {
    // strip zeros
    Index diagSize = (std::min)(_rows,_depth);
    Index rows     = IsLower ? _rows : diagSize;
    Index depth    = IsLower ? diagSize : _depth;
    Index cols     = _cols;

    typedef const_blas_data_mapper<Scalar, Index, LhsStorageOrder> LhsMapper;
    typedef const_blas_data_mapper<Scalar, Index, RhsStorageOrder> RhsMapper;
    typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor, Unaligned, ResInnerStride> ResMapper;
    LhsMapper lhs(lhs_,lhsStride);
    RhsMapper rhs(rhs_,rhsStride);
    ResMapper res(res_, resStride, resIncr);

    Index kc = blocking.kc();                   // cache block size along the K direction
    Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction
    // The small panel size must not be larger than blocking size.
    // Usually this should never be the case because SmallPanelWidth^2 is very small
    // compared to L2 cache size, but let's be safe:
    Index panelWidth = (std::min)(Index(SmallPanelWidth),(std::min)(kc,mc));

    std::size_t sizeA = kc*mc;
    std::size_t sizeB = kc*cols;

    ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());
    ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());

    // To work around an "error: member reference base type 'Matrix<...>
    // (Eigen::internal::constructor_without_unaligned_array_assert (*)())' is
    // not a structure or union" compilation error in nvcc (tested V8.0.61),
    // create a dummy internal::constructor_without_unaligned_array_assert
    // object to pass to the Matrix constructor.
    internal::constructor_without_unaligned_array_assert a;
    Matrix<Scalar,SmallPanelWidth,SmallPanelWidth,LhsStorageOrder> triangularBuffer(a);
    triangularBuffer.setZero();
    if((Mode&ZeroDiag)==ZeroDiag)
      triangularBuffer.diagonal().setZero();
    else
      triangularBuffer.diagonal().setOnes();

    gebp_kernel<Scalar, Scalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
    gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder> pack_lhs;
    gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr,RhsStorageOrder> pack_rhs;

    for(Index k2=IsLower ? depth : 0;
        IsLower ? k2>0 : k2<depth;
        IsLower ? k2-=kc : k2+=kc)
    {
      Index actual_kc = (std::min)(IsLower ? k2 : depth-k2, kc);
      Index actual_k2 = IsLower ? k2-actual_kc : k2;

      // align blocks with the end of the triangular part for trapezoidal lhs
      if((!IsLower)&&(k2<rows)&&(k2+actual_kc>rows))
      {
        actual_kc = rows-k2;
        k2 = k2+actual_kc-kc;
      }

      pack_rhs(blockB, rhs.getSubMapper(actual_k2,0), actual_kc, cols);

      // the selected lhs panel has to be split into three different parts:
      //  1 - the part which is zero => skip it
      //  2 - the diagonal block => special kernel
      //  3 - the dense panel below (lower case) or above (upper case) the diagonal block => GEPP
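      //
      // Roughly, for a lower-triangular lhs, the lhs columns [actual_k2, actual_k2+actual_kc)
      // look like this (the upper-triangular case is the mirror image):
      //
      //   [ 0 0 0 0 ]  <- rows above the diagonal block: structurally zero, skipped
      //   [ x 0 0 0 ]
      //   [ x x 0 0 ]  <- the triangular diagonal block: packed through triangularBuffer
      //   [ x x x 0 ]
      //   [ x x x x ]
      //   [ x x x x ]  <- the dense rows below: a plain GEPP sweep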

      // the block diagonal, if any:
      if(IsLower || actual_k2<rows)
      {
        // for each small vertical panel of the lhs
        for (Index k1=0; k1<actual_kc; k1+=panelWidth)
        {
          Index actualPanelWidth = std::min<Index>(actual_kc-k1, panelWidth);
          Index lengthTarget = IsLower ? actual_kc-k1-actualPanelWidth : k1;
          Index startBlock   = actual_k2+k1;
          Index blockBOffset = k1;

          // => GEBP with the micro triangular block
          // The trick is to pack this micro block while filling the opposite triangular part with zeros.
          // To this end we do an extra triangular copy to a small temporary buffer.
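          // After this copy, triangularBuffer holds the actualPanelWidth x actualPanelWidth
          // diagonal sub-block: its opposite triangle stays zero and its diagonal is 0, 1 or
          // the actual lhs values depending on ZeroDiag/UnitDiag/plain mode, so the generic
          // gemm_pack_lhs below can be reused unchanged.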
          for (Index k=0;k<actualPanelWidth;++k)
          {
            if (SetDiag)
              triangularBuffer.coeffRef(k,k) = lhs(startBlock+k,startBlock+k);
            for (Index i=IsLower ? k+1 : 0; IsLower ? i<actualPanelWidth : i<k; ++i)
              triangularBuffer.coeffRef(i,k) = lhs(startBlock+i,startBlock+k);
          }
          pack_lhs(blockA, LhsMapper(triangularBuffer.data(), triangularBuffer.outerStride()), actualPanelWidth, actualPanelWidth);

          gebp_kernel(res.getSubMapper(startBlock, 0), blockA, blockB,
                      actualPanelWidth, actualPanelWidth, cols, alpha,
                      actualPanelWidth, actual_kc, 0, blockBOffset);

          // GEBP with remaining micro panel
          if (lengthTarget>0)
          {
            Index startTarget = IsLower ? actual_k2+k1+actualPanelWidth : actual_k2;

            pack_lhs(blockA, lhs.getSubMapper(startTarget,startBlock), actualPanelWidth, lengthTarget);

            gebp_kernel(res.getSubMapper(startTarget, 0), blockA, blockB,
                        lengthTarget, actualPanelWidth, cols, alpha,
                        actualPanelWidth, actual_kc, 0, blockBOffset);
          }
        }
      }
      // the part below (lower case) or above (upper case) the diagonal => GEPP
      {
        Index start = IsLower ? k2 : 0;
        Index end   = IsLower ? rows : (std::min)(actual_k2,rows);
        for(Index i2=start; i2<end; i2+=mc)
        {
          const Index actual_mc = (std::min)(i2+mc,end)-i2;
          gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr,Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder,false>()
            (blockA, lhs.getSubMapper(i2, actual_k2), actual_kc, actual_mc);

          gebp_kernel(res.getSubMapper(i2, 0), blockA, blockB, actual_mc,
                      actual_kc, cols, alpha, -1, -1, 0, 0);
        }
      }
    }
  }

// implements col-major += alpha * op(general) * op(triangular)
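// This is the mirror of the previous kernel: the triangular factor is now the rhs, so the
// triangular packing work moves to the rhs side. For each depth block, blockB stores the
// packed triangular part first and the packed general remainder (geb, alignment-padded)
// right after it, while the lhs is packed with the ordinary gemm_pack_lhs.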
template <typename Scalar, typename Index, int Mode,
          int LhsStorageOrder, bool ConjugateLhs,
          int RhsStorageOrder, bool ConjugateRhs,
          int ResInnerStride, int Version>
struct product_triangular_matrix_matrix<Scalar,Index,Mode,false,
                                        LhsStorageOrder,ConjugateLhs,
                                        RhsStorageOrder,ConjugateRhs,ColMajor,ResInnerStride,Version>
{
  typedef gebp_traits<Scalar,Scalar> Traits;
  enum {
    SmallPanelWidth = plain_enum_max(Traits::mr, Traits::nr),
    IsLower = (Mode&Lower) == Lower,
    SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1
  };

  static EIGEN_DONT_INLINE void run(
    Index _rows, Index _cols, Index _depth,
    const Scalar* lhs_, Index lhsStride,
    const Scalar* rhs_, Index rhsStride,
    Scalar* res, Index resIncr, Index resStride,
    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking);
};

template <typename Scalar, typename Index, int Mode,
          int LhsStorageOrder, bool ConjugateLhs,
          int RhsStorageOrder, bool ConjugateRhs,
          int ResInnerStride, int Version>
EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,false,
                                                        LhsStorageOrder,ConjugateLhs,
                                                        RhsStorageOrder,ConjugateRhs,ColMajor,ResInnerStride,Version>::run(
    Index _rows, Index _cols, Index _depth,
    const Scalar* lhs_, Index lhsStride,
    const Scalar* rhs_, Index rhsStride,
    Scalar* res_, Index resIncr, Index resStride,
    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
  {
    const Index PacketBytes = packet_traits<Scalar>::size*sizeof(Scalar);
    // strip zeros
    Index diagSize = (std::min)(_cols,_depth);
    Index rows     = _rows;
    Index depth    = IsLower ? _depth : diagSize;
    Index cols     = IsLower ? diagSize : _cols;

    typedef const_blas_data_mapper<Scalar, Index, LhsStorageOrder> LhsMapper;
    typedef const_blas_data_mapper<Scalar, Index, RhsStorageOrder> RhsMapper;
    typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor, Unaligned, ResInnerStride> ResMapper;
    LhsMapper lhs(lhs_,lhsStride);
    RhsMapper rhs(rhs_,rhsStride);
    ResMapper res(res_, resStride, resIncr);

    Index kc = blocking.kc();                   // cache block size along the K direction
    Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction

    std::size_t sizeA = kc*mc;
    std::size_t sizeB = kc*cols+EIGEN_MAX_ALIGN_BYTES/sizeof(Scalar);

    ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());
    ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());

    internal::constructor_without_unaligned_array_assert a;
    Matrix<Scalar,SmallPanelWidth,SmallPanelWidth,RhsStorageOrder> triangularBuffer(a);
    triangularBuffer.setZero();
    if((Mode&ZeroDiag)==ZeroDiag)
      triangularBuffer.diagonal().setZero();
    else
      triangularBuffer.diagonal().setOnes();

    gebp_kernel<Scalar, Scalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
    gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder> pack_lhs;
    gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr,RhsStorageOrder> pack_rhs;
    gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr,RhsStorageOrder,false,true> pack_rhs_panel;

    for(Index k2=IsLower ? 0 : depth;
        IsLower ? k2<depth : k2>0;
        IsLower ? k2+=kc : k2-=kc)
    {
      Index actual_kc = (std::min)(IsLower ? depth-k2 : k2, kc);
      Index actual_k2 = IsLower ? k2 : k2-actual_kc;

      // align blocks with the end of the triangular part for trapezoidal rhs
      if(IsLower && (k2<cols) && (actual_k2+actual_kc>cols))
      {
        actual_kc = cols-k2;
        k2 = actual_k2 + actual_kc - kc;
      }

      // remaining size
      Index rs = IsLower ? (std::min)(cols,actual_k2) : cols - k2;
      // size of the triangular part
      Index ts = (IsLower && actual_k2>=cols) ? 0 : actual_kc;

      Scalar* geb = blockB+ts*ts;
      geb = geb + internal::first_aligned<PacketBytes>(geb,PacketBytes/sizeof(Scalar));

      pack_rhs(geb, rhs.getSubMapper(actual_k2,IsLower ? 0 : k2), actual_kc, rs);

      // pack the triangular part of the rhs padding the unrolled blocks with zeros
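      // Each SmallPanelWidth-wide slice of that triangular block is packed in two steps:
      // its rectangular part directly from the rhs via pack_rhs_panel, and its triangular
      // tip through triangularBuffer, so entries on the wrong side of the diagonal enter
      // blockB as explicit zeros.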
      if(ts>0)
      {
        for (Index j2=0; j2<actual_kc; j2+=SmallPanelWidth)
        {
          Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);
          Index actual_j2 = actual_k2 + j2;
          Index panelOffset = IsLower ? j2+actualPanelWidth : 0;
          Index panelLength = IsLower ? actual_kc-j2-actualPanelWidth : j2;
          // general part
          pack_rhs_panel(blockB+j2*actual_kc,
                         rhs.getSubMapper(actual_k2+panelOffset, actual_j2),
                         panelLength, actualPanelWidth,
                         actual_kc, panelOffset);

          // append the triangular part via a temporary buffer
          for (Index j=0;j<actualPanelWidth;++j)
          {
            if (SetDiag)
              triangularBuffer.coeffRef(j,j) = rhs(actual_j2+j,actual_j2+j);
            for (Index k=IsLower ? j+1 : 0; IsLower ? k<actualPanelWidth : k<j; ++k)
              triangularBuffer.coeffRef(k,j) = rhs(actual_j2+k,actual_j2+j);
          }

          pack_rhs_panel(blockB+j2*actual_kc,
                         RhsMapper(triangularBuffer.data(), triangularBuffer.outerStride()),
                         actualPanelWidth, actualPanelWidth,
                         actual_kc, j2);
        }
      }

      for (Index i2=0; i2<rows; i2+=mc)
      {
        const Index actual_mc = (std::min)(mc,rows-i2);
        pack_lhs(blockA, lhs.getSubMapper(i2, actual_k2), actual_kc, actual_mc);

        // triangular kernel
        if(ts>0)
        {
          for (Index j2=0; j2<actual_kc; j2+=SmallPanelWidth)
          {
            Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);
            Index panelLength = IsLower ? actual_kc-j2 : j2+actualPanelWidth;
            Index blockOffset = IsLower ? j2 : 0;

            gebp_kernel(res.getSubMapper(i2, actual_k2 + j2),
                        blockA, blockB+j2*actual_kc,
                        actual_mc, panelLength, actualPanelWidth,
                        alpha,
                        actual_kc, actual_kc,      // strides
                        blockOffset, blockOffset); // offsets
          }
        }
        gebp_kernel(res.getSubMapper(i2, IsLower ? 0 : k2),
                    blockA, geb, actual_mc, actual_kc, rs,
                    alpha,
                    -1, -1, 0, 0);
      }
    }
  }


} // end namespace internal

namespace internal {
template<int Mode, bool LhsIsTriangular, typename Lhs, typename Rhs>
struct triangular_product_impl<Mode,LhsIsTriangular,Lhs,false,Rhs,false>
{
  template<typename Dest> static void run(Dest& dst, const Lhs &a_lhs, const Rhs &a_rhs, const typename Dest::Scalar& alpha)
  {
    typedef typename Lhs::Scalar LhsScalar;
    typedef typename Rhs::Scalar RhsScalar;
    typedef typename Dest::Scalar Scalar;

    typedef internal::blas_traits<Lhs> LhsBlasTraits;
    typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
    typedef internal::remove_all_t<ActualLhsType> ActualLhsTypeCleaned;
    typedef internal::blas_traits<Rhs> RhsBlasTraits;
    typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
    typedef internal::remove_all_t<ActualRhsType> ActualRhsTypeCleaned;

    internal::add_const_on_value_type_t<ActualLhsType> lhs = LhsBlasTraits::extract(a_lhs);
    internal::add_const_on_value_type_t<ActualRhsType> rhs = RhsBlasTraits::extract(a_rhs);

    LhsScalar lhs_alpha = LhsBlasTraits::extractScalarFactor(a_lhs);
    RhsScalar rhs_alpha = RhsBlasTraits::extractScalarFactor(a_rhs);
    Scalar actualAlpha = alpha * lhs_alpha * rhs_alpha;

    typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,Scalar,Scalar,
      Lhs::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime, Lhs::MaxColsAtCompileTime,4> BlockingType;

    enum { IsLower = (Mode&Lower) == Lower };
    Index stripedRows  = ((!LhsIsTriangular) || (IsLower))  ? lhs.rows() : (std::min)(lhs.rows(),lhs.cols());
    Index stripedCols  = ((LhsIsTriangular)  || (!IsLower)) ? rhs.cols() : (std::min)(rhs.cols(),rhs.rows());
    Index stripedDepth = LhsIsTriangular ? ((!IsLower) ? lhs.cols() : (std::min)(lhs.cols(),lhs.rows()))
                                         : ((IsLower)  ? rhs.rows() : (std::min)(rhs.rows(),rhs.cols()));
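    // The sizes are "striped": they are clipped to the extent of the triangular factor,
    // since beyond it a trapezoidal triangular operand contributes nothing but zeros and
    // the corresponding rows, columns or depth can safely be ignored by the kernel.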

    BlockingType blocking(stripedRows, stripedCols, stripedDepth, 1, false);

    internal::product_triangular_matrix_matrix<Scalar, Index,
      Mode, LhsIsTriangular,
      (internal::traits<ActualLhsTypeCleaned>::Flags&RowMajorBit) ? RowMajor : ColMajor, LhsBlasTraits::NeedToConjugate,
      (internal::traits<ActualRhsTypeCleaned>::Flags&RowMajorBit) ? RowMajor : ColMajor, RhsBlasTraits::NeedToConjugate,
      (internal::traits<Dest>::Flags&RowMajorBit) ? RowMajor : ColMajor, Dest::InnerStrideAtCompileTime>
      ::run(
        stripedRows, stripedCols, stripedDepth,                   // sizes
        &lhs.coeffRef(0,0), lhs.outerStride(),                    // lhs info
        &rhs.coeffRef(0,0), rhs.outerStride(),                    // rhs info
        &dst.coeffRef(0,0), dst.innerStride(), dst.outerStride(), // result info
        actualAlpha, blocking
      );

    // Apply correction if the diagonal is unit and a scalar factor was nested:
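    // Rationale (informally): with UnitDiag the diagonal is an implicit identity, but the
    // scalar factor extracted from the triangular operand was folded into the whole product
    // above and thus scaled that identity as well. The surplus equals (factor - 1) times the
    // rows (lhs triangular) or columns (rhs triangular) touched by the diagonal, and is
    // subtracted below.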
    if ((Mode&UnitDiag)==UnitDiag)
    {
      if (LhsIsTriangular && !numext::is_exactly_one(lhs_alpha))
      {
        Index diagSize = (std::min)(lhs.rows(),lhs.cols());
        dst.topRows(diagSize) -= ((lhs_alpha-LhsScalar(1))*a_rhs).topRows(diagSize);
      }
      else if ((!LhsIsTriangular) && !numext::is_exactly_one(rhs_alpha))
      {
        Index diagSize = (std::min)(rhs.rows(),rhs.cols());
        dst.leftCols(diagSize) -= (rhs_alpha-RhsScalar(1))*a_lhs.leftCols(diagSize);
      }
    }
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_TRIANGULAR_MATRIX_MATRIX_H