BDCSVD.h
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // We used the "A Divide-And-Conquer Algorithm for the Bidiagonal SVD"
5 // research report written by Ming Gu and Stanley C. Eisenstat
6 // The code variable names correspond to the names they used in their
7 // report
8 //
9 // Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
10 // Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
11 // Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
12 // Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
13 // Copyright (C) 2013 Jitse Niesen <jitse@maths.leeds.ac.uk>
14 // Copyright (C) 2014-2017 Gael Guennebaud <gael.guennebaud@inria.fr>
15 //
16 // Source Code Form is subject to the terms of the Mozilla
17 // Public License v. 2.0. If a copy of the MPL was not distributed
18 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
19 
20 #ifndef EIGEN_BDCSVD_H
21 #define EIGEN_BDCSVD_H
22 // #define EIGEN_BDCSVD_DEBUG_VERBOSE
23 // #define EIGEN_BDCSVD_SANITY_CHECKS
24 
25 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
26 #undef eigen_internal_assert
27 #define eigen_internal_assert(X) assert(X);
28 #endif
29 
30 #include "./InternalHeaderCheck.h"
31 
32 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
33 #include <iostream>
34 #endif
35 
36 namespace Eigen {
37 
38 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
39 IOFormat bdcsvdfmt(8, 0, ", ", "\n", " [", "]");
40 #endif
41 
42 template <typename MatrixType_, int Options>
43 class BDCSVD;
44 
45 namespace internal {
46 
47 template <typename MatrixType_, int Options>
48 struct traits<BDCSVD<MatrixType_, Options> > : svd_traits<MatrixType_, Options> {
49  typedef MatrixType_ MatrixType;
50 };
51 
52 template <typename MatrixType, int Options>
53 struct allocate_small_svd {
54  static void run(JacobiSVD<MatrixType, Options>& smallSvd, Index rows, Index cols, unsigned int computationOptions) {
55  (void)computationOptions;
56  smallSvd = JacobiSVD<MatrixType, Options>(rows, cols);
57  }
58 };
59 
62 
63 template <typename MatrixType>
64 struct allocate_small_svd<MatrixType, 0> {
65  static void run(JacobiSVD<MatrixType>& smallSvd, Index rows, Index cols, unsigned int computationOptions) {
66  smallSvd = JacobiSVD<MatrixType>(rows, cols, computationOptions);
67  }
68 };
69 
71 
72 } // end namespace internal
73 
103 template <typename MatrixType_, int Options_>
104 class BDCSVD : public SVDBase<BDCSVD<MatrixType_, Options_> > {
105  typedef SVDBase<BDCSVD> Base;
106 
107 public:
108  using Base::rows;
109  using Base::cols;
110  using Base::computeU;
111  using Base::computeV;
112 
113  typedef MatrixType_ MatrixType;
114  typedef typename Base::Scalar Scalar;
115  typedef typename Base::RealScalar RealScalar;
116  typedef typename NumTraits<RealScalar>::Literal Literal;
117  typedef typename Base::Index Index;
118  enum {
119  Options = Options_,
129  };
130 
131  typedef typename Base::MatrixUType MatrixUType;
132  typedef typename Base::MatrixVType MatrixVType;
133  typedef typename Base::SingularValuesType SingularValuesType;
134 
135  typedef Matrix<Scalar, Dynamic, Dynamic, ColMajor> MatrixX;
136  typedef Matrix<RealScalar, Dynamic, Dynamic, ColMajor> MatrixXr;
137  typedef Matrix<RealScalar, Dynamic, 1> VectorType;
138  typedef Array<RealScalar, Dynamic, 1> ArrayXr;
139  typedef Array<Index, 1, Dynamic> ArrayXi;
140  typedef Ref<ArrayXr> ArrayRef;
141  typedef Ref<ArrayXi> IndicesRef;
142 
148  BDCSVD() : m_algoswap(16), m_isTranspose(false), m_compU(false), m_compV(false), m_numIters(0)
149  {}
150 
159  }
160 
176  BDCSVD(Index rows, Index cols, unsigned int computationOptions) : m_algoswap(16), m_numIters(0) {
177  internal::check_svd_options_assertions<MatrixType, Options>(computationOptions, rows, cols);
178  allocate(rows, cols, computationOptions);
179  }
180 
186  BDCSVD(const MatrixType& matrix) : m_algoswap(16), m_numIters(0) {
188  }
189 
203  BDCSVD(const MatrixType& matrix, unsigned int computationOptions) : m_algoswap(16), m_numIters(0) {
204  internal::check_svd_options_assertions<MatrixType, Options>(computationOptions, matrix.rows(), matrix.cols());
205  compute_impl(matrix, computationOptions);
206  }
207 
208  ~BDCSVD() {}
209 
215  BDCSVD& compute(const MatrixType& matrix) { return compute_impl(matrix, m_computationOptions); }
216 
227  BDCSVD& compute(const MatrixType& matrix, unsigned int computationOptions) {
228  internal::check_svd_options_assertions<MatrixType, Options>(computationOptions, matrix.rows(), matrix.cols());
229  return compute_impl(matrix, computationOptions);
230  }
231 
232  void setSwitchSize(int s)
233  {
234  eigen_assert(s>=3 && "BDCSVD the size of the algo switch has to be at least 3.");
235  m_algoswap = s;
236  }
237 
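// Illustrative usage sketch (editorial addition, not part of the original header). It shows how the public
// interface above is typically exercised; the matrix sizes and the switch value 32 are arbitrary choices.
//
//   #include <Eigen/SVD>
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(1000, 300);
//   Eigen::BDCSVD<Eigen::MatrixXd, Eigen::ComputeThinU | Eigen::ComputeThinV> svd;
//   svd.setSwitchSize(32);                         // fall back to JacobiSVD for blocks smaller than 32
//   svd.compute(A);
//   Eigen::VectorXd sigma = svd.singularValues();  // 300 values, sorted in decreasing order
//   Eigen::MatrixXd U = svd.matrixU();             // 1000 x 300 (thin U)
//   Eigen::MatrixXd V = svd.matrixV();             // 300 x 300  (thin V)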
238 private:
239  BDCSVD& compute_impl(const MatrixType& matrix, unsigned int computationOptions);
240  void divide(Index firstCol, Index lastCol, Index firstRowW, Index firstColW, Index shift);
241  void computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V);
242  void computeSingVals(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef& perm, VectorType& singVals, ArrayRef shifts, ArrayRef mus);
243  void perturbCol0(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef& perm, const VectorType& singVals, const ArrayRef& shifts, const ArrayRef& mus, ArrayRef zhat);
244  void computeSingVecs(const ArrayRef& zhat, const ArrayRef& diag, const IndicesRef& perm, const VectorType& singVals, const ArrayRef& shifts, const ArrayRef& mus, MatrixXr& U, MatrixXr& V);
245  void deflation43(Index firstCol, Index shift, Index i, Index size);
246  void deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size);
247  void deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift);
248  template<typename HouseholderU, typename HouseholderV, typename NaiveU, typename NaiveV>
249  void copyUV(const HouseholderU &householderU, const HouseholderV &householderV, const NaiveU &naiveU, const NaiveV &naivev);
250  void structured_update(Block<MatrixXr, Dynamic, Dynamic> A, const MatrixXr &B, Index n1);
251  static RealScalar secularEq(RealScalar x, const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const ArrayRef& diagShifted, RealScalar shift);
252  template <typename SVDType>
253  void computeBaseCase(SVDType& svd, Index n, Index firstCol, Index firstRowW, Index firstColW, Index shift);
254 
255  protected:
256  void allocate(Index rows, Index cols, unsigned int computationOptions);
257  MatrixXr m_naiveU, m_naiveV;
258  MatrixXr m_computed;
259  Index m_nRec;
260  ArrayXr m_workspace;
261  ArrayXi m_workspaceI;
262  int m_algoswap;
263  bool m_isTranspose, m_compU, m_compV, m_useQrDecomp;
264  JacobiSVD<MatrixType, ComputationOptions> smallSvd;
265  HouseholderQR<MatrixX> qrDecomp;
266  internal::UpperBidiagonalization<MatrixX> bid;
267  MatrixX copyWorkspace;
268  MatrixX reducedTriangle;
269 
270  using Base::m_computationOptions;
271  using Base::m_computeThinU;
272  using Base::m_computeThinV;
273  using Base::m_diagSize;
274  using Base::m_info;
275  using Base::m_isInitialized;
276  using Base::m_matrixU;
277  using Base::m_matrixV;
278  using Base::m_nonzeroSingularValues;
279  using Base::m_singularValues;
280 
281  public:
282  int m_numIters;
283 }; // end class BDCSVD
284 
285 // Method to allocate and initialize matrix and attributes
286 template <typename MatrixType, int Options>
287 void BDCSVD<MatrixType, Options>::allocate(Index rows, Index cols, unsigned int computationOptions) {
288  if (Base::allocate(rows, cols, computationOptions))
289  return;
290 
291  if (cols < m_algoswap)
292  internal::allocate_small_svd<MatrixType, ComputationOptions>::run(smallSvd, rows, cols, computationOptions);
293 
294  m_computed = MatrixXr::Zero(m_diagSize + 1, m_diagSize );
295  m_compU = computeV();
296  m_compV = computeU();
297  m_isTranspose = (cols > rows);
298  if (m_isTranspose)
299  std::swap(m_compU, m_compV);
300 
301  // kMinAspectRatio is the crossover point that determines if we perform R-Bidiagonalization
302  // or bidiagonalize the input matrix directly.
303  // It is based on LAPACK's dgesdd routine, which uses 11.0/6.0;
304  // we use a larger value to prevent a regression for relatively square matrices.
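  // For instance (illustrative numbers, editorial addition): with a 1000 x 200 input, rows / kMinAspectRatio = 250 > cols,
  // so m_useQrDecomp becomes true and the bidiagonalization operates on the 200 x 200 triangular factor R;
  // a 300 x 200 input fails both tests and is bidiagonalized directly.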
305  constexpr Index kMinAspectRatio = 4;
306  constexpr bool disableQrDecomp = static_cast<int>(QRDecomposition) == static_cast<int>(DisableQRDecomposition);
307  m_useQrDecomp = !disableQrDecomp && ((rows / kMinAspectRatio > cols) || (cols / kMinAspectRatio > rows));
308  if (m_useQrDecomp) {
310  reducedTriangle = MatrixX(m_diagSize, m_diagSize);
311  }
312 
313  copyWorkspace = MatrixX(m_isTranspose ? cols : rows, m_isTranspose ? rows : cols);
314  bid = internal::UpperBidiagonalization<MatrixX>(m_useQrDecomp ? m_diagSize : copyWorkspace.rows(),
315  m_useQrDecomp ? m_diagSize : copyWorkspace.cols());
316 
317  if (m_compU) m_naiveU = MatrixXr::Zero(m_diagSize + 1, m_diagSize + 1 );
318  else m_naiveU = MatrixXr::Zero(2, m_diagSize + 1 );
319 
320  if (m_compV) m_naiveV = MatrixXr::Zero(m_diagSize, m_diagSize);
321 
322  m_workspace.resize((m_diagSize+1)*(m_diagSize+1)*3);
323  m_workspaceI.resize(3*m_diagSize);
324 } // end allocate
325 
326 template <typename MatrixType, int Options>
327 BDCSVD<MatrixType, Options>& BDCSVD<MatrixType, Options>::compute_impl(const MatrixType& matrix,
328  unsigned int computationOptions) {
329 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
330  std::cout << "\n\n\n======================================================================================================================\n\n\n";
331 #endif
332  using std::abs;
333 
334  allocate(matrix.rows(), matrix.cols(), computationOptions);
335 
336  const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
337 
338  //**** step -1 - If the problem is too small, directly fall back to JacobiSVD and return
339  if(matrix.cols() < m_algoswap)
340  {
341  smallSvd.compute(matrix);
342  m_isInitialized = true;
343  m_info = smallSvd.info();
344  if (m_info == Success || m_info == NoConvergence) {
345  if (computeU()) m_matrixU = smallSvd.matrixU();
346  if (computeV()) m_matrixV = smallSvd.matrixV();
347  m_singularValues = smallSvd.singularValues();
348  m_nonzeroSingularValues = smallSvd.nonzeroSingularValues();
349  }
350  return *this;
351  }
352 
353  //**** step 0 - Copy the input matrix and apply scaling to reduce over/under-flows
354  RealScalar scale = matrix.cwiseAbs().template maxCoeff<PropagateNaN>();
355  if (!(numext::isfinite)(scale)) {
356  m_isInitialized = true;
357  m_info = InvalidInput;
358  return *this;
359  }
360 
361  if(numext::is_exactly_zero(scale)) scale = Literal(1);
362 
363  if (m_isTranspose) copyWorkspace = matrix.adjoint() / scale;
364  else copyWorkspace = matrix / scale;
365 
366  //**** step 1 - Bidiagonalization.
367  // If the problem is sufficiently rectangular, we perform R-Bidiagonalization: compute A = Q(R/0)
368  // and then bidiagonalize R. Otherwise, if the problem is relatively square, we
369  // bidiagonalize the input matrix directly.
370  if (m_useQrDecomp) {
371  qrDecomp.compute(copyWorkspace);
372  reducedTriangle = qrDecomp.matrixQR().topRows(m_diagSize);
373  reducedTriangle.template triangularView<StrictlyLower>().setZero();
374  bid.compute(reducedTriangle);
375  } else {
376  bid.compute(copyWorkspace);
377  }
378 
379  //**** step 2 - Divide & Conquer
380  m_naiveU.setZero();
381  m_naiveV.setZero();
382  // FIXME this line involves a temporary matrix
383  m_computed.topRows(m_diagSize) = bid.bidiagonal().toDenseMatrix().transpose();
384  m_computed.template bottomRows<1>().setZero();
385  divide(0, m_diagSize - 1, 0, 0, 0);
386  if (m_info != Success && m_info != NoConvergence) {
387  m_isInitialized = true;
388  return *this;
389  }
390 
391  //**** step 3 - Copy singular values and vectors
392  for (int i=0; i<m_diagSize; i++)
393  {
394  RealScalar a = abs(m_computed.coeff(i, i));
395  m_singularValues.coeffRef(i) = a * scale;
396  if (a<considerZero)
397  {
398  m_nonzeroSingularValues = i;
399  m_singularValues.tail(m_diagSize - i - 1).setZero();
400  break;
401  }
402  else if (i == m_diagSize - 1)
403  {
404  m_nonzeroSingularValues = i + 1;
405  break;
406  }
407  }
408 
409  //**** step 4 - Finalize unitaries U and V
410  if(m_isTranspose) copyUV(bid.householderV(), bid.householderU(), m_naiveV, m_naiveU);
411  else copyUV(bid.householderU(), bid.householderV(), m_naiveU, m_naiveV);
412 
413  if (m_useQrDecomp) {
414  if (m_isTranspose && computeV()) m_matrixV.applyOnTheLeft(qrDecomp.householderQ());
415  else if (!m_isTranspose && computeU()) m_matrixU.applyOnTheLeft(qrDecomp.householderQ());
416  }
417 
418  m_isInitialized = true;
419  return *this;
420 } // end compute
421 
422 template <typename MatrixType, int Options>
423 template <typename HouseholderU, typename HouseholderV, typename NaiveU, typename NaiveV>
424 void BDCSVD<MatrixType, Options>::copyUV(const HouseholderU& householderU, const HouseholderV& householderV,
425  const NaiveU& naiveU, const NaiveV& naiveV) {
426  // Note exchange of U and V: m_matrixU is set from m_naiveV and vice versa
427  if (computeU())
428  {
429  Index Ucols = m_computeThinU ? m_diagSize : rows();
430  m_matrixU = MatrixX::Identity(rows(), Ucols);
431  m_matrixU.topLeftCorner(m_diagSize, m_diagSize) = naiveV.template cast<Scalar>().topLeftCorner(m_diagSize, m_diagSize);
432  // FIXME the following conditionals involve temporary buffers
433  if (m_useQrDecomp) m_matrixU.topLeftCorner(householderU.cols(), m_diagSize).applyOnTheLeft(householderU);
434  else m_matrixU.applyOnTheLeft(householderU);
435  }
436  if (computeV())
437  {
438  Index Vcols = m_computeThinV ? m_diagSize : cols();
439  m_matrixV = MatrixX::Identity(cols(), Vcols);
440  m_matrixV.topLeftCorner(m_diagSize, m_diagSize) = naiveU.template cast<Scalar>().topLeftCorner(m_diagSize, m_diagSize);
441  // FIXME the following conditionals involve temporary buffers
442  if (m_useQrDecomp) m_matrixV.topLeftCorner(householderV.cols(), m_diagSize).applyOnTheLeft(householderV);
443  else m_matrixV.applyOnTheLeft(householderV);
444  }
445 }
446 
455 template <typename MatrixType, int Options>
456 void BDCSVD<MatrixType, Options>::structured_update(Block<MatrixXr, Dynamic, Dynamic> A, const MatrixXr& B, Index n1) {
457  Index n = A.rows();
458  if(n>100)
459  {
460  // If the matrices are large enough, let's exploit the sparse structure of A by
461  // splitting it in half (wrt n1), and packing the non-zero columns.
462  Index n2 = n - n1;
463  Map<MatrixXr> A1(m_workspace.data() , n1, n);
464  Map<MatrixXr> A2(m_workspace.data()+ n1*n, n2, n);
465  Map<MatrixXr> B1(m_workspace.data()+ n*n, n, n);
466  Map<MatrixXr> B2(m_workspace.data()+2*n*n, n, n);
467  Index k1=0, k2=0;
468  for(Index j=0; j<n; ++j)
469  {
470  if( (A.col(j).head(n1).array()!=Literal(0)).any() )
471  {
472  A1.col(k1) = A.col(j).head(n1);
473  B1.row(k1) = B.row(j);
474  ++k1;
475  }
476  if( (A.col(j).tail(n2).array()!=Literal(0)).any() )
477  {
478  A2.col(k2) = A.col(j).tail(n2);
479  B2.row(k2) = B.row(j);
480  ++k2;
481  }
482  }
483 
484  A.topRows(n1).noalias() = A1.leftCols(k1) * B1.topRows(k1);
485  A.bottomRows(n2).noalias() = A2.leftCols(k2) * B2.topRows(k2);
486  }
487  else
488  {
489  Map<MatrixXr,Aligned> tmp(m_workspace.data(),n,n);
490  tmp.noalias() = A*B;
491  A = tmp;
492  }
493 }
494 
495 template <typename MatrixType, int Options>
496 template <typename SVDType>
497 void BDCSVD<MatrixType, Options>::computeBaseCase(SVDType& svd, Index n, Index firstCol, Index firstRowW,
498  Index firstColW, Index shift) {
499  svd.compute(m_computed.block(firstCol, firstCol, n + 1, n));
500  m_info = svd.info();
501  if (m_info != Success && m_info != NoConvergence) return;
502  if (m_compU)
503  m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() = svd.matrixU();
504  else {
505  m_naiveU.row(0).segment(firstCol, n + 1).real() = svd.matrixU().row(0);
506  m_naiveU.row(1).segment(firstCol, n + 1).real() = svd.matrixU().row(n);
507  }
508  if (m_compV) m_naiveV.block(firstRowW, firstColW, n, n).real() = svd.matrixV();
509  m_computed.block(firstCol + shift, firstCol + shift, n + 1, n).setZero();
510  m_computed.diagonal().segment(firstCol + shift, n) = svd.singularValues().head(n);
511 }
512 
513 // The divide algorithm is done "in place": we are always working on subsets of the same matrix. The divide method
514 // takes as arguments the position of the submatrix we are currently working on.
515 
516 //@param firstCol : The Index of the first column of the submatrix of m_computed and for m_naiveU;
517 //@param lastCol : The Index of the last column of the submatrix of m_computed and for m_naiveU;
518 // lastCol + 1 - firstCol is the size of the submatrix.
519 //@param firstRowW : The Index of the first row of the matrix W that we are to change. (see the reference paper section 1 for more information on W)
520 //@param firstColW : Same as firstRowW with the column.
521 //@param shift : Each time one takes the left submatrix, one must add 1 to the shift. Why? Because! We actually want the last column of the U submatrix
522 // to become the first column (*coeff) and to shift all the other columns to the right. There are more details in the reference paper.
523 template <typename MatrixType, int Options>
524 void BDCSVD<MatrixType, Options>::divide(Index firstCol, Index lastCol, Index firstRowW,
525  Index firstColW, Index shift) {
526  // requires rows = cols + 1;
527  using std::pow;
528  using std::sqrt;
529  using std::abs;
530  const Index n = lastCol - firstCol + 1;
531  const Index k = n/2;
532  const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
533  RealScalar alphaK;
534  RealScalar betaK;
535  RealScalar r0;
536  RealScalar lambda, phi, c0, s0;
537  VectorType l, f;
538  // We use the other algorithm which is more efficient for small
539  // matrices.
540  if (n < m_algoswap)
541  {
542  // FIXME this block involves temporaries
543  if (m_compV) {
544  JacobiSVD<MatrixXr, ComputeFullU | ComputeFullV> baseSvd;
545  computeBaseCase(baseSvd, n, firstCol, firstRowW, firstColW, shift);
546  } else {
547  JacobiSVD<MatrixXr, ComputeFullU> baseSvd;
548  computeBaseCase(baseSvd, n, firstCol, firstRowW, firstColW, shift);
549  }
550  return;
551  }
552  // We use the divide and conquer algorithm
553  alphaK = m_computed(firstCol + k, firstCol + k);
554  betaK = m_computed(firstCol + k + 1, firstCol + k);
555  // The divide must be done in this order to obtain good results: divide() changes the data inside the submatrices,
556  // and the divide of the right submatrix reads one column of the left submatrix. That's why we need to process the
557  // right submatrix before the left one.
558  divide(k + 1 + firstCol, lastCol, k + 1 + firstRowW, k + 1 + firstColW, shift);
559  if (m_info != Success && m_info != NoConvergence) return;
560  divide(firstCol, k - 1 + firstCol, firstRowW, firstColW + 1, shift + 1);
561  if (m_info != Success && m_info != NoConvergence) return;
562 
563  if (m_compU)
564  {
565  lambda = m_naiveU(firstCol + k, firstCol + k);
566  phi = m_naiveU(firstCol + k + 1, lastCol + 1);
567  }
568  else
569  {
570  lambda = m_naiveU(1, firstCol + k);
571  phi = m_naiveU(0, lastCol + 1);
572  }
573  r0 = sqrt((abs(alphaK * lambda) * abs(alphaK * lambda)) + abs(betaK * phi) * abs(betaK * phi));
574  if (m_compU)
575  {
576  l = m_naiveU.row(firstCol + k).segment(firstCol, k);
577  f = m_naiveU.row(firstCol + k + 1).segment(firstCol + k + 1, n - k - 1);
578  }
579  else
580  {
581  l = m_naiveU.row(1).segment(firstCol, k);
582  f = m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1);
583  }
584  if (m_compV) m_naiveV(firstRowW+k, firstColW) = Literal(1);
585  if (r0<considerZero)
586  {
587  c0 = Literal(1);
588  s0 = Literal(0);
589  }
590  else
591  {
592  c0 = alphaK * lambda / r0;
593  s0 = betaK * phi / r0;
594  }
595 
596 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
597  eigen_internal_assert(m_naiveU.allFinite());
598  eigen_internal_assert(m_naiveV.allFinite());
599  eigen_internal_assert(m_computed.allFinite());
600 #endif
601 
602  if (m_compU)
603  {
604  MatrixXr q1 (m_naiveU.col(firstCol + k).segment(firstCol, k + 1));
605  // we shift Q1 to the right
606  for (Index i = firstCol + k - 1; i >= firstCol; i--)
607  m_naiveU.col(i + 1).segment(firstCol, k + 1) = m_naiveU.col(i).segment(firstCol, k + 1);
608  // we copy q1, scaled by c0, into the first column
609  m_naiveU.col(firstCol).segment( firstCol, k + 1) = (q1 * c0);
610  // last column = q1 * - s0
611  m_naiveU.col(lastCol + 1).segment(firstCol, k + 1) = (q1 * ( - s0));
612  // first column = q2 * s0
613  m_naiveU.col(firstCol).segment(firstCol + k + 1, n - k) = m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) * s0;
614  // q2 *= c0
615  m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *= c0;
616  }
617  else
618  {
619  RealScalar q1 = m_naiveU(0, firstCol + k);
620  // we shift Q1 to the right
621  for (Index i = firstCol + k - 1; i >= firstCol; i--)
622  m_naiveU(0, i + 1) = m_naiveU(0, i);
623  // we copy q1, scaled by c0, into the first column
624  m_naiveU(0, firstCol) = (q1 * c0);
625  // last column = q1 * - s0
626  m_naiveU(0, lastCol + 1) = (q1 * ( - s0));
627  // first column = q2 * s0
628  m_naiveU(1, firstCol) = m_naiveU(1, lastCol + 1) *s0;
629  // q2 *= c0
630  m_naiveU(1, lastCol + 1) *= c0;
631  m_naiveU.row(1).segment(firstCol + 1, k).setZero();
632  m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1).setZero();
633  }
634 
635 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
636  eigen_internal_assert(m_naiveU.allFinite());
637  eigen_internal_assert(m_naiveV.allFinite());
638  eigen_internal_assert(m_computed.allFinite());
639 #endif
640 
641  m_computed(firstCol + shift, firstCol + shift) = r0;
642  m_computed.col(firstCol + shift).segment(firstCol + shift + 1, k) = alphaK * l.transpose().real();
643  m_computed.col(firstCol + shift).segment(firstCol + shift + k + 1, n - k - 1) = betaK * f.transpose().real();
644 
645 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
646  ArrayXr tmp1 = (m_computed.block(firstCol+shift, firstCol+shift, n, n)).jacobiSvd().singularValues();
647 #endif
648  // Second part: try to deflate singular values in combined matrix
649  deflation(firstCol, lastCol, k, firstRowW, firstColW, shift);
650 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
651  ArrayXr tmp2 = (m_computed.block(firstCol+shift, firstCol+shift, n, n)).jacobiSvd().singularValues();
652  std::cout << "\n\nj1 = " << tmp1.transpose().format(bdcsvdfmt) << "\n";
653  std::cout << "j2 = " << tmp2.transpose().format(bdcsvdfmt) << "\n\n";
654  std::cout << "err: " << ((tmp1-tmp2).abs()>1e-12*tmp2.abs()).transpose() << "\n";
655  static int count = 0;
656  std::cout << "# " << ++count << "\n\n";
657  eigen_internal_assert((tmp1-tmp2).matrix().norm() < 1e-14*tmp2.matrix().norm());
658 // eigen_internal_assert(count<681);
659 // eigen_internal_assert(((tmp1-tmp2).abs()<1e-13*tmp2.abs()).all());
660 #endif
661 
662  // Third part: compute SVD of combined matrix
663  MatrixXr UofSVD, VofSVD;
664  VectorType singVals;
665  computeSVDofM(firstCol + shift, n, UofSVD, singVals, VofSVD);
666 
667 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
668  eigen_internal_assert(UofSVD.allFinite());
669  eigen_internal_assert(VofSVD.allFinite());
670 #endif
671 
672  if (m_compU)
673  structured_update(m_naiveU.block(firstCol, firstCol, n + 1, n + 1), UofSVD, (n+2)/2);
674  else
675  {
676  Map<Matrix<RealScalar,2,Dynamic>,Aligned> tmp(m_workspace.data(),2,n+1);
677  tmp.noalias() = m_naiveU.middleCols(firstCol, n+1) * UofSVD;
678  m_naiveU.middleCols(firstCol, n + 1) = tmp;
679  }
680 
681  if (m_compV) structured_update(m_naiveV.block(firstRowW, firstColW, n, n), VofSVD, (n+1)/2);
682 
683 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
684  eigen_internal_assert(m_naiveU.allFinite());
685  eigen_internal_assert(m_naiveV.allFinite());
686  eigen_internal_assert(m_computed.allFinite());
687 #endif
688 
689  m_computed.block(firstCol + shift, firstCol + shift, n, n).setZero();
690  m_computed.block(firstCol + shift, firstCol + shift, n, n).diagonal() = singVals;
691 } // end divide
692 
693 // Compute SVD of m_computed.block(firstCol, firstCol, n + 1, n); this block only has non-zeros in
694 // the first column and on the diagonal and has undergone deflation, so diagonal is in increasing
695 // order except for possibly the (0,0) entry. The computed SVD is stored in U, singVals and V, except
696 // that if m_compV is false, then V is not computed. Singular values are sorted in decreasing order.
697 //
698 // TODO Opportunities for optimization: better root finding algo, better stopping criterion, better
699 // handling of round-off errors, be consistent in ordering
700 // For instance, to solve the secular equation using FMM, see http://www.stat.uchicago.edu/~lekheng/courses/302/classics/greengard-rokhlin.pdf
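// Editorial sketch (not from the original source): for n = 3 the leading n x n part of that block looks like
//
//   [ z0  0   0  ]
//   [ z1  d1  0  ]
//   [ z2  0   d2 ]
//
// with the perturbation vector z = col0 in the first column and d1 <= d2 on the diagonal (up to deflated
// entries moved to the end); the (0,0) entry holds z0 and plays the role of the possibly unsorted diagonal entry.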
701 template <typename MatrixType, int Options>
702 void BDCSVD<MatrixType, Options>::computeSVDofM(Index firstCol, Index n, MatrixXr& U,
703  VectorType& singVals, MatrixXr& V) {
704  const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
705  using std::abs;
706  ArrayRef col0 = m_computed.col(firstCol).segment(firstCol, n);
707  m_workspace.head(n) = m_computed.block(firstCol, firstCol, n, n).diagonal();
708  ArrayRef diag = m_workspace.head(n);
709  diag(0) = Literal(0);
710 
711  // Allocate space for singular values and vectors
712  singVals.resize(n);
713  U.resize(n+1, n+1);
714  if (m_compV) V.resize(n, n);
715 
716 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
717  if (col0.hasNaN() || diag.hasNaN())
718  std::cout << "\n\nHAS NAN\n\n";
719 #endif
720 
721  // Many singular values might have been deflated, the zero ones have been moved to the end,
722  // but others are interleaved and we must ignore them at this stage.
723  // To this end, let's compute a permutation skipping them:
724  Index actual_n = n;
725  while(actual_n>1 && numext::is_exactly_zero(diag(actual_n - 1))) {
726  --actual_n;
727  eigen_internal_assert(numext::is_exactly_zero(col0(actual_n)));
728  }
729  Index m = 0; // size of the deflated problem
730  for(Index k=0;k<actual_n;++k)
731  if(abs(col0(k))>considerZero)
732  m_workspaceI(m++) = k;
733  Map<ArrayXi> perm(m_workspaceI.data(),m);
734 
735  Map<ArrayXr> shifts(m_workspace.data()+1*n, n);
736  Map<ArrayXr> mus(m_workspace.data()+2*n, n);
737  Map<ArrayXr> zhat(m_workspace.data()+3*n, n);
738 
739 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
740  std::cout << "computeSVDofM using:\n";
741  std::cout << " z: " << col0.transpose() << "\n";
742  std::cout << " d: " << diag.transpose() << "\n";
743 #endif
744 
745  // Compute singVals, shifts, and mus
746  computeSingVals(col0, diag, perm, singVals, shifts, mus);
747 
748 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
749  std::cout << " j: " << (m_computed.block(firstCol, firstCol, n, n)).jacobiSvd().singularValues().transpose().reverse() << "\n\n";
750  std::cout << " sing-val: " << singVals.transpose() << "\n";
751  std::cout << " mu: " << mus.transpose() << "\n";
752  std::cout << " shift: " << shifts.transpose() << "\n";
753 
754  {
755  std::cout << "\n\n mus: " << mus.head(actual_n).transpose() << "\n\n";
756  std::cout << " check1 (expect0) : " << ((singVals.array()-(shifts+mus)) / singVals.array()).head(actual_n).transpose() << "\n\n";
757  eigen_internal_assert((((singVals.array()-(shifts+mus)) / singVals.array()).head(actual_n) >= 0).all());
758  std::cout << " check2 (>0) : " << ((singVals.array()-diag) / singVals.array()).head(actual_n).transpose() << "\n\n";
759  eigen_internal_assert((((singVals.array()-diag) / singVals.array()).head(actual_n) >= 0).all());
760  }
761 #endif
762 
763 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
764  eigen_internal_assert(singVals.allFinite());
765  eigen_internal_assert(mus.allFinite());
766  eigen_internal_assert(shifts.allFinite());
767 #endif
768 
769  // Compute zhat
770  perturbCol0(col0, diag, perm, singVals, shifts, mus, zhat);
771 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
772  std::cout << " zhat: " << zhat.transpose() << "\n";
773 #endif
774 
775 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
776  eigen_internal_assert(zhat.allFinite());
777 #endif
778 
779  computeSingVecs(zhat, diag, perm, singVals, shifts, mus, U, V);
780 
781 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
782  std::cout << "U^T U: " << (U.transpose() * U - MatrixXr(MatrixXr::Identity(U.cols(),U.cols()))).norm() << "\n";
783  std::cout << "V^T V: " << (V.transpose() * V - MatrixXr(MatrixXr::Identity(V.cols(),V.cols()))).norm() << "\n";
784 #endif
785 
786 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
787  eigen_internal_assert(m_naiveU.allFinite());
788  eigen_internal_assert(m_naiveV.allFinite());
789  eigen_internal_assert(m_computed.allFinite());
790  eigen_internal_assert(U.allFinite());
791  eigen_internal_assert(V.allFinite());
792 // eigen_internal_assert((U.transpose() * U - MatrixXr(MatrixXr::Identity(U.cols(),U.cols()))).norm() < 100*NumTraits<RealScalar>::epsilon() * n);
793 // eigen_internal_assert((V.transpose() * V - MatrixXr(MatrixXr::Identity(V.cols(),V.cols()))).norm() < 100*NumTraits<RealScalar>::epsilon() * n);
794 #endif
795 
796  // Because of deflation, the singular values might not be completely sorted.
797  // Fortunately, reordering them is an O(n) problem
798  for(Index i=0; i<actual_n-1; ++i)
799  {
800  if(singVals(i)>singVals(i+1))
801  {
802  using std::swap;
803  swap(singVals(i),singVals(i+1));
804  U.col(i).swap(U.col(i+1));
805  if(m_compV) V.col(i).swap(V.col(i+1));
806  }
807  }
808 
809 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
810  {
811  bool singular_values_sorted = (((singVals.segment(1,actual_n-1)-singVals.head(actual_n-1))).array() >= 0).all();
812  if(!singular_values_sorted)
813  std::cout << "Singular values are not sorted: " << singVals.segment(1,actual_n).transpose() << "\n";
814  eigen_internal_assert(singular_values_sorted);
815  }
816 #endif
817 
818  // Reverse order so that singular values are in increasing order
819  // Because of deflation, the zero singular values are already at the end
820  singVals.head(actual_n).reverseInPlace();
821  U.leftCols(actual_n).rowwise().reverseInPlace();
822  if (m_compV) V.leftCols(actual_n).rowwise().reverseInPlace();
823 
824 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
825  JacobiSVD<MatrixXr> jsvd(m_computed.block(firstCol, firstCol, n, n) );
826  std::cout << " * j: " << jsvd.singularValues().transpose() << "\n\n";
827  std::cout << " * sing-val: " << singVals.transpose() << "\n";
828 // std::cout << " * err: " << ((jsvd.singularValues()-singVals)>1e-13*singVals.norm()).transpose() << "\n";
829 #endif
830 }
831 
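// Editorial note (not part of the original file): in the notation of the Gu and Eisenstat report, the function
// below evaluates the secular equation of the deflated problem with diagonal d and perturbation vector z = col0,
//
//   f(sigma) = 1 + sum_j z_j^2 / ((d_j - sigma) * (d_j + sigma)),   with sigma = shift + mu,
//
// where the sum runs over the non-deflated indices stored in perm. The loop writes each term in the factored
// form (z_j / (diagShifted(j) - mu)) * (z_j / (d_j + shift + mu)), which is algebraically the same quantity but
// less prone to overflow (see the comment inside the loop).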
832 template <typename MatrixType, int Options>
833 typename BDCSVD<MatrixType, Options>::RealScalar BDCSVD<MatrixType, Options>::secularEq(
834  RealScalar mu, const ArrayRef& col0, const ArrayRef& diag, const IndicesRef& perm, const ArrayRef& diagShifted,
835  RealScalar shift) {
836  Index m = perm.size();
837  RealScalar res = Literal(1);
838  for(Index i=0; i<m; ++i)
839  {
840  Index j = perm(i);
841  // The following expression could be rewritten to involve only a single division,
842  // but this would make the expression more sensitive to overflow.
843  res += (col0(j) / (diagShifted(j) - mu)) * (col0(j) / (diag(j) + shift + mu));
844  }
845  return res;
846 }
847 
848 template <typename MatrixType, int Options>
849 void BDCSVD<MatrixType, Options>::computeSingVals(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef& perm,
850  VectorType& singVals, ArrayRef shifts, ArrayRef mus) {
851  using std::abs;
852  using std::swap;
853  using std::sqrt;
854 
855  Index n = col0.size();
856  Index actual_n = n;
857  // Note that here actual_n is computed based on col0(i)==0 instead of diag(i)==0 as above
858  // because 1) we have diag(i)==0 => col0(i)==0 and 2) if col0(i)==0, then diag(i) is already a singular value.
859  while(actual_n>1 && numext::is_exactly_zero(col0(actual_n - 1))) --actual_n;
860 
861  for (Index k = 0; k < n; ++k)
862  {
863  if (numext::is_exactly_zero(col0(k)) || actual_n == 1)
864  {
865  // if col0(k) == 0, then entry is deflated, so singular value is on diagonal
866  // if actual_n==1, then the deflated problem is already diagonalized
867  singVals(k) = k==0 ? col0(0) : diag(k);
868  mus(k) = Literal(0);
869  shifts(k) = k==0 ? col0(0) : diag(k);
870  continue;
871  }
872 
873  // otherwise, use secular equation to find singular value
874  RealScalar left = diag(k);
875  RealScalar right; // was: = (k != actual_n-1) ? diag(k+1) : (diag(actual_n-1) + col0.matrix().norm());
876  if(k==actual_n-1)
877  right = (diag(actual_n-1) + col0.matrix().norm());
878  else
879  {
880  // Skip deflated singular values,
881  // recall that at this stage we assume that z[j]!=0 and all entries for which z[j]==0 have been put aside.
882  // This should be equivalent to using perm[]
883  Index l = k+1;
884  while(numext::is_exactly_zero(col0(l))) { ++l; eigen_internal_assert(l < actual_n); }
885  right = diag(l);
886  }
887 
888  // first decide whether it's closer to the left end or the right end
889  RealScalar mid = left + (right-left) / Literal(2);
890  RealScalar fMid = secularEq(mid, col0, diag, perm, diag, Literal(0));
891 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
892  std::cout << "right-left = " << right-left << "\n";
893 // std::cout << "fMid = " << fMid << " " << secularEq(mid-left, col0, diag, perm, ArrayXr(diag-left), left)
894 // << " " << secularEq(mid-right, col0, diag, perm, ArrayXr(diag-right), right) << "\n";
895  std::cout << " = " << secularEq(left+RealScalar(0.000001)*(right-left), col0, diag, perm, diag, 0)
896  << " " << secularEq(left+RealScalar(0.1) *(right-left), col0, diag, perm, diag, 0)
897  << " " << secularEq(left+RealScalar(0.2) *(right-left), col0, diag, perm, diag, 0)
898  << " " << secularEq(left+RealScalar(0.3) *(right-left), col0, diag, perm, diag, 0)
899  << " " << secularEq(left+RealScalar(0.4) *(right-left), col0, diag, perm, diag, 0)
900  << " " << secularEq(left+RealScalar(0.49) *(right-left), col0, diag, perm, diag, 0)
901  << " " << secularEq(left+RealScalar(0.5) *(right-left), col0, diag, perm, diag, 0)
902  << " " << secularEq(left+RealScalar(0.51) *(right-left), col0, diag, perm, diag, 0)
903  << " " << secularEq(left+RealScalar(0.6) *(right-left), col0, diag, perm, diag, 0)
904  << " " << secularEq(left+RealScalar(0.7) *(right-left), col0, diag, perm, diag, 0)
905  << " " << secularEq(left+RealScalar(0.8) *(right-left), col0, diag, perm, diag, 0)
906  << " " << secularEq(left+RealScalar(0.9) *(right-left), col0, diag, perm, diag, 0)
907  << " " << secularEq(left+RealScalar(0.999999)*(right-left), col0, diag, perm, diag, 0) << "\n";
908 #endif
909  RealScalar shift = (k == actual_n-1 || fMid > Literal(0)) ? left : right;
910 
911  // measure everything relative to shift
912  Map<ArrayXr> diagShifted(m_workspace.data()+4*n, n);
913  diagShifted = diag - shift;
914 
915  if(k!=actual_n-1)
916  {
917  // check that after the shift, f(mid) is still negative:
918  RealScalar midShifted = (right - left) / RealScalar(2);
919  // we can test exact equality here, because shift comes from `... ? left : right`
920  if(numext::equal_strict(shift, right))
921  midShifted = -midShifted;
922  RealScalar fMidShifted = secularEq(midShifted, col0, diag, perm, diagShifted, shift);
923  if(fMidShifted>0)
924  {
925  // fMid was erroneous, fix it:
926  shift = fMidShifted > Literal(0) ? left : right;
927  diagShifted = diag - shift;
928  }
929  }
930 
931  // initial guess
932  RealScalar muPrev, muCur;
933  // we can test exact equality here, because shift comes from `... ? left : right`
934  if (numext::equal_strict(shift, left))
935  {
936  muPrev = (right - left) * RealScalar(0.1);
937  if (k == actual_n-1) muCur = right - left;
938  else muCur = (right - left) * RealScalar(0.5);
939  }
940  else
941  {
942  muPrev = -(right - left) * RealScalar(0.1);
943  muCur = -(right - left) * RealScalar(0.5);
944  }
945 
946  RealScalar fPrev = secularEq(muPrev, col0, diag, perm, diagShifted, shift);
947  RealScalar fCur = secularEq(muCur, col0, diag, perm, diagShifted, shift);
948  if (abs(fPrev) < abs(fCur))
949  {
950  swap(fPrev, fCur);
951  swap(muPrev, muCur);
952  }
953 
954  // rational interpolation: fit a function of the form a / mu + b through the two previous
955  // iterates and use its zero to compute the next iterate
956  bool useBisection = fPrev*fCur>Literal(0);
957  while (!numext::is_exactly_zero(fCur) && abs(muCur - muPrev) > Literal(8) * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(abs(muCur), abs(muPrev)) && abs(fCur - fPrev) > NumTraits<RealScalar>::epsilon() && !useBisection)
958  {
959  ++m_numIters;
960 
961  // Find a and b such that the function f(mu) = a / mu + b matches the current and previous samples.
962  RealScalar a = (fCur - fPrev) / (Literal(1)/muCur - Literal(1)/muPrev);
963  RealScalar b = fCur - a / muCur;
964  // And find mu such that f(mu)==0:
965  RealScalar muZero = -a/b;
966  RealScalar fZero = secularEq(muZero, col0, diag, perm, diagShifted, shift);
967 
968 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
970 #endif
971 
972  muPrev = muCur;
973  fPrev = fCur;
974  muCur = muZero;
975  fCur = fZero;
976 
977  // we can test exact equality here, because shift comes from `... ? left : right`
978  if (numext::equal_strict(shift, left) && (muCur < Literal(0) || muCur > right - left)) useBisection = true;
979  if (numext::equal_strict(shift, right) && (muCur < -(right - left) || muCur > Literal(0))) useBisection = true;
980  if (abs(fCur)>abs(fPrev)) useBisection = true;
981  }
982 
983  // fall back on bisection method if rational interpolation did not work
984  if (useBisection)
985  {
986 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
987  std::cout << "useBisection for k = " << k << ", actual_n = " << actual_n << "\n";
988 #endif
989  RealScalar leftShifted, rightShifted;
990  // we can test exact equality here, because shift comes from `... ? left : right`
991  if (numext::equal_strict(shift, left))
992  {
993  // to avoid overflow, we must have mu > max(real_min, |z(k)|/sqrt(real_max)),
994  // the factor 2 is to be more conservative
995  leftShifted = numext::maxi<RealScalar>( (std::numeric_limits<RealScalar>::min)(), Literal(2) * abs(col0(k)) / sqrt((std::numeric_limits<RealScalar>::max)()) );
996 
997  // check that we did it right:
998  eigen_internal_assert( (numext::isfinite)( (col0(k)/leftShifted)*(col0(k)/(diag(k)+shift+leftShifted)) ) );
999  // I don't understand why the case k==0 would be special there:
1000  // if (k == 0) rightShifted = right - left; else
1001  rightShifted = (k==actual_n-1) ? right : ((right - left) * RealScalar(0.51)); // theoretically we can take 0.5, but let's be safe
1002  }
1003  else
1004  {
1005  leftShifted = -(right - left) * RealScalar(0.51);
1006  if(k+1<n)
1007  rightShifted = -numext::maxi<RealScalar>( (std::numeric_limits<RealScalar>::min)(), abs(col0(k+1)) / sqrt((std::numeric_limits<RealScalar>::max)()) );
1008  else
1009  rightShifted = -(std::numeric_limits<RealScalar>::min)();
1010  }
1011 
1012  RealScalar fLeft = secularEq(leftShifted, col0, diag, perm, diagShifted, shift);
1013  eigen_internal_assert(fLeft<Literal(0));
1014 
1015 #if defined EIGEN_BDCSVD_DEBUG_VERBOSE || defined EIGEN_BDCSVD_SANITY_CHECKS || defined EIGEN_INTERNAL_DEBUGGING
1016  RealScalar fRight = secularEq(rightShifted, col0, diag, perm, diagShifted, shift);
1017 #endif
1018 
1019 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1020  if(!(numext::isfinite)(fLeft))
1021  std::cout << "f(" << leftShifted << ") =" << fLeft << " ; " << left << " " << shift << " " << right << "\n";
1023 
1024  if(!(numext::isfinite)(fRight))
1025  std::cout << "f(" << rightShifted << ") =" << fRight << " ; " << left << " " << shift << " " << right << "\n";
1026  // eigen_internal_assert((numext::isfinite)(fRight));
1027 #endif
1028 
1029 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
1030  if(!(fLeft * fRight<0))
1031  {
1032  std::cout << "f(leftShifted) using leftShifted=" << leftShifted << " ; diagShifted(1:10):" << diagShifted.head(10).transpose() << "\n ; "
1033  << "left==shift=" << bool(left==shift) << " ; left-shift = " << (left-shift) << "\n";
1034  std::cout << "k=" << k << ", " << fLeft << " * " << fRight << " == " << fLeft * fRight << " ; "
1035  << "[" << left << " .. " << right << "] -> [" << leftShifted << " " << rightShifted << "], shift=" << shift
1036  << " , f(right)=" << secularEq(0, col0, diag, perm, diagShifted, shift)
1037  << " == " << secularEq(right, col0, diag, perm, diag, 0) << " == " << fRight << "\n";
1038  }
1039 #endif
1040  eigen_internal_assert(fLeft * fRight < Literal(0));
1041 
1042  if(fLeft<Literal(0))
1043  {
1044  while (rightShifted - leftShifted > Literal(2) * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(abs(leftShifted), abs(rightShifted)))
1045  {
1046  RealScalar midShifted = (leftShifted + rightShifted) / Literal(2);
1047  fMid = secularEq(midShifted, col0, diag, perm, diagShifted, shift);
1049 
1050  if (fLeft * fMid < Literal(0))
1051  {
1052  rightShifted = midShifted;
1053  }
1054  else
1055  {
1056  leftShifted = midShifted;
1057  fLeft = fMid;
1058  }
1059  }
1060  muCur = (leftShifted + rightShifted) / Literal(2);
1061  }
1062  else
1063  {
1064  // We have a problem as shifting on the left or right give either a positive or negative value
1065  // at the middle of [left,right]...
1066  // Instead fo abbording or entering an infinite loop,
1067  // let's just use the middle as the estimated zero-crossing:
1068  muCur = (right - left) * RealScalar(0.5);
1069  // we can test exact equality here, because shift comes from `... ? left : right`
1070  if(numext::equal_strict(shift, right))
1071  muCur = -muCur;
1072  }
1073  }
1074 
1075  singVals[k] = shift + muCur;
1076  shifts[k] = shift;
1077  mus[k] = muCur;
1078 
1079 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
1080  if(k+1<n)
1081  std::cout << "found " << singVals[k] << " == " << shift << " + " << muCur << " from " << diag(k) << " .. " << diag(k+1) << "\n";
1082 #endif
1083 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1084  eigen_internal_assert(k==0 || singVals[k]>=singVals[k-1]);
1085  eigen_internal_assert(singVals[k]>=diag(k));
1086 #endif
1087 
1088  // perturb singular value slightly if it equals diagonal entry to avoid division by zero later
1089  // (deflation is supposed to prevent this from happening)
1090  // - this does not seem to be necessary anymore -
1091  // if (singVals[k] == left) singVals[k] *= 1 + NumTraits<RealScalar>::epsilon();
1092  // if (singVals[k] == right) singVals[k] *= 1 - NumTraits<RealScalar>::epsilon();
1093  }
1094 }
1095 
1096 // zhat is a perturbation of col0 for which singular vectors can be computed stably (see Section 3.1)
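// Editorial note (not from the original file), paraphrasing equation (3.6) of the Gu and Eisenstat report:
// for each non-deflated index k, the stable perturbation satisfies
//
//   zhat_k^2 = (sigma_M^2 - d_k^2) * prod (sigma_j^2 - d_k^2) / (d_i^2 - d_k^2),
//
// where sigma_M is the largest singular value of the deflated problem and the product runs over the remaining
// non-deflated indices, numerators and denominators paired as in the report (this pairing is what the index
// juggling with perm below realises). Each difference of squares is evaluated in the factored form
// (sigma + d_k) * (mu + (shift - d_k)), using sigma = shift + mu, to avoid the cancellation that computing
// sigma - d_k directly would incur; the sign of zhat_k is copied from col0(k).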
1097 template <typename MatrixType, int Options>
1098 void BDCSVD<MatrixType, Options>::perturbCol0(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef& perm,
1099  const VectorType& singVals, const ArrayRef& shifts, const ArrayRef& mus,
1100  ArrayRef zhat) {
1101  using std::sqrt;
1102  Index n = col0.size();
1103  Index m = perm.size();
1104  if(m==0)
1105  {
1106  zhat.setZero();
1107  return;
1108  }
1109  Index lastIdx = perm(m-1);
1110  // The offset permits skipping deflated entries while computing zhat
1111  for (Index k = 0; k < n; ++k)
1112  {
1113  if (numext::is_exactly_zero(col0(k))) // deflated
1114  zhat(k) = Literal(0);
1115  else
1116  {
1117  // see equation (3.6)
1118  RealScalar dk = diag(k);
1119  RealScalar prod = (singVals(lastIdx) + dk) * (mus(lastIdx) + (shifts(lastIdx) - dk));
1120 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1121  if(prod<0) {
1122  std::cout << "k = " << k << " ; z(k)=" << col0(k) << ", diag(k)=" << dk << "\n";
1123  std::cout << "prod = " << "(" << singVals(lastIdx) << " + " << dk << ") * (" << mus(lastIdx) << " + (" << shifts(lastIdx) << " - " << dk << "))" << "\n";
1124  std::cout << " = " << singVals(lastIdx) + dk << " * " << mus(lastIdx) + (shifts(lastIdx) - dk) << "\n";
1125  }
1126  eigen_internal_assert(prod>=0);
1127 #endif
1128 
1129  for(Index l = 0; l<m; ++l)
1130  {
1131  Index i = perm(l);
1132  if(i!=k)
1133  {
1134 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1135  if(i>=k && (l==0 || l-1>=m))
1136  {
1137  std::cout << "Error in perturbCol0\n";
1138  std::cout << " " << k << "/" << n << " " << l << "/" << m << " " << i << "/" << n << " ; " << col0(k) << " " << diag(k) << " " << "\n";
1139  std::cout << " " <<diag(i) << "\n";
1140  Index j = (i<k /*|| l==0*/) ? i : perm(l-1);
1141  std::cout << " " << "j=" << j << "\n";
1142  }
1143 #endif
1144  // Avoid index out of bounds.
1145  // Will end up setting zhat(k) = 0.
1146  if (i >= k && l == 0) {
1147  m_info = NumericalIssue;
1148  prod = 0;
1149  break;
1150  }
1151  Index j = i<k ? i : l > 0 ? perm(l-1) : i;
1152 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1153  if(!(dk!=Literal(0) || diag(i)!=Literal(0)))
1154  {
1155  std::cout << "k=" << k << ", i=" << i << ", l=" << l << ", perm.size()=" << perm.size() << "\n";
1156  }
1157  eigen_internal_assert(dk!=Literal(0) || diag(i)!=Literal(0));
1158 #endif
1159  prod *= ((singVals(j)+dk) / ((diag(i)+dk))) * ((mus(j)+(shifts(j)-dk)) / ((diag(i)-dk)));
1160 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1161  eigen_internal_assert(prod>=0);
1162 #endif
1163 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
1164  if(i!=k && numext::abs(((singVals(j)+dk)*(mus(j)+(shifts(j)-dk)))/((diag(i)+dk)*(diag(i)-dk)) - 1) > 0.9 )
1165  std::cout << " " << ((singVals(j)+dk)*(mus(j)+(shifts(j)-dk)))/((diag(i)+dk)*(diag(i)-dk)) << " == (" << (singVals(j)+dk) << " * " << (mus(j)+(shifts(j)-dk))
1166  << ") / (" << (diag(i)+dk) << " * " << (diag(i)-dk) << ")\n";
1167 #endif
1168  }
1169  }
1170 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
1171  std::cout << "zhat(" << k << ") = sqrt( " << prod << ") ; " << (singVals(lastIdx) + dk) << " * " << mus(lastIdx) + shifts(lastIdx) << " - " << dk << "\n";
1172 #endif
1173  RealScalar tmp = sqrt(prod);
1174 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1176 #endif
1177  zhat(k) = col0(k) > Literal(0) ? RealScalar(tmp) : RealScalar(-tmp);
1178  }
1179  }
1180 }
1181 
1182 // compute singular vectors
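// Editorial note (not part of the original file): with sigma_k = shifts(k) + mus(k), the loops below build,
// for each non-deflated index i,
//
//   U(i,k) ~ zhat_i / (d_i^2 - sigma_k^2)      and      V(i,k) ~ d_i * zhat_i / (d_i^2 - sigma_k^2),
//
// where d_i - sigma_k is evaluated as (d_i - shifts(k)) - mus(k) for accuracy. The extra components are set to
// U(n,k) = 0 and V(0,k) = -1, and each column is normalized afterwards; deflated columns are plain unit vectors.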
1183 template <typename MatrixType, int Options>
1184 void BDCSVD<MatrixType, Options>::computeSingVecs(const ArrayRef& zhat, const ArrayRef& diag, const IndicesRef& perm,
1185  const VectorType& singVals, const ArrayRef& shifts,
1186  const ArrayRef& mus, MatrixXr& U, MatrixXr& V) {
1187  Index n = zhat.size();
1188  Index m = perm.size();
1189 
1190  for (Index k = 0; k < n; ++k)
1191  {
1192  if (numext::is_exactly_zero(zhat(k)))
1193  {
1194  U.col(k) = VectorType::Unit(n+1, k);
1195  if (m_compV) V.col(k) = VectorType::Unit(n, k);
1196  }
1197  else
1198  {
1199  U.col(k).setZero();
1200  for(Index l=0;l<m;++l)
1201  {
1202  Index i = perm(l);
1203  U(i,k) = zhat(i)/(((diag(i) - shifts(k)) - mus(k)) )/( (diag(i) + singVals[k]));
1204  }
1205  U(n,k) = Literal(0);
1206  U.col(k).normalize();
1207 
1208  if (m_compV)
1209  {
1210  V.col(k).setZero();
1211  for(Index l=1;l<m;++l)
1212  {
1213  Index i = perm(l);
1214  V(i,k) = diag(i) * zhat(i) / (((diag(i) - shifts(k)) - mus(k)) )/( (diag(i) + singVals[k]));
1215  }
1216  V(0,k) = Literal(-1);
1217  V.col(k).normalize();
1218  }
1219  }
1220  }
1221  U.col(n) = VectorType::Unit(n+1, n);
1222 }
1223 
1224 // pages 12-13
1225 // i >= 1, di almost null and zi non null.
1226 // We use a rotation, applied to the left of M, to zero out zi
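// Editorial sketch (not from the original file): restricted to rows/columns 0 and i of the block, with
// c = M(0,0) and s = M(i,0), the Givens rotation
//
//   G = [  c/r   s/r ]        r = hypot(c, s)
//       [ -s/r   c/r ],
//
// maps the first column (c, s)^T to (r, 0)^T. The code below applies this update to m_computed directly
// (writing r and the zeros explicitly) and accumulates the corresponding rotation into m_naiveU.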
1227 template <typename MatrixType, int Options>
1228 void BDCSVD<MatrixType, Options>::deflation43(Index firstCol, Index shift, Index i,
1229  Index size) {
1230  using std::abs;
1231  using std::sqrt;
1232  using std::pow;
1233  Index start = firstCol + shift;
1234  RealScalar c = m_computed(start, start);
1235  RealScalar s = m_computed(start+i, start);
1236  RealScalar r = numext::hypot(c,s);
1237  if (numext::is_exactly_zero(r))
1238  {
1239  m_computed(start+i, start+i) = Literal(0);
1240  return;
1241  }
1242  m_computed(start,start) = r;
1243  m_computed(start+i, start) = Literal(0);
1244  m_computed(start+i, start+i) = Literal(0);
1245 
1246  JacobiRotation<RealScalar> J(c/r,-s/r);
1247  if (m_compU) m_naiveU.middleRows(firstCol, size+1).applyOnTheRight(firstCol, firstCol+i, J);
1248  else m_naiveU.applyOnTheRight(firstCol, firstCol+i, J);
1249 } // end deflation 43
1250 
1251 // page 13
1252 // i,j >= 1, i!=j and |di - dj| < epsilon * norm2(M)
1253 // We apply two rotations to have zj = 0;
1254 // TODO deflation44 is still broken and not properly tested
1255 template <typename MatrixType, int Options>
1256 void BDCSVD<MatrixType, Options>::deflation44(Index firstColu, Index firstColm, Index firstRowW,
1257  Index firstColW, Index i, Index j,
1258  Index size) {
1259  using std::abs;
1260  using std::sqrt;
1261  using std::conj;
1262  using std::pow;
1263  RealScalar c = m_computed(firstColm+i, firstColm);
1264  RealScalar s = m_computed(firstColm+j, firstColm);
1265  RealScalar r = sqrt(numext::abs2(c) + numext::abs2(s));
1266 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
1267  std::cout << "deflation 4.4: " << i << "," << j << " -> " << c << " " << s << " " << r << " ; "
1268  << m_computed(firstColm + i-1, firstColm) << " "
1269  << m_computed(firstColm + i, firstColm) << " "
1270  << m_computed(firstColm + i+1, firstColm) << " "
1271  << m_computed(firstColm + i+2, firstColm) << "\n";
1272  std::cout << m_computed(firstColm + i-1, firstColm + i-1) << " "
1273  << m_computed(firstColm + i, firstColm+i) << " "
1274  << m_computed(firstColm + i+1, firstColm+i+1) << " "
1275  << m_computed(firstColm + i+2, firstColm+i+2) << "\n";
1276 #endif
1277  if (numext::is_exactly_zero(r))
1278  {
1279  m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j);
1280  return;
1281  }
1282  c/=r;
1283  s/=r;
1284  m_computed(firstColm + i, firstColm) = r;
1285  m_computed(firstColm + j, firstColm + j) = m_computed(firstColm + i, firstColm + i);
1286  m_computed(firstColm + j, firstColm) = Literal(0);
1287 
1289  if (m_compU) m_naiveU.middleRows(firstColu, size+1).applyOnTheRight(firstColu + i, firstColu + j, J);
1290  else m_naiveU.applyOnTheRight(firstColu+i, firstColu+j, J);
1291  if (m_compV) m_naiveV.middleRows(firstRowW, size).applyOnTheRight(firstColW + i, firstColW + j, J);
1292 } // end deflation 44
1293 
1294 // acts on block from (firstCol+shift, firstCol+shift) to (lastCol+shift, lastCol+shift) [inclusive]
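// Editorial overview (not from the original file) of the deflation steps applied below, in the numbering of
// the Gu and Eisenstat report:
//   4.1: if diag(0) is below epsilon_coarse, it is replaced by epsilon_coarse;
//   4.2: entries of col0 smaller than epsilon_strict are set to exactly zero (their singular value is the diagonal entry);
//   4.3: if some diag(i) is below epsilon_coarse, a rotation (deflation43) zeroes col0(i);
//   4.4: after sorting the diagonal, nearly equal consecutive diagonal entries are merged by a rotation (deflation44).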
1295 template <typename MatrixType, int Options>
1296 void BDCSVD<MatrixType, Options>::deflation(Index firstCol, Index lastCol, Index k,
1297  Index firstRowW, Index firstColW, Index shift) {
1298  using std::sqrt;
1299  using std::abs;
1300  const Index length = lastCol + 1 - firstCol;
1301 
1302  Block<MatrixXr,Dynamic,1> col0(m_computed, firstCol+shift, firstCol+shift, length, 1);
1303  Diagonal<MatrixXr> fulldiag(m_computed);
1304  VectorBlock<Diagonal<MatrixXr>,Dynamic> diag(fulldiag, firstCol+shift, length);
1305 
1306  const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
1307  RealScalar maxDiag = diag.tail((std::max)(Index(1),length-1)).cwiseAbs().maxCoeff();
1308  RealScalar epsilon_strict = numext::maxi<RealScalar>(considerZero,NumTraits<RealScalar>::epsilon() * maxDiag);
1309  RealScalar epsilon_coarse = Literal(8) * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(col0.cwiseAbs().maxCoeff(), maxDiag);
1310 
1311 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1312  eigen_internal_assert(m_naiveU.allFinite());
1313  eigen_internal_assert(m_naiveV.allFinite());
1314  eigen_internal_assert(m_computed.allFinite());
1315 #endif
1316 
1317 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
1318  std::cout << "\ndeflate:" << diag.head(k+1).transpose() << " | " << diag.segment(k+1,length-k-1).transpose() << "\n";
1319 #endif
1320 
1321  //condition 4.1
1322  if (diag(0) < epsilon_coarse)
1323  {
1324 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
1325  std::cout << "deflation 4.1, because " << diag(0) << " < " << epsilon_coarse << "\n";
1326 #endif
1327  diag(0) = epsilon_coarse;
1328  }
1329 
1330  //condition 4.2
1331  for (Index i=1;i<length;++i)
1332  if (abs(col0(i)) < epsilon_strict)
1333  {
1334 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
1335  std::cout << "deflation 4.2, set z(" << i << ") to zero because " << abs(col0(i)) << " < " << epsilon_strict << " (diag(" << i << ")=" << diag(i) << ")\n";
1336 #endif
1337  col0(i) = Literal(0);
1338  }
1339 
1340  //condition 4.3
1341  for (Index i=1;i<length; i++)
1342  if (diag(i) < epsilon_coarse)
1343  {
1344 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
1345  std::cout << "deflation 4.3, cancel z(" << i << ")=" << col0(i) << " because diag(" << i << ")=" << diag(i) << " < " << epsilon_coarse << "\n";
1346 #endif
1347  deflation43(firstCol, shift, i, length);
1348  }
1349 
1350 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1351  eigen_internal_assert(m_naiveU.allFinite());
1352  eigen_internal_assert(m_naiveV.allFinite());
1353  eigen_internal_assert(m_computed.allFinite());
1354 #endif
1355 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
1356  std::cout << "to be sorted: " << diag.transpose() << "\n\n";
1357  std::cout << " : " << col0.transpose() << "\n\n";
1358 #endif
1359  {
1360  // Check for total deflation:
1361  // If we have a total deflation, then we have to consider col0(0)==diag(0) as a singular value during sorting.
1362  const bool total_deflation = (col0.tail(length-1).array().abs()<considerZero).all();
1363 
1364  // Sort the diagonal entries: since diag(1:k-1) and diag(k:length) are already sorted, let's do a sorted merge.
1365  // First, compute the respective permutation.
1366  Index *permutation = m_workspaceI.data();
1367  {
1368  permutation[0] = 0;
1369  Index p = 1;
1370 
1371  // Move deflated diagonal entries to the end.
1372  for(Index i=1; i<length; ++i)
1373  if(abs(diag(i))<considerZero)
1374  permutation[p++] = i;
1375 
1376  Index i=1, j=k+1;
1377  for( ; p < length; ++p)
1378  {
1379  if (i > k) permutation[p] = j++;
1380  else if (j >= length) permutation[p] = i++;
1381  else if (diag(i) < diag(j)) permutation[p] = j++;
1382  else permutation[p] = i++;
1383  }
1384  }
1385 
1386  // If we have a total deflation, then we have to insert diag(0) at the right place
1387  if(total_deflation)
1388  {
1389  for(Index i=1; i<length; ++i)
1390  {
1391  Index pi = permutation[i];
1392  if(abs(diag(pi))<considerZero || diag(0)<diag(pi))
1393  permutation[i-1] = permutation[i];
1394  else
1395  {
1396  permutation[i-1] = 0;
1397  break;
1398  }
1399  }
1400  }
1401 
1402  // Current index of each col, and current column of each index
1403  Index *realInd = m_workspaceI.data()+length;
1404  Index *realCol = m_workspaceI.data()+2*length;
1405 
1406  for(int pos = 0; pos< length; pos++)
1407  {
1408  realCol[pos] = pos;
1409  realInd[pos] = pos;
1410  }
1411 
1412  for(Index i = total_deflation?0:1; i < length; i++)
1413  {
1414  const Index pi = permutation[length - (total_deflation ? i+1 : i)];
1415  const Index J = realCol[pi];
1416 
1417  using std::swap;
1418  // swap diagonal and first column entries:
1419  swap(diag(i), diag(J));
1420  if(i!=0 && J!=0) swap(col0(i), col0(J));
1421 
1422  // change columns
1423  if (m_compU) m_naiveU.col(firstCol+i).segment(firstCol, length + 1).swap(m_naiveU.col(firstCol+J).segment(firstCol, length + 1));
1424  else m_naiveU.col(firstCol+i).segment(0, 2) .swap(m_naiveU.col(firstCol+J).segment(0, 2));
1425  if (m_compV) m_naiveV.col(firstColW + i).segment(firstRowW, length).swap(m_naiveV.col(firstColW + J).segment(firstRowW, length));
1426 
1427  //update real pos
1428  const Index realI = realInd[i];
1429  realCol[realI] = J;
1430  realCol[pi] = i;
1431  realInd[J] = realI;
1432  realInd[i] = pi;
1433  }
1434  }
1435 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
1436  std::cout << "sorted: " << diag.transpose().format(bdcsvdfmt) << "\n";
1437  std::cout << " : " << col0.transpose() << "\n\n";
1438 #endif
1439 
1440  //condition 4.4
1441  {
1442  Index i = length-1;
1443  while(i>0 && (abs(diag(i))<considerZero || abs(col0(i))<considerZero)) --i;
1444  for(; i>1;--i)
1445  if( (diag(i) - diag(i-1)) < NumTraits<RealScalar>::epsilon()*maxDiag )
1446  {
1447 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
1448  std::cout << "deflation 4.4 with i = " << i << " because " << diag(i) << " - " << diag(i-1) << " == " << (diag(i) - diag(i-1)) << " < " << NumTraits<RealScalar>::epsilon()*/*diag(i)*/maxDiag << "\n";
1449 #endif
1450  eigen_internal_assert(abs(diag(i) - diag(i-1))<epsilon_coarse && " diagonal entries are not properly sorted");
1451  deflation44(firstCol, firstCol + shift, firstRowW, firstColW, i-1, i, length);
1452  }
1453  }
1454 
1455 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1456  for(Index j=2;j<length;++j)
1457  eigen_internal_assert(diag(j-1)<=diag(j) || abs(diag(j))<considerZero);
1458 #endif
1459 
1460 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1461  eigen_internal_assert(m_naiveU.allFinite());
1462  eigen_internal_assert(m_naiveV.allFinite());
1463  eigen_internal_assert(m_computed.allFinite());
1464 #endif
1465 } // end deflation
1466 
1473 template <typename Derived>
1474 template <int Options>
1475 BDCSVD<typename MatrixBase<Derived>::PlainObject, Options> MatrixBase<Derived>::bdcSvd() const {
1476  return BDCSVD<PlainObject, Options>(*this);
1477 }
1478 
1485 template <typename Derived>
1486 template <int Options>
1487 BDCSVD<typename MatrixBase<Derived>::PlainObject, Options> MatrixBase<Derived>::bdcSvd(
1488  unsigned int computationOptions) const {
1489  return BDCSVD<PlainObject, Options>(*this, computationOptions);
1490 }
1491 
1492 } // end namespace Eigen
1493 
1494 #endif
Definition: BDCSVD.h:132
HouseholderQR< MatrixX > qrDecomp
Definition: BDCSVD.h:265
Base::Scalar Scalar
Definition: BDCSVD.h:114
void computeSingVals(const ArrayRef &col0, const ArrayRef &diag, const IndicesRef &perm, VectorType &singVals, ArrayRef shifts, ArrayRef mus)
Definition: BDCSVD.h:849
bool m_compV
Definition: BDCSVD.h:263
Array< Index, 1, Dynamic > ArrayXi
Definition: BDCSVD.h:139
EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT
Definition: EigenBase.h:62
ArrayXr m_workspace
Definition: BDCSVD.h:260
MatrixX reducedTriangle
Definition: BDCSVD.h:268
EIGEN_DEPRECATED BDCSVD & compute(const MatrixType &matrix, unsigned int computationOptions)
Method performing the decomposition of given matrix, as specified by the computationOptions parameter...
Definition: BDCSVD.h:227
void deflation44(Index firstColu, Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size)
Definition: BDCSVD.h:1256
Ref< ArrayXr > ArrayRef
Definition: BDCSVD.h:140
internal::UpperBidiagonalization< MatrixX > bid
Definition: BDCSVD.h:266
BDCSVD(Index rows, Index cols)
Default Constructor with memory preallocation.
Definition: BDCSVD.h:157
Expression of a fixed-size or dynamic-size block.
Definition: Block.h:107
TransposeReturnType transpose()
Definition: Transpose.h:184
ConstRowwiseReturnType rowwise() const
Definition: DenseBase.h:548
bool allFinite() const
Definition: Visitor.h:835
void swap(const DenseBase< OtherDerived > &other)
Definition: DenseBase.h:418
Expression of a diagonal/subdiagonal/superdiagonal in a matrix.
Definition: Diagonal.h:67
Householder QR decomposition of a matrix.
Definition: HouseholderQR.h:60
Rotation given by a cosine-sine pair.
Definition: Jacobi.h:37
A matrix or vector expression mapping an existing array of data.
Definition: Map.h:98
NoAlias< Derived, Eigen::MatrixBase > noalias()
Definition: NoAlias.h:104
ArrayWrapper< Derived > array()
Definition: MatrixBase.h:324
BDCSVD< PlainObject, Options > bdcSvd() const
static const IdentityReturnType Identity()
void normalize()
Definition: Dot.h:143
EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT
EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT
Derived & setZero(Index size)
constexpr void resize(Index rows, Index cols)
A matrix or vector expression mapping an existing expression.
Definition: Ref.h:285
Base class of SVD algorithms.
Definition: SVDBase.h:120
Index cols() const
Definition: SVDBase.h:287
internal::make_proper_matrix_type< Scalar, RowsAtCompileTime, MatrixUColsAtCompileTime, MatrixOptions, MaxRowsAtCompileTime, MatrixUMaxColsAtCompileTime >::type MatrixUType
Definition: SVDBase.h:156
ComputationInfo m_info
Definition: SVDBase.h:346
bool m_computeThinU
Definition: SVDBase.h:348
const SingularValuesType & singularValues() const
Definition: SVDBase.h:203
NumTraits< typename MatrixType::Scalar >::Real RealScalar
Definition: SVDBase.h:128
bool computeV() const
Definition: SVDBase.h:284
Eigen::Index Index
Definition: SVDBase.h:130
bool m_isInitialized
Definition: SVDBase.h:347
MatrixVType m_matrixV
Definition: SVDBase.h:344
Index m_diagSize
Definition: SVDBase.h:351
bool computeU() const
Definition: SVDBase.h:282
internal::plain_diag_type< MatrixType, RealScalar >::type SingularValuesType
Definition: SVDBase.h:161
internal::make_proper_matrix_type< Scalar, ColsAtCompileTime, MatrixVColsAtCompileTime, MatrixOptions, MaxColsAtCompileTime, MatrixVMaxColsAtCompileTime >::type MatrixVType
Definition: SVDBase.h:159
MatrixType::Scalar Scalar
Definition: SVDBase.h:127
Index m_nonzeroSingularValues
Definition: SVDBase.h:351
Index rows() const
Definition: SVDBase.h:286
SingularValuesType m_singularValues
Definition: SVDBase.h:345
bool m_computeThinV
Definition: SVDBase.h:349
@ ColsAtCompileTime
Definition: SVDBase.h:139
@ DiagSizeAtCompileTime
Definition: SVDBase.h:140
@ MaxRowsAtCompileTime
Definition: SVDBase.h:141
@ MaxDiagSizeAtCompileTime
Definition: SVDBase.h:143
@ MaxColsAtCompileTime
Definition: SVDBase.h:142
@ RowsAtCompileTime
Definition: SVDBase.h:138
internal::traits< BDCSVD< MatrixType_, Options_ > >::MatrixType MatrixType
Definition: SVDBase.h:126
MatrixUType m_matrixU
Definition: SVDBase.h:343
Expression of a fixed-size or dynamic-size sub-vector.
Definition: VectorBlock.h:62
void reverseInPlace()
Definition: Reverse.h:212
static const Eigen::internal::all_t all
@ Aligned
Definition: Constants.h:242
@ DisableQRDecomposition
Definition: Constants.h:435
@ NumericalIssue
Definition: Constants.h:448
@ InvalidInput
Definition: Constants.h:453
@ Success
Definition: Constants.h:446
@ NoConvergence
Definition: Constants.h:450
Matrix< Type, Dynamic, Dynamic > MatrixX
[c++11] Dynamic×Dynamic matrix of type Type.
Definition: Matrix.h:537
bfloat16() max(const bfloat16 &a, const bfloat16 &b)
Definition: BFloat16.h:690
bfloat16 pow(const bfloat16 &a, const bfloat16 &b)
Definition: BFloat16.h:626
bfloat16() min(const bfloat16 &a, const bfloat16 &b)
Definition: BFloat16.h:684
@ QRPreconditionerBits
Definition: SVDBase.h:26
@ ComputationOptionsBits
Definition: SVDBase.h:28
constexpr int get_computation_options(int options)
Definition: SVDBase.h:33
void swap(scoped_array< T > &a, scoped_array< T > &b)
Definition: Memory.h:788
bool equal_strict(const X &x, const Y &y)
Definition: Meta.h:460
bool abs2(bool x)
EIGEN_ALWAYS_INLINE bool() isfinite(const Eigen::bfloat16 &h)
Definition: BFloat16.h:790
bool is_exactly_zero(const X &x)
Definition: Meta.h:475
EIGEN_ALWAYS_INLINE std::enable_if_t< NumTraits< T >::IsSigned||NumTraits< T >::IsComplex, typename NumTraits< T >::Real > abs(const T &x)
: InteropHeaders
Definition: Core:139
EIGEN_DEFAULT_DENSE_INDEX_TYPE Index
The Index type as used for the API.
Definition: Meta.h:82
const Eigen::CwiseUnaryOp< Eigen::internal::scalar_conjugate_op< typename Derived::Scalar >, const Derived > conj(const Eigen::ArrayBase< Derived > &x)
const int Dynamic
Definition: Constants.h:24
const Eigen::CwiseUnaryOp< Eigen::internal::scalar_abs_op< typename Derived::Scalar >, const Derived > abs(const Eigen::ArrayBase< Derived > &x)
const Eigen::CwiseUnaryOp< Eigen::internal::scalar_sqrt_op< typename Derived::Scalar >, const Derived > sqrt(const Eigen::ArrayBase< Derived > &x)
Eigen::Index Index
The interface type of indices.
Definition: EigenBase.h:41
EIGEN_CONSTEXPR Index size() const EIGEN_NOEXCEPT
Definition: EigenBase.h:69
Holds information about the various numeric (i.e. scalar) types allowed by Eigen.
Definition: NumTraits.h:231
std::ptrdiff_t j