// IncompleteLUT.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_INCOMPLETE_LUT_H
#define EIGEN_INCOMPLETE_LUT_H


#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {

30 template <typename VectorV, typename VectorI>
31 Index QuickSplit(VectorV &row, VectorI &ind, Index ncut)
32 {
33  typedef typename VectorV::RealScalar RealScalar;
34  using std::swap;
35  using std::abs;
36  Index mid;
37  Index n = row.size(); /* length of the vector */
38  Index first, last ;
39 
40  ncut--; /* to fit the zero-based indices */
41  first = 0;
42  last = n-1;
43  if (ncut < first || ncut > last ) return 0;
44 
45  do {
46  mid = first;
47  RealScalar abskey = abs(row(mid));
48  for (Index j = first + 1; j <= last; j++) {
49  if ( abs(row(j)) > abskey) {
50  ++mid;
51  swap(row(mid), row(j));
52  swap(ind(mid), ind(j));
53  }
54  }
55  /* Interchange for the pivot element */
56  swap(row(mid), row(first));
57  swap(ind(mid), ind(first));
58 
59  if (mid > ncut) last = mid - 1;
60  else if (mid < ncut ) first = mid + 1;
61  } while (mid != ncut );
62 
63  return 0; /* mid is equal to ncut */
64 }

}// end namespace internal

100 template <typename Scalar_, typename StorageIndex_ = int>
101 class IncompleteLUT : public SparseSolverBase<IncompleteLUT<Scalar_, StorageIndex_> >
102 {
103  protected:
105  using Base::m_isInitialized;
106  public:
107  typedef Scalar_ Scalar;
108  typedef StorageIndex_ StorageIndex;
113 
114  enum {
117  };
118 
119  public:
120 
122  : m_droptol(NumTraits<Scalar>::dummy_precision()), m_fillfactor(10),
123  m_analysisIsOk(false), m_factorizationIsOk(false)
124  {}
125 
126  template<typename MatrixType>
127  explicit IncompleteLUT(const MatrixType& mat, const RealScalar& droptol=NumTraits<Scalar>::dummy_precision(), int fillfactor = 10)
128  : m_droptol(droptol),m_fillfactor(fillfactor),
129  m_analysisIsOk(false),m_factorizationIsOk(false)
130  {
131  eigen_assert(fillfactor != 0);
132  compute(mat);
133  }
134 
136 
138 
145  {
146  eigen_assert(m_isInitialized && "IncompleteLUT is not initialized.");
147  return m_info;
148  }
149 
150  template<typename MatrixType>
151  void analyzePattern(const MatrixType& amat);
152 
153  template<typename MatrixType>
154  void factorize(const MatrixType& amat);
155 
161  template<typename MatrixType>
163  {
164  analyzePattern(amat);
165  factorize(amat);
166  return *this;
167  }
168 
169  void setDroptol(const RealScalar& droptol);
170  void setFillfactor(int fillfactor);
171 
172  template<typename Rhs, typename Dest>
173  void _solve_impl(const Rhs& b, Dest& x) const
174  {
175  x = m_Pinv * b;
176  x = m_lu.template triangularView<UnitLower>().solve(x);
177  x = m_lu.template triangularView<Upper>().solve(x);
178  x = m_P * x;
179  }
180 
181 protected:
182 
184  struct keep_diag {
185  inline bool operator() (const Index& row, const Index& col, const Scalar&) const
186  {
187  return row!=col;
188  }
189  };
190 
191 protected:
192 
201 };
202 
207 template<typename Scalar, typename StorageIndex>
209 {
210  this->m_droptol = droptol;
211 }
212 
217 template<typename Scalar, typename StorageIndex>
219 {
220  this->m_fillfactor = fillfactor;
221 }
222 
223 template <typename Scalar, typename StorageIndex>
224 template<typename MatrixType_>
226 {
227  // Compute the Fill-reducing permutation
228  // Since ILUT does not perform any numerical pivoting,
229  // it is highly preferable to keep the diagonal through symmetric permutations.
230  // To this end, let's symmetrize the pattern and perform AMD on it.
233  // FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice.
234  // on the other hand for a really non-symmetric pattern, mat2*mat1 should be preferred...
236  AMDOrdering<StorageIndex> ordering;
237  ordering(AtA,m_P);
238  m_Pinv = m_P.inverse(); // cache the inverse permutation
239  m_analysisIsOk = true;
240  m_factorizationIsOk = false;
241  m_isInitialized = true;
242 }
243 
244 template <typename Scalar, typename StorageIndex>
245 template<typename MatrixType_>
247 {
248  using std::sqrt;
249  using std::swap;
250  using std::abs;
252 
253  eigen_assert((amat.rows() == amat.cols()) && "The factorization should be done on a square matrix");
254  Index n = amat.cols(); // Size of the matrix
255  m_lu.resize(n,n);
256  // Declare Working vectors and variables
257  Vector u(n) ; // real values of the row -- maximum size is n --
258  VectorI ju(n); // column position of the values in u -- maximum size is n
259  VectorI jr(n); // Indicate the position of the nonzero elements in the vector u -- A zero location is indicated by -1
260 
261  // Apply the fill-reducing permutation
262  eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
264  mat = amat.twistedBy(m_Pinv);
265 
266  // Initialization
267  jr.fill(-1);
268  ju.fill(0);
269  u.fill(0);
270 
271  // number of largest elements to keep in each row:
272  Index fill_in = (amat.nonZeros()*m_fillfactor)/n + 1;
273  if (fill_in > n) fill_in = n;
274 
275  // number of largest nonzero elements to keep in the L and the U part of the current row:
276  Index nnzL = fill_in/2;
277  Index nnzU = nnzL;
278  m_lu.reserve(n * (nnzL + nnzU + 1));
279 
280  // global loop over the rows of the sparse matrix
281  for (Index ii = 0; ii < n; ii++)
282  {
283  // 1 - copy the lower and the upper part of the row i of mat in the working vector u
284 
285  Index sizeu = 1; // number of nonzero elements in the upper part of the current row
286  Index sizel = 0; // number of nonzero elements in the lower part of the current row
287  ju(ii) = convert_index<StorageIndex>(ii);
288  u(ii) = 0;
289  jr(ii) = convert_index<StorageIndex>(ii);
290  RealScalar rownorm = 0;
291 
292  typename FactorType::InnerIterator j_it(mat, ii); // Iterate through the current row ii
293  for (; j_it; ++j_it)
294  {
295  Index k = j_it.index();
296  if (k < ii)
297  {
298  // copy the lower part
299  ju(sizel) = convert_index<StorageIndex>(k);
300  u(sizel) = j_it.value();
301  jr(k) = convert_index<StorageIndex>(sizel);
302  ++sizel;
303  }
304  else if (k == ii)
305  {
306  u(ii) = j_it.value();
307  }
308  else
309  {
310  // copy the upper part
311  Index jpos = ii + sizeu;
312  ju(jpos) = convert_index<StorageIndex>(k);
313  u(jpos) = j_it.value();
314  jr(k) = convert_index<StorageIndex>(jpos);
315  ++sizeu;
316  }
317  rownorm += numext::abs2(j_it.value());
318  }
319 
320  // 2 - detect possible zero row
321  if(rownorm==0)
322  {
323  m_info = NumericalIssue;
324  return;
325  }
326  // Take the 2-norm of the current row as a relative tolerance
327  rownorm = sqrt(rownorm);
328 
329  // 3 - eliminate the previous nonzero rows
330  Index jj = 0;
331  Index len = 0;
332  while (jj < sizel)
333  {
334  // In order to eliminate in the correct order,
335  // we must select first the smallest column index among ju(jj:sizel)
336  Index k;
337  Index minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment
338  k += jj;
339  if (minrow != ju(jj))
340  {
341  // swap the two locations
342  Index j = ju(jj);
343  swap(ju(jj), ju(k));
344  jr(minrow) = convert_index<StorageIndex>(jj);
345  jr(j) = convert_index<StorageIndex>(k);
346  swap(u(jj), u(k));
347  }
348  // Reset this location
349  jr(minrow) = -1;
350 
351  // Start elimination
352  typename FactorType::InnerIterator ki_it(m_lu, minrow);
353  while (ki_it && ki_it.index() < minrow) ++ki_it;
354  eigen_internal_assert(ki_it && ki_it.col()==minrow);
355  Scalar fact = u(jj) / ki_it.value();
356 
357  // drop too small elements
358  if(abs(fact) <= m_droptol)
359  {
360  jj++;
361  continue;
362  }
363 
364  // linear combination of the current row ii and the row minrow
365  ++ki_it;
366  for (; ki_it; ++ki_it)
367  {
368  Scalar prod = fact * ki_it.value();
369  Index j = ki_it.index();
370  Index jpos = jr(j);
371  if (jpos == -1) // fill-in element
372  {
373  Index newpos;
374  if (j >= ii) // dealing with the upper part
375  {
376  newpos = ii + sizeu;
377  sizeu++;
378  eigen_internal_assert(sizeu<=n);
379  }
380  else // dealing with the lower part
381  {
382  newpos = sizel;
383  sizel++;
384  eigen_internal_assert(sizel<=ii);
385  }
386  ju(newpos) = convert_index<StorageIndex>(j);
387  u(newpos) = -prod;
388  jr(j) = convert_index<StorageIndex>(newpos);
389  }
390  else
391  u(jpos) -= prod;
392  }
393  // store the pivot element
394  u(len) = fact;
395  ju(len) = convert_index<StorageIndex>(minrow);
396  ++len;
397 
398  jj++;
399  } // end of the elimination on the row ii
400 
401  // reset the upper part of the pointer jr to zero
402  for(Index k = 0; k <sizeu; k++) jr(ju(ii+k)) = -1;
403 
404  // 4 - partially sort and insert the elements in the m_lu matrix
405 
406  // sort the L-part of the row
407  sizel = len;
408  len = (std::min)(sizel, nnzL);
409  typename Vector::SegmentReturnType ul(u.segment(0, sizel));
410  typename VectorI::SegmentReturnType jul(ju.segment(0, sizel));
411  internal::QuickSplit(ul, jul, len);
412 
413  // store the largest m_fill elements of the L part
414  m_lu.startVec(ii);
415  for(Index k = 0; k < len; k++)
416  m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k);
417 
418  // store the diagonal element
419  // apply a shifting rule to avoid zero pivots (we are doing an incomplete factorization)
420  if (u(ii) == Scalar(0))
421  u(ii) = sqrt(m_droptol) * rownorm;
422  m_lu.insertBackByOuterInnerUnordered(ii, ii) = u(ii);
423 
424  // sort the U-part of the row
425  // apply the dropping rule first
426  len = 0;
427  for(Index k = 1; k < sizeu; k++)
428  {
429  if(abs(u(ii+k)) > m_droptol * rownorm )
430  {
431  ++len;
432  u(ii + len) = u(ii + k);
433  ju(ii + len) = ju(ii + k);
434  }
435  }
436  sizeu = len + 1; // +1 to take into account the diagonal element
437  len = (std::min)(sizeu, nnzU);
438  typename Vector::SegmentReturnType uu(u.segment(ii+1, sizeu-1));
439  typename VectorI::SegmentReturnType juu(ju.segment(ii+1, sizeu-1));
440  internal::QuickSplit(uu, juu, len);
441 
442  // store the largest elements of the U part
443  for(Index k = ii + 1; k < ii + len; k++)
444  m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k);
445  }
446  m_lu.finalize();
447  m_lu.makeCompressed();
448 
449  m_factorizationIsOk = true;
450  m_info = Success;
451 }

} // end namespace Eigen

#endif // EIGEN_INCOMPLETE_LUT_H
const AbsReturnType abs() const
const SqrtReturnType sqrt() const
Array< int, 3, 1 > b
int n
RowXpr row(Index i)
This is the const version of row(). *‍/.
ColXpr col(Index i)
This is the const version of col().
#define eigen_internal_assert(x)
Definition: Macros.h:908
#define EIGEN_NOEXCEPT
Definition: Macros.h:1260
#define EIGEN_CONSTEXPR
Definition: Macros.h:747
#define eigen_assert(x)
Definition: Macros.h:902
std::vector< int > ind
MatrixXd mat1(size, size)
Matrix< float, 1, Dynamic > MatrixType
Incomplete LU factorization with dual-threshold strategy.
Matrix< StorageIndex, Dynamic, 1 > VectorI
ComputationInfo m_info
SparseMatrix< Scalar, RowMajor, StorageIndex > FactorType
void analyzePattern(const MatrixType &amat)
EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT
void setFillfactor(int fillfactor)
void factorize(const MatrixType &amat)
EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT
NumTraits< Scalar >::Real RealScalar
IncompleteLUT & compute(const MatrixType &amat)
void _solve_impl(const Rhs &b, Dest &x) const
void setDroptol(const RealScalar &droptol)
SparseSolverBase< IncompleteLUT > Base
StorageIndex_ StorageIndex
PermutationMatrix< Dynamic, Dynamic, StorageIndex > m_Pinv
IncompleteLUT(const MatrixType &mat, const RealScalar &droptol=NumTraits< Scalar >::dummy_precision(), int fillfactor=10)
Matrix< Scalar, Dynamic, 1 > Vector
ComputationInfo info() const
Reports whether previous computation was successful.
PermutationMatrix< Dynamic, Dynamic, StorageIndex > m_P
TransposeReturnType transpose()
Index cols() const
Definition: SparseMatrix.h:167
Index rows() const
Definition: SparseMatrix.h:165
A base class for sparse solvers.
static const last_t last
ComputationInfo
Definition: Constants.h:444
@ NumericalIssue
Definition: Constants.h:448
@ Success
Definition: Constants.h:446
bfloat16() min(const bfloat16 &a, const bfloat16 &b)
Definition: BFloat16.h:684
Index QuickSplit(VectorV &row, VectorI &ind, Index ncut)
Definition: IncompleteLUT.h:31
IndexDest convert_index(const IndexSrc &idx)
Definition: XprHelper.h:64
EIGEN_CONSTEXPR Index first(const T &x) EIGEN_NOEXCEPT
void swap(scoped_array< T > &a, scoped_array< T > &b)
Definition: Memory.h:788
bool abs2(bool x)
: InteropHeaders
Definition: Core:139
EIGEN_DEFAULT_DENSE_INDEX_TYPE Index
The Index type as used for the API.
Definition: Meta.h:82
const int Dynamic
Definition: Constants.h:24
const Eigen::CwiseUnaryOp< Eigen::internal::scalar_abs_op< typename Derived::Scalar >, const Derived > abs(const Eigen::ArrayBase< Derived > &x)
const Eigen::CwiseUnaryOp< Eigen::internal::scalar_sqrt_op< typename Derived::Scalar >, const Derived > sqrt(const Eigen::ArrayBase< Derived > &x)
bool operator()(const Index &row, const Index &col, const Scalar &) const
Holds information about the various numeric (i.e. scalar) types allowed by Eigen.
Definition: NumTraits.h:231
std::ptrdiff_t j