SparseMatrix.h
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
5 //
6 // This Source Code Form is subject to the terms of the Mozilla
7 // Public License v. 2.0. If a copy of the MPL was not distributed
8 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 
10 #ifndef EIGEN_SPARSEMATRIX_H
11 #define EIGEN_SPARSEMATRIX_H
12 
13 #include "./InternalHeaderCheck.h"
14 
15 namespace Eigen {
16 
47 namespace internal {
48 template<typename Scalar_, int Options_, typename StorageIndex_>
49 struct traits<SparseMatrix<Scalar_, Options_, StorageIndex_> >
50 {
51  typedef Scalar_ Scalar;
52  typedef StorageIndex_ StorageIndex;
53  typedef Sparse StorageKind;
54  typedef MatrixXpr XprKind;
55  enum {
56  RowsAtCompileTime = Dynamic,
57  ColsAtCompileTime = Dynamic,
58  MaxRowsAtCompileTime = Dynamic,
59  MaxColsAtCompileTime = Dynamic,
60  Flags = Options_ | NestByRefBit | LvalueBit | CompressedAccessBit,
61  SupportedAccessPatterns = InnerRandomAccessPattern
62  };
63 };
64 
65 template<typename Scalar_, int Options_, typename StorageIndex_, int DiagIndex>
66 struct traits<Diagonal<SparseMatrix<Scalar_, Options_, StorageIndex_>, DiagIndex> >
67 {
68  typedef SparseMatrix<Scalar_, Options_, StorageIndex_> MatrixType;
69  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
70  typedef std::remove_reference_t<MatrixTypeNested> MatrixTypeNested_;
71 
72  typedef Scalar_ Scalar;
73  typedef Dense StorageKind;
74  typedef StorageIndex_ StorageIndex;
75  typedef MatrixXpr XprKind;
76 
77  enum {
78  RowsAtCompileTime = Dynamic,
79  ColsAtCompileTime = 1,
80  MaxRowsAtCompileTime = Dynamic,
81  MaxColsAtCompileTime = 1,
82  Flags = LvalueBit
83  };
84 };
85 
86 template<typename Scalar_, int Options_, typename StorageIndex_, int DiagIndex>
87 struct traits<Diagonal<const SparseMatrix<Scalar_, Options_, StorageIndex_>, DiagIndex> >
88  : public traits<Diagonal<SparseMatrix<Scalar_, Options_, StorageIndex_>, DiagIndex> >
89 {
90  enum {
91  Flags = 0
92  };
93 };
94 
95 template <typename StorageIndex>
96 struct sparse_reserve_op {
97  EIGEN_DEVICE_FUNC sparse_reserve_op(Index begin, Index end, Index size) {
98  Index range = numext::mini(end - begin, size);
99  m_begin = begin;
100  m_end = begin + range;
101  m_val = StorageIndex(size / range);
102  m_remainder = StorageIndex(size % range);
103  }
104  template <typename IndexType>
105  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE StorageIndex operator()(IndexType i) const {
106  if ((i >= m_begin) && (i < m_end))
107  return m_val + ((i - m_begin) < m_remainder ? 1 : 0);
108  else
109  return 0;
110  }
111  StorageIndex m_val, m_remainder;
112  Index m_begin, m_end;
113 };
114 
115 template <typename Scalar>
116 struct functor_traits<sparse_reserve_op<Scalar>> {
117  enum { Cost = 1, PacketAccess = false, IsRepeatable = true };
118 };
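A worked illustration of this internal helper may be useful (illustration only, not part of the header; it assumes <Eigen/SparseCore> is included): constructed as sparse_reserve_op(begin, end, size), the functor spreads `size` reserved slots over the outer vectors in [begin, end) and returns 0 elsewhere.

#include <Eigen/SparseCore>

// Spread 7 reserved slots over outer vectors [2, 5): range = min(5 - 2, 7) = 3,
// so each vector gets 7/3 = 2 slots and the first 7%3 = 1 vector gets one extra.
inline void sparseReserveOpIllustration() {
  Eigen::internal::sparse_reserve_op<int> op(2, 5, 7);
  // op(2) == 3, op(3) == 2, op(4) == 2, and op(i) == 0 for i outside [2, 5)
}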
119 
120 } // end namespace internal
121 
122 template<typename Scalar_, int Options_, typename StorageIndex_>
123 class SparseMatrix
124  : public SparseCompressedBase<SparseMatrix<Scalar_, Options_, StorageIndex_> >
125 {
127  using Base::convert_index;
128  friend class SparseVector<Scalar_,0,StorageIndex_>;
129  template<typename, typename, typename, typename, typename>
130  friend struct internal::Assignment;
131  public:
132  using Base::isCompressed;
133  using Base::nonZeros;
135  using Base::operator+=;
136  using Base::operator-=;
137 
141  typedef typename Base::InnerIterator InnerIterator;
142  typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
143 
144 
145  using Base::IsRowMajor;
146  typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
147  enum {
148  Options = Options_
149  };
150 
151  typedef typename Base::IndexVector IndexVector;
153  protected:
155 
159  StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed
161 
162  public:
163 
165  inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
167  inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
168 
170  inline Index innerSize() const { return m_innerSize; }
172  inline Index outerSize() const { return m_outerSize; }
173 
177  inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
181  inline Scalar* valuePtr() { return m_data.valuePtr(); }
182 
186  inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
190  inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
191 
195  inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
200 
204  inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
209 
211  inline Storage& data() { return m_data; }
213  inline const Storage& data() const { return m_data; }
214 
217  inline Scalar coeff(Index row, Index col) const
218  {
219  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
220 
221  const Index outer = IsRowMajor ? row : col;
222  const Index inner = IsRowMajor ? col : row;
223  Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
224  return m_data.atInRange(m_outerIndex[outer], end, inner);
225  }
226 
235  inline Scalar& coeffRef(Index row, Index col)
236  {
237  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
238  const Index outer = IsRowMajor ? row : col;
239  const Index inner = IsRowMajor ? col : row;
240  Index start = m_outerIndex[outer];
241  Index end = isCompressed() ? m_outerIndex[outer + 1] : m_outerIndex[outer] + m_innerNonZeros[outer];
242  eigen_assert(end >= start && "you probably called coeffRef on a non finalized matrix");
243  Index dst = start == end ? end : m_data.searchLowerIndex(start, end, inner);
244  if (dst == end) {
245  Index capacity = m_outerIndex[outer + 1] - end;
246  if (capacity > 0) {
247  // implies uncompressed: push to back of vector
248  m_innerNonZeros[outer]++;
249  m_data.index(end) = StorageIndex(inner);
250  m_data.value(end) = Scalar(0);
251  return m_data.value(end);
252  }
253  }
254  if ((dst < end) && (m_data.index(dst) == inner))
255  // this coefficient exists, return a reference to it
256  return m_data.value(dst);
257  else
258  // insertion will require reconfiguring the buffer
259  return insertAtByOuterInner(outer, inner, dst);
260  }
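As a usage note (a minimal sketch, not part of the header): coeff() is a read-only lookup that returns Scalar(0) for absent entries, whereas coeffRef() first inserts an explicit zero entry if needed and returns a writable reference.

#include <Eigen/SparseCore>

int main() {
  Eigen::SparseMatrix<double> A(4, 4);  // column-major by default
  double v = A.coeff(1, 2);             // read-only: returns 0.0 and inserts nothing
  A.coeffRef(1, 2) = 3.0;               // inserts the entry (initialized to 0) if absent, then assigns
  return v == 0.0 ? 0 : 1;
}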
261 
278 
279  public:
280 
288  inline void setZero()
289  {
290  m_data.clear();
291  std::fill_n(m_outerIndex, m_outerSize + 1, StorageIndex(0));
292  if(m_innerNonZeros) {
293  std::fill_n(m_innerNonZeros, m_outerSize, StorageIndex(0));
294  }
295  }
296 
300  inline void reserve(Index reserveSize)
301  {
302  eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
303  m_data.reserve(reserveSize);
304  }
305 
306  #ifdef EIGEN_PARSED_BY_DOXYGEN
319  template<class SizesType>
320  inline void reserve(const SizesType& reserveSizes);
321  #else
322  template<class SizesType>
323  inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
324  typename SizesType::value_type())
325  {
326  EIGEN_UNUSED_VARIABLE(enableif);
327  reserveInnerVectors(reserveSizes);
328  }
329  #endif // EIGEN_PARSED_BY_DOXYGEN
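A minimal sketch of the usual filling pattern built on these overloads (the per-column estimate of 6 is an arbitrary example value): reserve room per inner vector, insert, then compress.

#include <Eigen/SparseCore>

Eigen::SparseMatrix<double> fillDiagonal(int n) {
  Eigen::SparseMatrix<double> A(n, n);
  A.reserve(Eigen::VectorXi::Constant(n, 6));  // estimated nonzeros per column (example value)
  for (int j = 0; j < n; ++j)
    A.insert(j, j) = 1.0;                      // insert() assumes the entry does not exist yet
  A.makeCompressed();                          // release unused capacity, back to compressed mode
  return A;
}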
330  protected:
331  template<class SizesType>
332  inline void reserveInnerVectors(const SizesType& reserveSizes)
333  {
334  if(isCompressed())
335  {
336  Index totalReserveSize = 0;
337  for (Index j = 0; j < m_outerSize; ++j) totalReserveSize += internal::convert_index<Index>(reserveSizes[j]);
338 
339  // if reserveSizes is empty, don't do anything!
340  if (totalReserveSize == 0) return;
341 
342  // turn the matrix into non-compressed mode
343  m_innerNonZeros = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize);
344 
345  // temporarily use m_innerNonZeros to hold the new starting points.
346  StorageIndex* newOuterIndex = m_innerNonZeros;
347 
348  Index count = 0;
349  for(Index j=0; j<m_outerSize; ++j)
350  {
351  newOuterIndex[j] = internal::convert_index<StorageIndex>(count);
352  Index reserveSize = internal::convert_index<Index>(reserveSizes[j]);
353  count += reserveSize + internal::convert_index<Index>(m_outerIndex[j+1]-m_outerIndex[j]);
354  }
355 
356  m_data.reserve(totalReserveSize);
357  StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
358  for(Index j=m_outerSize-1; j>=0; --j)
359  {
360  StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
361  StorageIndex begin = m_outerIndex[j];
362  StorageIndex end = begin + innerNNZ;
363  StorageIndex target = newOuterIndex[j];
365  internal::smart_memmove(valuePtr() + begin, valuePtr() + end, valuePtr() + target);
366  previousOuterIndex = m_outerIndex[j];
367  m_outerIndex[j] = newOuterIndex[j];
368  m_innerNonZeros[j] = innerNNZ;
369  }
370  if(m_outerSize>0)
371  m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + internal::convert_index<StorageIndex>(reserveSizes[m_outerSize-1]);
372 
374  }
375  else
376  {
377  StorageIndex* newOuterIndex = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize + 1);
378 
379  Index count = 0;
380  for(Index j=0; j<m_outerSize; ++j)
381  {
382  newOuterIndex[j] = internal::convert_index<StorageIndex>(count);
383  Index alreadyReserved = internal::convert_index<Index>(m_outerIndex[j+1] - m_outerIndex[j] - m_innerNonZeros[j]);
384  Index reserveSize = internal::convert_index<Index>(reserveSizes[j]);
385  Index toReserve = numext::maxi(reserveSize, alreadyReserved);
386  count += toReserve + internal::convert_index<Index>(m_innerNonZeros[j]);
387  }
388  newOuterIndex[m_outerSize] = internal::convert_index<StorageIndex>(count);
389 
390  m_data.resize(count);
391  for(Index j=m_outerSize-1; j>=0; --j)
392  {
393  StorageIndex innerNNZ = m_innerNonZeros[j];
394  StorageIndex begin = m_outerIndex[j];
395  StorageIndex target = newOuterIndex[j];
396  m_data.moveChunk(begin, target, innerNNZ);
397  }
398 
399  std::swap(m_outerIndex, newOuterIndex);
400  internal::conditional_aligned_delete_auto<StorageIndex, true>(newOuterIndex, m_outerSize + 1);
401  }
402 
403  }
404  public:
405 
406  //--- low level purely coherent filling ---
407 
419  {
421  }
422 
425  inline Scalar& insertBackByOuterInner(Index outer, Index inner)
426  {
427  eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
428  eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
429  StorageIndex p = m_outerIndex[outer+1];
430  ++m_outerIndex[outer+1];
431  m_data.append(Scalar(0), inner);
432  return m_data.value(p);
433  }
434 
438  {
439  StorageIndex p = m_outerIndex[outer+1];
440  ++m_outerIndex[outer+1];
441  m_data.append(Scalar(0), inner);
442  return m_data.value(p);
443  }
444 
447  inline void startVec(Index outer)
448  {
449  eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
450  eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
451  m_outerIndex[outer+1] = m_outerIndex[outer];
452  }
453 
457  inline void finalize()
458  {
459  if(isCompressed())
460  {
461  StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
462  Index i = m_outerSize;
463  // find the last filled column
464  while (i>=0 && m_outerIndex[i]==0)
465  --i;
466  ++i;
467  while (i<=m_outerSize)
468  {
469  m_outerIndex[i] = size;
470  ++i;
471  }
472  }
473  }
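A sketch of this low-level, strictly ordered filling interface (illustration only): outer vectors must be started in order and inner indices must be appended in increasing order within each vector.

#include <Eigen/SparseCore>

Eigen::SparseMatrix<double> lowLevelFill() {
  Eigen::SparseMatrix<double> A(3, 3);     // column-major: the outer index is the column
  for (int j = 0; j < 3; ++j) {
    A.startVec(j);
    A.insertBackByOuterInner(j, j) = 2.0;  // append the diagonal entry of column j
  }
  A.finalize();                            // fill the trailing outer indices
  return A;
}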
474 
475  // remove outer vectors j, j+1 ... j+num-1 and resize the matrix
476  void removeOuterVectors(Index j, Index num = 1) {
477  eigen_assert(num >= 0 && j >= 0 && j + num <= m_outerSize && "Invalid parameters");
478 
479  const Index newRows = IsRowMajor ? m_outerSize - num : rows();
480  const Index newCols = IsRowMajor ? cols() : m_outerSize - num;
481 
482  const Index begin = j + num;
483  const Index end = m_outerSize;
484  const Index target = j;
485 
486  // if the removed vectors are not empty, uncompress the matrix
487  if (m_outerIndex[j + num] > m_outerIndex[j]) uncompress();
488 
489  // shift m_outerIndex and m_innerNonZeros [num] to the left
490  internal::smart_memmove(m_outerIndex + begin, m_outerIndex + end + 1, m_outerIndex + target);
491  if (!isCompressed())
492  internal::smart_memmove(m_innerNonZeros + begin, m_innerNonZeros + end, m_innerNonZeros + target);
493 
494  // if m_outerIndex[0] > 0, shift the data within the first vector while it is easy to do so
495  if (m_outerIndex[0] > StorageIndex(0)) {
496  uncompress();
497  const Index from = internal::convert_index<Index>(m_outerIndex[0]);
498  const Index to = Index(0);
499  const Index chunkSize = internal::convert_index<Index>(m_innerNonZeros[0]);
500  m_data.moveChunk(from, to, chunkSize);
501  m_outerIndex[0] = StorageIndex(0);
502  }
503 
504  // truncate the matrix to the smaller size
505  conservativeResize(newRows, newCols);
506  }
507 
508  // insert empty outer vectors at indices j, j+1 ... j+num-1 and resize the matrix
509  void insertEmptyOuterVectors(Index j, Index num = 1) {
510  EIGEN_USING_STD(fill_n);
511  eigen_assert(num >= 0 && j >= 0 && j < m_outerSize && "Invalid parameters");
512 
513  const Index newRows = IsRowMajor ? m_outerSize + num : rows();
514  const Index newCols = IsRowMajor ? cols() : m_outerSize + num;
515 
516  const Index begin = j;
517  const Index end = m_outerSize;
518  const Index target = j + num;
519 
520  // expand the matrix to the larger size
521  conservativeResize(newRows, newCols);
522 
523  // shift m_outerIndex and m_innerNonZeros [num] to the right
524  internal::smart_memmove(m_outerIndex + begin, m_outerIndex + end + 1, m_outerIndex + target);
525  // m_outerIndex[begin] == m_outerIndex[target], set all indices in this range to same value
526  fill_n(m_outerIndex + begin, num, m_outerIndex[begin]);
527 
528  if (!isCompressed()) {
529  internal::smart_memmove(m_innerNonZeros + begin, m_innerNonZeros + end, m_innerNonZeros + target);
530  // set the nonzeros of the newly inserted vectors to 0
531  fill_n(m_innerNonZeros + begin, num, StorageIndex(0));
532  }
533  }
534 
535  template<typename InputIterators>
536  void setFromTriplets(const InputIterators& begin, const InputIterators& end);
537 
538  template<typename InputIterators,typename DupFunctor>
539  void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
540 
541  template<typename Derived, typename DupFunctor>
542  void collapseDuplicates(DenseBase<Derived>& wi, DupFunctor dup_func = DupFunctor());
543 
544  template<typename InputIterators>
545  void setFromSortedTriplets(const InputIterators& begin, const InputIterators& end);
546 
547  template<typename InputIterators, typename DupFunctor>
548  void setFromSortedTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
549 
550  template<typename InputIterators>
551  void insertFromTriplets(const InputIterators& begin, const InputIterators& end);
552 
553  template<typename InputIterators, typename DupFunctor>
554  void insertFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
555 
556  template<typename InputIterators>
557  void insertFromSortedTriplets(const InputIterators& begin, const InputIterators& end);
558 
559  template<typename InputIterators, typename DupFunctor>
560  void insertFromSortedTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
561 
562  //---
563 
566  inline Scalar& insertByOuterInner(Index j, Index i)
567  {
568  Index start = m_outerIndex[j];
569  Index end = isCompressed() ? m_outerIndex[j + 1] : start + m_innerNonZeros[j];
570  Index dst = start == end ? end : m_data.searchLowerIndex(start, end, i);
571  if (dst == end) {
572  Index capacity = m_outerIndex[j + 1] - end;
573  if (capacity > 0) {
574  // implies uncompressed: push to back of vector
575  m_innerNonZeros[j]++;
576  m_data.index(end) = StorageIndex(i);
577  m_data.value(end) = Scalar(0);
578  return m_data.value(end);
579  }
580  }
581  eigen_assert((dst == end || m_data.index(dst) != i) &&
582  "you cannot insert an element that already exists, you must call coeffRef to this end");
583  return insertAtByOuterInner(j, i, dst);
584  }
585 
588  void makeCompressed()
589  {
590  if (isCompressed()) return;
591 
593 
594  StorageIndex start = m_outerIndex[1];
596  // try to move fewer, larger contiguous chunks
597  Index copyStart = start;
598  Index copyTarget = m_innerNonZeros[0];
599  for (Index j = 1; j < m_outerSize; j++)
600  {
601  StorageIndex end = start + m_innerNonZeros[j];
602  StorageIndex nextStart = m_outerIndex[j + 1];
603  // don't forget to move the last chunk!
604  bool breakUpCopy = (end != nextStart) || (j == m_outerSize - 1);
605  if (breakUpCopy)
606  {
607  Index chunkSize = end - copyStart;
608  if(chunkSize > 0) m_data.moveChunk(copyStart, copyTarget, chunkSize);
609  copyStart = nextStart;
610  copyTarget += chunkSize;
611  }
612  start = nextStart;
614  }
616 
617  // release as much memory as possible
618  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
619  m_innerNonZeros = 0;
620  m_data.squeeze();
621  }
622 
624  void uncompress()
625  {
626  if (!isCompressed()) return;
627  m_innerNonZeros = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize);
628  if (m_outerIndex[m_outerSize] == 0)
629  std::fill_n(m_innerNonZeros, m_outerSize, StorageIndex(0));
630  else
631  for (Index j = 0; j < m_outerSize; j++) m_innerNonZeros[j] = m_outerIndex[j + 1] - m_outerIndex[j];
632  }
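A short sketch of switching between the two storage modes (illustration only, not part of the header):

#include <cassert>
#include <Eigen/SparseCore>

void toggleStorageMode() {
  Eigen::SparseMatrix<double> A(10, 10);
  A.coeffRef(3, 3) = 1.0;   // insertion into a compressed matrix keeps it compressed
  A.uncompress();           // allocate the innerNonZeros array (non-compressed mode)
  A.coeffRef(7, 2) = 2.0;   // further edits may now leave unused capacity in the buffer
  A.makeCompressed();       // pack the buffer back into plain compressed form
  assert(A.isCompressed());
}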
633 
635  void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
636  {
637  prune(default_prunning_func(reference,epsilon));
638  }
639 
647  template<typename KeepFunc>
648  void prune(const KeepFunc& keep = KeepFunc())
649  {
650  StorageIndex k = 0;
651  for(Index j=0; j<m_outerSize; ++j)
652  {
653  StorageIndex previousStart = m_outerIndex[j];
654  if (isCompressed())
655  m_outerIndex[j] = k;
656  else
657  k = m_outerIndex[j];
658  StorageIndex end = isCompressed() ? m_outerIndex[j+1] : previousStart + m_innerNonZeros[j];
659  for(StorageIndex i=previousStart; i<end; ++i)
660  {
661  StorageIndex row = IsRowMajor ? StorageIndex(j) : m_data.index(i);
662  StorageIndex col = IsRowMajor ? m_data.index(i) : StorageIndex(j);
663  bool keepEntry = keep(row, col, m_data.value(i));
664  if (keepEntry) {
665  m_data.value(k) = m_data.value(i);
666  m_data.index(k) = m_data.index(i);
667  ++k;
668  } else if (!isCompressed())
669  m_innerNonZeros[j]--;
670  }
671  }
672  if (isCompressed()) {
673  m_outerIndex[m_outerSize] = k;
674  m_data.resize(k, 0);
675  }
676  }
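Both prune() overloads in use, as a minimal sketch (the 1e-12 threshold is an arbitrary example):

#include <cmath>
#include <Eigen/SparseCore>

void dropSmallEntries(Eigen::SparseMatrix<double>& A) {
  // keep entries that are not much smaller than the reference value 1.0
  A.prune(1.0);
  // or keep entries selected by a custom predicate keep(row, col, value)
  A.prune([](Eigen::Index, Eigen::Index, const double& v) { return std::abs(v) > 1e-12; });
}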
677 
686  inline void conservativeResize(Index rows, Index cols)
687  {
688  // If one dimension is null, then there is nothing to be preserved
689  if (rows == 0 || cols == 0) return resize(rows, cols);
690 
691  Index newOuterSize = IsRowMajor ? rows : cols;
692  Index newInnerSize = IsRowMajor ? cols : rows;
693 
694  Index innerChange = newInnerSize - m_innerSize;
695  Index outerChange = newOuterSize - m_outerSize;
696 
697  if (outerChange != 0) {
698  m_outerIndex = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(
699  m_outerIndex, newOuterSize + 1, m_outerSize + 1);
700 
701  if (!isCompressed())
702  m_innerNonZeros = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(
703  m_innerNonZeros, newOuterSize, m_outerSize);
704 
705  if (outerChange > 0) {
706  StorageIndex lastIdx = m_outerSize == 0 ? StorageIndex(0) : m_outerIndex[m_outerSize];
707  std::fill_n(m_outerIndex + m_outerSize, outerChange + 1, lastIdx);
708 
709  if (!isCompressed()) std::fill_n(m_innerNonZeros + m_outerSize, outerChange, StorageIndex(0));
710  }
711  }
712  m_outerSize = newOuterSize;
713 
714  if (innerChange < 0) {
715  for (Index j = 0; j < m_outerSize; j++) {
716  Index start = m_outerIndex[j];
717  Index end = isCompressed() ? m_outerIndex[j + 1] : start + m_innerNonZeros[j];
718  Index lb = m_data.searchLowerIndex(start, end, newInnerSize);
719  if (lb != end) {
720  uncompress();
721  m_innerNonZeros[j] = StorageIndex(lb - start);
722  }
723  }
724  }
725  m_innerSize = newInnerSize;
726 
727  Index newSize = m_outerIndex[m_outerSize];
728  eigen_assert(newSize <= m_data.size());
729  m_data.resize(newSize);
730  }
731 
739  void resize(Index rows, Index cols)
740  {
741  const Index outerSize = IsRowMajor ? rows : cols;
742  m_innerSize = IsRowMajor ? cols : rows;
743  m_data.clear();
744 
745  if ((m_outerIndex == 0) || (m_outerSize != outerSize)) {
746  m_outerIndex = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(m_outerIndex, outerSize + 1, m_outerSize + 1);
747  m_outerSize = outerSize;
748  }
749 
750  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
751  m_innerNonZeros = 0;
752 
753  std::fill_n(m_outerIndex, m_outerSize + 1, StorageIndex(0));
754  }
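The difference between the two resizing functions, as a minimal sketch (not part of the header):

#include <Eigen/SparseCore>

void resizingSketch() {
  Eigen::SparseMatrix<double> A(5, 5);
  A.coeffRef(2, 2) = 1.0;
  A.conservativeResize(8, 8);  // grows the matrix and keeps the existing entries
  A.resize(3, 3);              // changes the size and discards all entries (empty matrix)
}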
755 
758  inline void resizeNonZeros(Index size)
759  {
760  m_data.resize(size);
761  }
762 
765 
771 
773  inline SparseMatrix()
775  {
776  resize(0, 0);
777  }
778 
782  {
783  resize(rows, cols);
784  }
785 
787  template<typename OtherDerived>
790  {
791  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
792  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
793  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
794  if (needToTranspose)
795  *this = other.derived();
796  else
797  {
798  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
799  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
800  #endif
802  }
803  }
804 
806  template<typename OtherDerived, unsigned int UpLo>
809  {
810  Base::operator=(other);
811  }
812 
814  {
815  *this = other.derived().markAsRValue();
816  }
817 
819  inline SparseMatrix(const SparseMatrix& other)
821  {
822  *this = other.derived();
823  }
824 
826  template<typename OtherDerived>
829  {
830  initAssignment(other);
831  other.evalTo(*this);
832  }
833 
835  template<typename OtherDerived>
838  {
839  *this = other.derived();
840  }
841 
844  inline void swap(SparseMatrix& other)
845  {
846  //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
847  std::swap(m_outerIndex, other.m_outerIndex);
848  std::swap(m_innerSize, other.m_innerSize);
849  std::swap(m_outerSize, other.m_outerSize);
850  std::swap(m_innerNonZeros, other.m_innerNonZeros);
851  m_data.swap(other.m_data);
852  }
853 
856  inline void setIdentity()
857  {
858  eigen_assert(m_outerSize == m_innerSize && "ONLY FOR SQUARED MATRICES");
859  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
860  m_innerNonZeros = 0;
861  m_data.resize(m_outerSize);
862  // is it necessary to squeeze?
863  m_data.squeeze();
864  std::iota(m_outerIndex, m_outerIndex + m_outerSize + 1, StorageIndex(0));
865  std::iota(innerIndexPtr(), innerIndexPtr() + m_outerSize, StorageIndex(0));
866  std::fill_n(valuePtr(), m_outerSize, Scalar(1));
867  }
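For example (a minimal sketch):

#include <Eigen/SparseCore>

Eigen::SparseMatrix<double> scaledIdentity(int n, double s) {
  Eigen::SparseMatrix<double> I(n, n);
  I.setIdentity();   // compressed identity: exactly one entry of 1 per column
  return s * I;
}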
868 
869  inline SparseMatrix& operator=(const SparseMatrix& other)
870  {
871  if (other.isRValue())
872  {
873  swap(other.const_cast_derived());
874  }
875  else if(this!=&other)
876  {
877  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
878  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
879  #endif
880  initAssignment(other);
881  if(other.isCompressed())
882  {
883  internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
884  m_data = other.m_data;
885  }
886  else
887  {
888  Base::operator=(other);
889  }
890  }
891  return *this;
892  }
893 
895  return *this = other.derived().markAsRValue();
896  }
897 
898 #ifndef EIGEN_PARSED_BY_DOXYGEN
899  template<typename OtherDerived>
900  inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
901  { return Base::operator=(other.derived()); }
902 
903  template<typename Lhs, typename Rhs>
904  inline SparseMatrix& operator=(const Product<Lhs,Rhs,AliasFreeProduct>& other);
905 #endif // EIGEN_PARSED_BY_DOXYGEN
906 
907  template<typename OtherDerived>
909 
910 #ifndef EIGEN_NO_IO
911  friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
912  {
913  EIGEN_DBG_SPARSE(
914  s << "Nonzero entries:\n";
915  if(m.isCompressed())
916  {
917  for (Index i=0; i<m.nonZeros(); ++i)
918  s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
919  }
920  else
921  {
922  for (Index i=0; i<m.outerSize(); ++i)
923  {
924  Index p = m.m_outerIndex[i];
925  Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
926  Index k=p;
927  for (; k<pe; ++k) {
928  s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
929  }
930  for (; k<m.m_outerIndex[i+1]; ++k) {
931  s << "(_,_) ";
932  }
933  }
934  }
935  s << std::endl;
936  s << std::endl;
937  s << "Outer pointers:\n";
938  for (Index i=0; i<m.outerSize(); ++i) {
939  s << m.m_outerIndex[i] << " ";
940  }
941  s << " $" << std::endl;
942  if(!m.isCompressed())
943  {
944  s << "Inner non zeros:\n";
945  for (Index i=0; i<m.outerSize(); ++i) {
946  s << m.m_innerNonZeros[i] << " ";
947  }
948  s << " $" << std::endl;
949  }
950  s << std::endl;
951  );
952  s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
953  return s;
954  }
955 #endif
956 
958  inline ~SparseMatrix()
959  {
960  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_outerIndex, m_outerSize + 1);
961  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
962  }
963 
965  Scalar sum() const;
966 
967 # ifdef EIGEN_SPARSEMATRIX_PLUGIN
968 # include EIGEN_SPARSEMATRIX_PLUGIN
969 # endif
970 
971 protected:
972 
973  template<typename Other>
974  void initAssignment(const Other& other)
975  {
976  resize(other.rows(), other.cols());
977  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
978  m_innerNonZeros = 0;
979  }
980 
984 
988  {
991  public:
994  : m_index(convert_index(i)), m_value(convert_index(v))
995  {}
996 
997  StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
998  };
999 
1003 
1004 public:
1007  EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
1008  {
1009  const Index outer = IsRowMajor ? row : col;
1010  const Index inner = IsRowMajor ? col : row;
1011 
1012  eigen_assert(!isCompressed());
1013  eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
1014 
1015  Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
1016  m_data.index(p) = StorageIndex(inner);
1017  m_data.value(p) = Scalar(0);
1018  return m_data.value(p);
1019  }
1020 protected:
1021  struct IndexPosPair {
1022  IndexPosPair(Index a_i, Index a_p) : i(a_i), p(a_p) {}
1025  };
1026 
1036  template<typename DiagXpr, typename Func>
1037  void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc)
1038  {
1039 
1040  constexpr StorageIndex kEmptyIndexVal(-1);
1041  typedef typename ScalarVector::AlignedMapType ValueMap;
1042 
1043  Index n = diagXpr.size();
1044 
1045  const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar,Scalar> >::value;
1046  if(overwrite)
1047  {
1048  if((m_outerSize != n) || (m_innerSize != n))
1049  resize(n, n);
1050  }
1051 
1052  if(m_data.size()==0 || overwrite)
1053  {
1054  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
1055  m_innerNonZeros = 0;
1056  resizeNonZeros(n);
1057  ValueMap valueMap(valuePtr(), n);
1058  std::iota(m_outerIndex, m_outerIndex + n + 1, StorageIndex(0));
1059  std::iota(innerIndexPtr(), innerIndexPtr() + n, StorageIndex(0));
1060  valueMap.setZero();
1061  internal::call_assignment_no_alias(valueMap, diagXpr, assignFunc);
1062  }
1063  else
1064  {
1065  internal::evaluator<DiagXpr> diaEval(diagXpr);
1066 
1067  ei_declare_aligned_stack_constructed_variable(StorageIndex, tmp, n, 0);
1068  typename IndexVector::AlignedMapType insertionLocations(tmp, n);
1069  insertionLocations.setConstant(kEmptyIndexVal);
1070 
1071  Index deferredInsertions = 0;
1072  Index shift = 0;
1073 
1074  for (Index j = 0; j < n; j++) {
1075  Index begin = m_outerIndex[j];
1076  Index end = isCompressed() ? m_outerIndex[j + 1] : begin + m_innerNonZeros[j];
1077  Index capacity = m_outerIndex[j + 1] - end;
1078  Index dst = m_data.searchLowerIndex(begin, end, j);
1079  // the entry exists: update it now
1080  if (dst != end && m_data.index(dst) == StorageIndex(j)) assignFunc.assignCoeff(m_data.value(dst), diaEval.coeff(j));
1081  // the entry belongs at the back of the vector: push to back
1082  else if (dst == end && capacity > 0)
1083  assignFunc.assignCoeff(insertBackUncompressed(j, j), diaEval.coeff(j));
1084  // the insertion requires a data move, record insertion location and handle in second pass
1085  else {
1086  insertionLocations.coeffRef(j) = StorageIndex(dst);
1087  deferredInsertions++;
1088  // if there is no capacity, all vectors to the right of this are shifted
1089  if (capacity == 0) shift++;
1090  }
1091  }
1092 
1093  if (deferredInsertions > 0) {
1094 
1095  m_data.resize(m_data.size() + shift);
1096  Index copyEnd = isCompressed() ? m_outerIndex[m_outerSize]
1097  : m_outerIndex[m_outerSize - 1] + m_innerNonZeros[m_outerSize - 1];
1098  for (Index j = m_outerSize - 1; deferredInsertions > 0; j--) {
1099  Index begin = m_outerIndex[j];
1100  Index end = isCompressed() ? m_outerIndex[j + 1] : begin + m_innerNonZeros[j];
1101  Index capacity = m_outerIndex[j + 1] - end;
1102 
1103  bool doInsertion = insertionLocations(j) >= 0;
1104  bool breakUpCopy = doInsertion && (capacity > 0);
1105  // break up copy for sorted insertion into inactive nonzeros
1106  // optionally, add another criterion, i.e. 'breakUpCopy || (capacity > threshold)'
1107  // where `threshold >= 0` to skip inactive nonzeros in each vector
1108  // this reduces the total number of copied elements, but requires more moveChunk calls
1109  if (breakUpCopy) {
1110  Index copyBegin = m_outerIndex[j + 1];
1111  Index to = copyBegin + shift;
1112  Index chunkSize = copyEnd - copyBegin;
1113  m_data.moveChunk(copyBegin, to, chunkSize);
1114  copyEnd = end;
1115  }
1116 
1117  m_outerIndex[j + 1] += shift;
1118 
1119  if (doInsertion) {
1120  // if there is capacity, shift into the inactive nonzeros
1121  if (capacity > 0) shift++;
1122  Index copyBegin = insertionLocations(j);
1123  Index to = copyBegin + shift;
1124  Index chunkSize = copyEnd - copyBegin;
1125  m_data.moveChunk(copyBegin, to, chunkSize);
1126  Index dst = to - 1;
1127  m_data.index(dst) = StorageIndex(j);
1128  m_data.value(dst) = Scalar(0);
1129  assignFunc.assignCoeff(m_data.value(dst), diaEval.coeff(j));
1130  if (!isCompressed()) m_innerNonZeros[j]++;
1131  shift--;
1132  deferredInsertions--;
1133  copyEnd = copyBegin;
1134  }
1135  }
1136  }
1137  eigen_assert((shift == 0) && (deferredInsertions == 0));
1138  }
1139  }
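assignDiagonal() is the backend invoked when assigning to the diagonal view of a sparse matrix; a hedged sketch of that user-facing pattern:

#include <Eigen/SparseCore>

void diagonalAssignment() {
  Eigen::SparseMatrix<double> A(4, 4);
  A.coeffRef(0, 1) = 3.0;                             // some off-diagonal structure
  A.diagonal() = Eigen::VectorXd::Constant(4, 5.0);   // inserts or overwrites the diagonal in place
  A.diagonal() += Eigen::VectorXd::Ones(4);           // updates the now-existing diagonal entries
}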
1140 
1141  /* These functions are used to avoid a redundant binary search operation in functions such as coeffRef() and assume `dst` is the appropriate sorted insertion point */
1142  EIGEN_STRONG_INLINE Scalar& insertAtByOuterInner(Index outer, Index inner, Index dst);
1145 
1146 private:
1147  EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE)
1148  EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS)
1149 
1150  struct default_prunning_func {
1151  default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
1152  inline bool operator() (const Index&, const Index&, const Scalar& value) const
1153  {
1154  return !internal::isMuchSmallerThan(value, reference, epsilon);
1155  }
1156  Scalar reference;
1157  RealScalar epsilon;
1158  };
1159 };
1160 
1161 namespace internal {
1162 
1163 // Creates a compressed sparse matrix from a range of unsorted triplets
1164 // Requires temporary storage to handle duplicate entries
1165 template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
1166 void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
1167  DupFunctor dup_func) {
1168  constexpr bool IsRowMajor = SparseMatrixType::IsRowMajor;
1169  using StorageIndex = typename SparseMatrixType::StorageIndex;
1170  using IndexMap = typename VectorX<StorageIndex>::AlignedMapType;
1171  using TransposedSparseMatrix = SparseMatrix<typename SparseMatrixType::Scalar, IsRowMajor ? ColMajor : RowMajor, StorageIndex>;
1172 
1173  if (begin == end) return;
1174 
1175  // There are two strategies to consider for constructing a matrix from unordered triplets:
1176  // A) construct the 'mat' in its native storage order and sort in-place (less memory); or,
1177  // B) construct the transposed matrix and use an implicit sort upon assignment to `mat` (less time).
1178  // This routine uses B) for faster execution time.
1179  TransposedSparseMatrix trmat(mat.rows(), mat.cols());
1180 
1181  // scan triplets to determine allocation size before constructing matrix
1182  Index nonZeros = 0;
1183  for (InputIterator it(begin); it != end; ++it) {
1184  eigen_assert(it->row() >= 0 && it->row() < mat.rows() && it->col() >= 0 && it->col() < mat.cols());
1185  StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
1187  trmat.outerIndexPtr()[j + 1]++;
1188  nonZeros++;
1189  }
1190 
1191  std::partial_sum(trmat.outerIndexPtr(), trmat.outerIndexPtr() + trmat.outerSize() + 1, trmat.outerIndexPtr());
1192  eigen_assert(nonZeros == trmat.outerIndexPtr()[trmat.outerSize()]);
1193  trmat.resizeNonZeros(nonZeros);
1194 
1195  // construct temporary array to track insertions (outersize) and collapse duplicates (innersize)
1196  ei_declare_aligned_stack_constructed_variable(StorageIndex, tmp, numext::maxi(mat.innerSize(), mat.outerSize()), 0);
1197  smart_copy(trmat.outerIndexPtr(), trmat.outerIndexPtr() + trmat.outerSize(), tmp);
1198 
1199  // push triplets to back of each vector
1200  for (InputIterator it(begin); it != end; ++it) {
1201  StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
1202  StorageIndex i = convert_index<StorageIndex>(IsRowMajor ? it->row() : it->col());
1203  StorageIndex k = tmp[j];
1204  trmat.data().index(k) = i;
1205  trmat.data().value(k) = it->value();
1206  tmp[j]++;
1207  }
1208 
1209  IndexMap wi(tmp, trmat.innerSize());
1210  trmat.collapseDuplicates(wi, dup_func);
1211  // implicit sorting
1212  mat = trmat;
1213 }
1214 
1215 // Creates a compressed sparse matrix from a sorted range of triplets
1216 template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
1217 void set_from_triplets_sorted(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
1218  DupFunctor dup_func) {
1219  constexpr bool IsRowMajor = SparseMatrixType::IsRowMajor;
1220  using StorageIndex = typename SparseMatrixType::StorageIndex;
1221 
1222  if (begin == end) return;
1223 
1224  constexpr StorageIndex kEmptyIndexValue(-1);
1225  // deallocate inner nonzeros if present and zero outerIndexPtr
1226  mat.resize(mat.rows(), mat.cols());
1227  // use outer indices to count non zero entries (excluding duplicate entries)
1228  StorageIndex previous_j = kEmptyIndexValue;
1229  StorageIndex previous_i = kEmptyIndexValue;
1230  // scan triplets to determine allocation size before constructing matrix
1231  Index nonZeros = 0;
1232  for (InputIterator it(begin); it != end; ++it) {
1233  eigen_assert(it->row() >= 0 && it->row() < mat.rows() && it->col() >= 0 && it->col() < mat.cols());
1234  StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->row() : it->col());
1235  StorageIndex i = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
1236  eigen_assert(j > previous_j || (j == previous_j && i >= previous_i));
1237  // identify duplicates by examining previous location
1238  bool duplicate = (previous_j == j) && (previous_i == i);
1239  if (!duplicate) {
1241  nonZeros++;
1242  mat.outerIndexPtr()[j + 1]++;
1243  previous_j = j;
1244  previous_i = i;
1245  }
1246  }
1247 
1248  // finalize outer indices and allocate memory
1249  std::partial_sum(mat.outerIndexPtr(), mat.outerIndexPtr() + mat.outerSize() + 1, mat.outerIndexPtr());
1250  eigen_assert(nonZeros == mat.outerIndexPtr()[mat.outerSize()]);
1251  mat.resizeNonZeros(nonZeros);
1252 
1253  previous_i = kEmptyIndexValue;
1254  previous_j = kEmptyIndexValue;
1255  Index back = 0;
1256  for (InputIterator it(begin); it != end; ++it) {
1257  StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->row() : it->col());
1258  StorageIndex i = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
1259  bool duplicate = (previous_j == j) && (previous_i == i);
1260  if (duplicate) {
1261  mat.data().value(back - 1) = dup_func(mat.data().value(back - 1), it->value());
1262  } else {
1263  // push triplets to back
1264  mat.data().index(back) = i;
1265  mat.data().value(back) = it->value();
1266  previous_j = j;
1267  previous_i = i;
1268  back++;
1269  }
1270  }
1271  eigen_assert(back == nonZeros);
1272  // matrix is finalized
1273 }
1274 
1275 // thin wrapper around a generic binary functor to use the sparse disjunction evaluator instead of the default "arithmetic" evaluator
1276 template<typename DupFunctor, typename LhsScalar, typename RhsScalar = LhsScalar>
1277 struct scalar_disjunction_op
1278 {
1279  using result_type = typename result_of<DupFunctor(LhsScalar, RhsScalar)>::type;
1280  scalar_disjunction_op(const DupFunctor& op) : m_functor(op) {}
1281  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return m_functor(a, b); }
1282  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const DupFunctor& functor() const { return m_functor; }
1283  const DupFunctor& m_functor;
1284 };
1285 
1286 template <typename DupFunctor, typename LhsScalar, typename RhsScalar>
1287 struct functor_traits<scalar_disjunction_op<DupFunctor, LhsScalar, RhsScalar>> : public functor_traits<DupFunctor> {};
1288 
1289 // Creates a compressed sparse matrix from its existing entries and those from an unsorted range of triplets
1290 template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
1291 void insert_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
1292  DupFunctor dup_func) {
1293  using Scalar = typename SparseMatrixType::Scalar;
1294  using SrcXprType = CwiseBinaryOp<scalar_disjunction_op<DupFunctor, Scalar>, const SparseMatrixType, const SparseMatrixType>;
1295 
1296  // set_from_triplets is necessary to sort the inner indices and remove the duplicate entries
1297  SparseMatrixType trips(mat.rows(), mat.cols());
1298  set_from_triplets(begin, end, trips, dup_func);
1299 
1300  SrcXprType src = mat.binaryExpr(trips, scalar_disjunction_op<DupFunctor, Scalar>(dup_func));
1301  // the sparse assignment procedure creates a temporary matrix and swaps the final result
1302  assign_sparse_to_sparse<SparseMatrixType, SrcXprType>(mat, src);
1303 }
1304 
1305 // Creates a compressed sparse matrix from its existing entries and those from a sorted range of triplets
1306 template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
1307 void insert_from_triplets_sorted(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
1308  DupFunctor dup_func) {
1309  using Scalar = typename SparseMatrixType::Scalar;
1310  using SrcXprType = CwiseBinaryOp<scalar_disjunction_op<DupFunctor, Scalar>, const SparseMatrixType, const SparseMatrixType>;
1311 
1312  // TODO: process triplets without making a copy
1313  SparseMatrixType trips(mat.rows(), mat.cols());
1314  set_from_triplets_sorted(begin, end, trips, dup_func);
1315 
1316  SrcXprType src = mat.binaryExpr(trips, scalar_disjunction_op<DupFunctor, Scalar>(dup_func));
1317  // the sparse assignment procedure creates a temporary matrix and swaps the final result
1318  assign_sparse_to_sparse<SparseMatrixType, SrcXprType>(mat, src);
1319 }
1320 
1321 } // namespace internal
1322 
1360 template<typename Scalar, int Options_, typename StorageIndex_>
1361 template<typename InputIterators>
1362 void SparseMatrix<Scalar,Options_,StorageIndex_>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
1363 {
1364  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,Options_,StorageIndex_> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
1365 }
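The classic triplet-based construction, as a minimal sketch (values are arbitrary examples):

#include <vector>
#include <Eigen/SparseCore>

Eigen::SparseMatrix<double> fromTriplets() {
  std::vector<Eigen::Triplet<double>> trips;
  trips.emplace_back(0, 1, 2.0);
  trips.emplace_back(2, 0, -1.0);
  trips.emplace_back(0, 1, 0.5);   // duplicate (0,1): summed with the first entry by default
  Eigen::SparseMatrix<double> A(3, 3);
  A.setFromTriplets(trips.begin(), trips.end());
  return A;                        // compressed; A.coeff(0, 1) == 2.5
}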
1366 
1376 template<typename Scalar, int Options_, typename StorageIndex_>
1377 template<typename InputIterators, typename DupFunctor>
1378 void SparseMatrix<Scalar, Options_, StorageIndex_>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
1379 {
1380  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(begin, end, *this, dup_func);
1381 }
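Continuing the sketch above (same hypothetical A and trips), a functor can replace the default summation of duplicates, e.g. keeping the last value:

A.setFromTriplets(trips.begin(), trips.end(),
                  [](const double&, const double& b) { return b; });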
1382 
1390 template<typename Scalar, int Options_, typename StorageIndex_>
1391 template<typename InputIterators>
1392 void SparseMatrix<Scalar, Options_, StorageIndex_>::setFromSortedTriplets(const InputIterators& begin, const InputIterators& end)
1393 {
1394  internal::set_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_> >(begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
1395 }
1396 
1406 template<typename Scalar, int Options_, typename StorageIndex_>
1407 template<typename InputIterators, typename DupFunctor>
1408 void SparseMatrix<Scalar, Options_, StorageIndex_>::setFromSortedTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
1409 {
1410  internal::set_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(begin, end, *this, dup_func);
1411 }
1412 
1450 template<typename Scalar, int Options_, typename StorageIndex_>
1451 template<typename InputIterators>
1452 void SparseMatrix<Scalar, Options_, StorageIndex_>::insertFromTriplets(const InputIterators& begin, const InputIterators& end)
1453 {
1454  internal::insert_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_> >(begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
1455 }
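A minimal sketch of merging new triplets into an already populated matrix (values are arbitrary examples; collisions with existing entries are combined, by summation in this overload):

#include <vector>
#include <Eigen/SparseCore>

void addEntries(Eigen::SparseMatrix<double>& A) {
  std::vector<Eigen::Triplet<double>> more;
  more.emplace_back(1, 1, 4.0);
  more.emplace_back(0, 2, -3.0);
  A.insertFromTriplets(more.begin(), more.end());
}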
1456 
1466 template<typename Scalar, int Options_, typename StorageIndex_>
1467 template<typename InputIterators, typename DupFunctor>
1468 void SparseMatrix<Scalar, Options_, StorageIndex_>::insertFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
1469 {
1470  internal::insert_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(begin, end, *this, dup_func);
1471 }
1472 
1480 template<typename Scalar, int Options_, typename StorageIndex_>
1481 template<typename InputIterators>
1482 void SparseMatrix<Scalar, Options_, StorageIndex_>::insertFromSortedTriplets(const InputIterators& begin, const InputIterators& end)
1483 {
1484  internal::insert_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_> >(begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
1485 }
1486 
1496 template<typename Scalar, int Options_, typename StorageIndex_>
1497 template<typename InputIterators, typename DupFunctor>
1498 void SparseMatrix<Scalar, Options_, StorageIndex_>::insertFromSortedTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
1499 {
1500  internal::insert_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(begin, end, *this, dup_func);
1501 }
1502 
1504 template <typename Scalar_, int Options_, typename StorageIndex_>
1505 template <typename Derived, typename DupFunctor>
1506 void SparseMatrix<Scalar_, Options_, StorageIndex_>::collapseDuplicates(DenseBase<Derived>& wi, DupFunctor dup_func) {
1507  // removes duplicate entries and compresses the matrix
1508  // the excess allocated memory is not released
1509  // the inner indices do not need to be sorted, nor is the matrix returned in a sorted state
1510  eigen_assert(wi.size() == m_innerSize);
1511  constexpr StorageIndex kEmptyIndexValue(-1);
1512  wi.setConstant(kEmptyIndexValue);
1513  StorageIndex count = 0;
1514  const bool is_compressed = isCompressed();
1515  // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
1516  for (Index j = 0; j < m_outerSize; ++j) {
1517  const StorageIndex newBegin = count;
1518  const StorageIndex end = is_compressed ? m_outerIndex[j + 1] : m_outerIndex[j] + m_innerNonZeros[j];
1519  for (StorageIndex k = m_outerIndex[j]; k < end; ++k) {
1520  StorageIndex i = m_data.index(k);
1521  if (wi(i) >= newBegin) {
1522  // entry at k is a duplicate
1523  // accumulate it into the primary entry located at wi(i)
1524  m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
1525  } else {
1526  // k is the primary entry in j with inner index i
1527  // shift it to the left and record its location at wi(i)
1528  m_data.index(count) = i;
1529  m_data.value(count) = m_data.value(k);
1530  wi(i) = count;
1531  ++count;
1532  }
1533  }
1534  m_outerIndex[j] = newBegin;
1535  }
1536  m_outerIndex[m_outerSize] = count;
1537  m_data.resize(count);
1538 
1539  // turn the matrix into compressed form (if it is not already)
1540  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
1541  m_innerNonZeros = 0;
1542 }
1543 
1545 template<typename Scalar, int Options_, typename StorageIndex_>
1546 template<typename OtherDerived>
1548 {
1549  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
1550  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
1551 
1552  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1553  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1554  #endif
1555 
1556  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
1557  if (needToTranspose)
1558  {
1559  #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1560  EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1561  #endif
1562  // two-pass algorithm:
1563  // 1 - compute the number of coeffs per dest inner vector
1564  // 2 - do the actual copy/eval
1565  // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
1566  typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
1567  typedef internal::remove_all_t<OtherCopy> OtherCopy_;
1568  typedef internal::evaluator<OtherCopy_> OtherCopyEval;
1569  OtherCopy otherCopy(other.derived());
1570  OtherCopyEval otherCopyEval(otherCopy);
1571 
1572  SparseMatrix dest(other.rows(),other.cols());
1573  Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();
1574 
1575  // pass 1
1576  // FIXME the above copy could be merged with that pass
1577  for (Index j=0; j<otherCopy.outerSize(); ++j)
1578  for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1579  ++dest.m_outerIndex[it.index()];
1580 
1581  // prefix sum
1582  StorageIndex count = 0;
1583  IndexVector positions(dest.outerSize());
1584  for (Index j=0; j<dest.outerSize(); ++j)
1585  {
1586  StorageIndex tmp = dest.m_outerIndex[j];
1587  dest.m_outerIndex[j] = count;
1588  positions[j] = count;
1589  count += tmp;
1590  }
1591  dest.m_outerIndex[dest.outerSize()] = count;
1592  // alloc
1593  dest.m_data.resize(count);
1594  // pass 2
1595  for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
1596  {
1597  for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1598  {
1599  Index pos = positions[it.index()]++;
1600  dest.m_data.index(pos) = j;
1601  dest.m_data.value(pos) = it.value();
1602  }
1603  }
1604  this->swap(dest);
1605  return *this;
1606  }
1607  else
1608  {
1609  if(other.isRValue())
1610  {
1611  initAssignment(other.derived());
1612  }
1613  // there is no special optimization
1614  return Base::operator=(other.derived());
1615  }
1616 }
1617 
1618 template <typename Scalar_, int Options_, typename StorageIndex_>
1619 inline typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
1620 SparseMatrix<Scalar_, Options_, StorageIndex_>::insert(Index row, Index col) {
1621  return insertByOuterInner(IsRowMajor ? row : col, IsRowMajor ? col : row);
1622 }
1623 
1624 template <typename Scalar_, int Options_, typename StorageIndex_>
1625 EIGEN_STRONG_INLINE typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
1626 SparseMatrix<Scalar_, Options_, StorageIndex_>::insertAtByOuterInner(Index outer, Index inner, Index dst) {
1627  // random insertion into compressed matrix is very slow
1628  uncompress();
1629  return insertUncompressedAtByOuterInner(outer, inner, dst);
1630 }
1631 
1632 template <typename Scalar_, int Options_, typename StorageIndex_>
1635  eigen_assert(!isCompressed());
1636  Index outer = IsRowMajor ? row : col;
1637  Index inner = IsRowMajor ? col : row;
1638  Index start = m_outerIndex[outer];
1639  Index end = start + m_innerNonZeros[outer];
1640  Index dst = start == end ? end : m_data.searchLowerIndex(start, end, inner);
1641  if (dst == end) {
1642  Index capacity = m_outerIndex[outer + 1] - end;
1643  if (capacity > 0) {
1644  // implies uncompressed: push to back of vector
1645  m_innerNonZeros[outer]++;
1646  m_data.index(end) = StorageIndex(inner);
1647  m_data.value(end) = Scalar(0);
1648  return m_data.value(end);
1649  }
1650  }
1651  eigen_assert((dst == end || m_data.index(dst) != inner) &&
1652  "you cannot insert an element that already exists, you must call coeffRef to this end");
1653  return insertUncompressedAtByOuterInner(outer, inner, dst);
1654 }
1655 
1656 template <typename Scalar_, int Options_, typename StorageIndex_>
1659  eigen_assert(isCompressed());
1660  Index outer = IsRowMajor ? row : col;
1661  Index inner = IsRowMajor ? col : row;
1662  Index start = m_outerIndex[outer];
1663  Index end = m_outerIndex[outer + 1];
1664  Index dst = start == end ? end : m_data.searchLowerIndex(start, end, inner);
1665  eigen_assert((dst == end || m_data.index(dst) != inner) &&
1666  "you cannot insert an element that already exists, you must call coeffRef to this end");
1667  return insertCompressedAtByOuterInner(outer, inner, dst);
1668 }
1669 
1670 template <typename Scalar_, int Options_, typename StorageIndex_>
1673  eigen_assert(isCompressed());
1674  // compressed insertion always requires expanding the buffer
1675  // first, check if there is adequate allocated memory
1676  if (m_data.allocatedSize() <= m_data.size()) {
1677  // if there is no capacity for a single insertion, double the capacity
1678  // increase capacity by a minimum of 32
1679  Index minReserve = 32;
1680  Index reserveSize = numext::maxi(minReserve, m_data.allocatedSize());
1681  m_data.reserve(reserveSize);
1682  }
1683  m_data.resize(m_data.size() + 1);
1684  Index chunkSize = m_outerIndex[m_outerSize] - dst;
1685  // shift the existing data to the right if necessary
1686  m_data.moveChunk(dst, dst + 1, chunkSize);
1687  // update nonzero counts
1688  // potentially O(outerSize) bottleneck!
1689  for (Index j = outer; j < m_outerSize; j++) m_outerIndex[j + 1]++;
1690  // initialize the coefficient
1691  m_data.index(dst) = StorageIndex(inner);
1692  m_data.value(dst) = Scalar(0);
1693  // return a reference to the coefficient
1694  return m_data.value(dst);
1695 }
1696 
1697 template <typename Scalar_, int Options_, typename StorageIndex_>
1700  eigen_assert(!isCompressed());
1701  // find a vector with capacity, starting at `outer` and searching to the left and right
1702  for (Index leftTarget = outer - 1, rightTarget = outer; (leftTarget >= 0) || (rightTarget < m_outerSize);) {
1703  if (rightTarget < m_outerSize) {
1704  Index start = m_outerIndex[rightTarget];
1705  Index end = start + m_innerNonZeros[rightTarget];
1706  Index nextStart = m_outerIndex[rightTarget + 1];
1707  Index capacity = nextStart - end;
1708  if (capacity > 0) {
1709  // move [dst, end) to dst+1 and insert at dst
1710  Index chunkSize = end - dst;
1711  if (chunkSize > 0) m_data.moveChunk(dst, dst + 1, chunkSize);
1712  m_innerNonZeros[outer]++;
1713  for (Index j = outer; j < rightTarget; j++) m_outerIndex[j + 1]++;
1714  m_data.index(dst) = StorageIndex(inner);
1715  m_data.value(dst) = Scalar(0);
1716  return m_data.value(dst);
1717  }
1718  rightTarget++;
1719  }
1720  if (leftTarget >= 0) {
1721  Index start = m_outerIndex[leftTarget];
1722  Index end = start + m_innerNonZeros[leftTarget];
1723  Index nextStart = m_outerIndex[leftTarget + 1];
1724  Index capacity = nextStart - end;
1725  if (capacity > 0) {
1726  // tricky: dst is a lower bound, so we must insert at dst-1 when shifting left
1727  // move [nextStart, dst) to nextStart-1 and insert at dst-1
1728  Index chunkSize = dst - nextStart;
1729  if (chunkSize > 0) m_data.moveChunk(nextStart, nextStart - 1, chunkSize);
1730  m_innerNonZeros[outer]++;
1731  for (Index j = leftTarget; j < outer; j++) m_outerIndex[j + 1]--;
1732  m_data.index(dst - 1) = StorageIndex(inner);
1733  m_data.value(dst - 1) = Scalar(0);
1734  return m_data.value(dst - 1);
1735  }
1736  leftTarget--;
1737  }
1738  }
1739 
1740  // no room for interior insertion
1741  // nonZeros() == m_data.size()
1742  // record offset as outerIndexPtr will change
1743  Index dst_offset = dst - m_outerIndex[outer];
1744  // allocate space for random insertion
1745  if (m_data.allocatedSize() == 0) {
1746  // fast method to allocate space for one element per vector in empty matrix
1747  m_data.resize(m_outerSize);
1748  std::iota(m_outerIndex, m_outerIndex + m_outerSize + 1, StorageIndex(0));
1749  } else {
1750  // check for integer overflow: if maxReserveSize == 0, insertion is not possible
1751  Index maxReserveSize = static_cast<Index>(NumTraits<StorageIndex>::highest()) - m_data.allocatedSize();
1752  eigen_assert(maxReserveSize > 0);
1753  if (m_outerSize <= maxReserveSize) {
1754  // allocate space for one additional element per vector
1755  reserveInnerVectors(IndexVector::Constant(m_outerSize, 1));
1756  } else {
1757  // handle the edge case where StorageIndex is insufficient to reserve outerSize additional elements
1758  // allocate space for one additional element in the interval [outer,maxReserveSize)
1759  typedef internal::sparse_reserve_op<StorageIndex> ReserveSizesOp;
1760  typedef CwiseNullaryOp<ReserveSizesOp, IndexVector> ReserveSizesXpr;
1761  ReserveSizesXpr reserveSizesXpr(m_outerSize, 1, ReserveSizesOp(outer, m_outerSize, maxReserveSize));
1762  reserveInnerVectors(reserveSizesXpr);
1763  }
1764  }
1765  // insert element at `dst` with new outer indices
1766  Index start = m_outerIndex[outer];
1767  Index end = start + m_innerNonZeros[outer];
1768  Index new_dst = start + dst_offset;
1769  Index chunkSize = end - new_dst;
1770  if (chunkSize > 0) m_data.moveChunk(new_dst, new_dst + 1, chunkSize);
1771  m_innerNonZeros[outer]++;
1772  m_data.index(new_dst) = StorageIndex(inner);
1773  m_data.value(new_dst) = Scalar(0);
1774  return m_data.value(new_dst);
1775 }
1776 
1777 namespace internal {
1778 
1779  template <typename Scalar_, int Options_, typename StorageIndex_>
1780  struct evaluator<SparseMatrix<Scalar_, Options_, StorageIndex_>>
1781  : evaluator<SparseCompressedBase<SparseMatrix<Scalar_, Options_, StorageIndex_>>> {
1782  typedef evaluator<SparseCompressedBase<SparseMatrix<Scalar_, Options_, StorageIndex_>>> Base;
1783  typedef SparseMatrix<Scalar_, Options_, StorageIndex_> SparseMatrixType;
1784  evaluator() : Base() {}
1785  explicit evaluator(const SparseMatrixType& mat) : Base(mat) {}
1786  };
1787 
1788 }
1789 
1790 // Specialization for SparseMatrix.
1791 // Serializes [rows, cols, isCompressed, outerSize, innerBufferSize,
1792 // innerNonZeros, outerIndices, innerIndices, values].
1793 template <typename Scalar, int Options, typename StorageIndex>
1794 class Serializer<SparseMatrix<Scalar, Options, StorageIndex>, void> {
1795  public:
1796  typedef SparseMatrix<Scalar, Options, StorageIndex> SparseMat;
1797 
1798  struct Header {
1799  Index rows;
1800  Index cols;
1801  bool compressed;
1802  Index outer_size;
1803  Index inner_buffer_size;
1804  };
1805 
1806  EIGEN_DEVICE_FUNC size_t size(const SparseMat& value) const {
1807  // innerNonZeros.
1808  std::size_t num_storage_indices = value.isCompressed() ? 0 : value.outerSize();
1809  // Outer indices.
1810  num_storage_indices += value.outerSize() + 1;
1811  // Inner indices.
1812  const StorageIndex inner_buffer_size = value.outerIndexPtr()[value.outerSize()];
1813  num_storage_indices += inner_buffer_size;
1814  // Values.
1815  std::size_t num_values = inner_buffer_size;
1816  return sizeof(Header) + sizeof(Scalar) * num_values +
1817  sizeof(StorageIndex) * num_storage_indices;
1818  }
1819 
1820  EIGEN_DEVICE_FUNC uint8_t* serialize(uint8_t* dest, uint8_t* end,
1821  const SparseMat& value) {
1822  if (EIGEN_PREDICT_FALSE(dest == nullptr)) return nullptr;
1823  if (EIGEN_PREDICT_FALSE(dest + size(value) > end)) return nullptr;
1824 
1825  const size_t header_bytes = sizeof(Header);
1826  Header header = {value.rows(), value.cols(), value.isCompressed(),
1827  value.outerSize(), value.outerIndexPtr()[value.outerSize()]};
1828  EIGEN_USING_STD(memcpy)
1829  memcpy(dest, &header, header_bytes);
1830  dest += header_bytes;
1831 
1832  // innerNonZeros.
1833  if (!header.compressed) {
1834  std::size_t data_bytes = sizeof(StorageIndex) * header.outer_size;
1835  memcpy(dest, value.innerNonZeroPtr(), data_bytes);
1836  dest += data_bytes;
1837  }
1838 
1839  // Outer indices.
1840  std::size_t data_bytes = sizeof(StorageIndex) * (header.outer_size + 1);
1841  memcpy(dest, value.outerIndexPtr(), data_bytes);
1842  dest += data_bytes;
1843 
1844  // Inner indices.
1845  data_bytes = sizeof(StorageIndex) * header.inner_buffer_size;
1846  memcpy(dest, value.innerIndexPtr(), data_bytes);
1847  dest += data_bytes;
1848 
1849  // Values.
1850  data_bytes = sizeof(Scalar) * header.inner_buffer_size;
1851  memcpy(dest, value.valuePtr(), data_bytes);
1852  dest += data_bytes;
1853 
1854  return dest;
1855  }
1856 
1857  EIGEN_DEVICE_FUNC const uint8_t* deserialize(const uint8_t* src,
1858  const uint8_t* end,
1859  SparseMat& value) const {
1860  if (EIGEN_PREDICT_FALSE(src == nullptr)) return nullptr;
1861  if (EIGEN_PREDICT_FALSE(src + sizeof(Header) > end)) return nullptr;
1862 
1863  const size_t header_bytes = sizeof(Header);
1864  Header header;
1865  EIGEN_USING_STD(memcpy)
1866  memcpy(&header, src, header_bytes);
1867  src += header_bytes;
1868 
1869  value.setZero();
1870  value.resize(header.rows, header.cols);
1871  if (header.compressed) {
1872  value.makeCompressed();
1873  } else {
1874  value.uncompress();
1875  }
1876 
1877  // Adjust value ptr size.
1878  value.data().resize(header.inner_buffer_size);
1879 
1880  // Initialize compressed state and inner non-zeros.
1881  if (!header.compressed) {
1882  // Inner non-zero counts.
1883  std::size_t data_bytes = sizeof(StorageIndex) * header.outer_size;
1884  if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
1885  memcpy(value.innerNonZeroPtr(), src, data_bytes);
1886  src += data_bytes;
1887  }
1888 
1889  // Outer indices.
1890  std::size_t data_bytes = sizeof(StorageIndex) * (header.outer_size + 1);
1891  if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
1892  memcpy(value.outerIndexPtr(), src, data_bytes);
1893  src += data_bytes;
1894 
1895  // Inner indices.
1896  data_bytes = sizeof(StorageIndex) * header.inner_buffer_size;
1897  if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
1898  memcpy(value.innerIndexPtr(), src, data_bytes);
1899  src += data_bytes;
1900 
1901  // Values.
1902  data_bytes = sizeof(Scalar) * header.inner_buffer_size;
1903  if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
1904  memcpy(value.valuePtr(), src, data_bytes);
1905  src += data_bytes;
1906  return src;
1907  }
1908 };
1909 
1910 } // end namespace Eigen
1911 
1912 #endif // EIGEN_SPARSEMATRIX_H
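A minimal round-trip sketch of the Serializer specialization above. It is not part of SparseMatrix.h; it assumes Eigen::Serializer is pulled in by the standard Eigen headers and that the primary Serializer template defaults its second parameter so Serializer<SparseMatrix<double>> selects the specialization shown in this listing.

#include <Eigen/SparseCore>
#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  Eigen::SparseMatrix<double> A(4, 4);
  A.insert(0, 0) = 1.0;
  A.insert(2, 1) = 2.0;
  A.insert(3, 3) = 3.0;
  A.makeCompressed();

  // size() accounts for the Header, the index arrays, and the value array.
  Eigen::Serializer<Eigen::SparseMatrix<double>> serializer;
  std::vector<std::uint8_t> buffer(serializer.size(A));

  // serialize() returns a pointer past the last byte written, or nullptr on overflow.
  std::uint8_t* wend = serializer.serialize(buffer.data(), buffer.data() + buffer.size(), A);
  assert(wend == buffer.data() + buffer.size());

  // deserialize() restores the dimensions, the compression state, and the internal arrays.
  Eigen::SparseMatrix<double> B;
  const std::uint8_t* rend = serializer.deserialize(buffer.data(), buffer.data() + buffer.size(), B);
  assert(rend != nullptr && B.nonZeros() == A.nonZeros());
  return 0;
}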