Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ > Class Template Reference

A versatile sparse matrix representation. More...

+ Inheritance diagram for Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >:

Classes

struct  IndexPosPair
 
class  SingletonVector
 

Public Types

enum  { Options }
 
typedef Diagonal< const SparseMatrix > ConstDiagonalReturnType
 
typedef Diagonal< SparseMatrix > DiagonalReturnType
 
typedef Base::IndexVector IndexVector
 
typedef Eigen::Map< SparseMatrix< Scalar, Options_, StorageIndex > > Map
 
typedef Base::ScalarVector ScalarVector
 
typedef internal::CompressedStorage< Scalar, StorageIndex > Storage
 
- Public Types inherited from Eigen::SparseCompressedBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > >
typedef SparseMatrixBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > > Base
 
- Public Types inherited from Eigen::SparseMatrixBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > >
enum  
 
typedef std::conditional_t< NumTraits< Scalar >::IsComplex, CwiseUnaryOp< internal::scalar_conjugate_op< Scalar >, Eigen::Transpose< const SparseMatrix< Scalar_, Options_, StorageIndex_ > > >, Transpose< const SparseMatrix< Scalar_, Options_, StorageIndex_ > > > AdjointReturnType
 
typedef Transpose< const SparseMatrix< Scalar_, Options_, StorageIndex_ > > ConstTransposeReturnType
 
typedef Matrix< StorageIndex, Dynamic, 1 > IndexVector
 
typedef internal::add_const_on_value_type_if_arithmetic< typename internal::packet_traits< Scalar >::type >::type PacketReturnType
 
typedef internal::packet_traits< Scalar >::type PacketScalar
 
typedef SparseMatrix< Scalar, Flags & RowMajorBit ? RowMajor : ColMajor, StorageIndex > PlainObject
 
typedef internal::traits< SparseMatrix< Scalar_, Options_, StorageIndex_ > >::Scalar Scalar
 
typedef Matrix< Scalar, Dynamic, 1 > ScalarVector
 
typedef SparseMatrixBase StorageBaseType
 
typedef internal::traits< SparseMatrix< Scalar_, Options_, StorageIndex_ > >::StorageIndex StorageIndex
 
typedef internal::traits< SparseMatrix< Scalar_, Options_, StorageIndex_ > >::StorageKind StorageKind
 
typedef Transpose< SparseMatrix< Scalar_, Options_, StorageIndex_ > > TransposeReturnType
 
typedef Scalar value_type
 
- Public Types inherited from Eigen::EigenBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > >
typedef Eigen::Index Index
 The interface type of indices. More...
 
typedef internal::traits< SparseMatrix< Scalar_, Options_, StorageIndex_ > >::StorageKind StorageKind
 

Public Member Functions

Scalar coeff (Index row, Index col) const
 
Scalar& coeffRef (Index row, Index col)
 
template<typename Derived , typename DupFunctor >
void collapseDuplicates (DenseBase< Derived > &wi, DupFunctor dup_func=DupFunctor())
 
Index cols () const
 
void conservativeResize (Index rows, Index cols)
 
Storage& data ()
 
const Storage& data () const
 
DiagonalReturnType diagonal ()
 
const ConstDiagonalReturnType diagonal () const
 
void finalize ()
 
StorageIndex* innerIndexPtr ()
 
const StorageIndex* innerIndexPtr () const
 
StorageIndex* innerNonZeroPtr ()
 
const StorageIndex* innerNonZeroPtr () const
 
Index innerSize () const
 
Scalar& insert (Index row, Index col)
 
Scalar& insertBack (Index row, Index col)
 
Scalar& insertBackByOuterInner (Index outer, Index inner)
 
Scalar& insertBackByOuterInnerUnordered (Index outer, Index inner)
 
Scalar& insertBackUncompressed (Index row, Index col)
 
Scalar& insertByOuterInner (Index j, Index i)
 
void insertEmptyOuterVectors (Index j, Index num=1)
 
template<typename InputIterators >
void insertFromSortedTriplets (const InputIterators &begin, const InputIterators &end)
 
template<typename InputIterators , typename DupFunctor >
void insertFromSortedTriplets (const InputIterators &begin, const InputIterators &end, DupFunctor dup_func)
 
template<typename InputIterators >
void insertFromTriplets (const InputIterators &begin, const InputIterators &end)
 
template<typename InputIterators , typename DupFunctor >
void insertFromTriplets (const InputIterators &begin, const InputIterators &end, DupFunctor dup_func)
 
bool isCompressed () const
 
void makeCompressed ()
 
Index nonZeros () const
 
SparseMatrix& operator= (const SparseMatrix &other)
 
template<typename OtherDerived >
EIGEN_DONT_INLINE SparseMatrix& operator= (const SparseMatrixBase< OtherDerived > &other)
 
template<typename OtherDerived >
EIGEN_DONT_INLINE SparseMatrix< Scalar, Options_, StorageIndex_ > & operator= (const SparseMatrixBase< OtherDerived > &other)
 
SparseMatrix& operator= (SparseMatrix &&other)
 
StorageIndex* outerIndexPtr ()
 
const StorageIndex* outerIndexPtr () const
 
Index outerSize () const
 
template<typename KeepFunc >
void prune (const KeepFunc &keep=KeepFunc())
 
void prune (const Scalar &reference, const RealScalar &epsilon=NumTraits< RealScalar >::dummy_precision())
 
void removeOuterVectors (Index j, Index num=1)
 
template<class SizesType >
void reserve (const SizesType &reserveSizes)
 
void reserve (Index reserveSize)
 
void resize (Index rows, Index cols)
 
void resizeNonZeros (Index size)
 
Index rows () const
 
template<typename InputIterators >
void setFromSortedTriplets (const InputIterators &begin, const InputIterators &end)
 
template<typename InputIterators , typename DupFunctor >
void setFromSortedTriplets (const InputIterators &begin, const InputIterators &end, DupFunctor dup_func)
 
template<typename InputIterators >
void setFromTriplets (const InputIterators &begin, const InputIterators &end)
 
template<typename InputIterators , typename DupFunctor >
void setFromTriplets (const InputIterators &begin, const InputIterators &end, DupFunctor dup_func)
 
void setIdentity ()
 
void setZero ()
 
 SparseMatrix ()
 
template<typename OtherDerived >
 SparseMatrix (const DiagonalBase< OtherDerived > &other)
 Copy constructor with in-place evaluation. More...
 
template<typename OtherDerived >
 SparseMatrix (const ReturnByValue< OtherDerived > &other)
 Copy constructor with in-place evaluation. More...
 
 SparseMatrix (const SparseMatrix &other)
 
template<typename OtherDerived >
 SparseMatrix (const SparseMatrixBase< OtherDerived > &other)
 
template<typename OtherDerived , unsigned int UpLo>
 SparseMatrix (const SparseSelfAdjointView< OtherDerived, UpLo > &other)
 
 SparseMatrix (Index rows, Index cols)
 
 SparseMatrix (SparseMatrix &&other)
 
void startVec (Index outer)
 
Scalar sum () const
 
void swap (SparseMatrix &other)
 
void uncompress ()
 
Scalar* valuePtr ()
 
const Scalar* valuePtr () const
 
 ~SparseMatrix ()
 
- Public Member Functions inherited from Eigen::SparseCompressedBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > >
Map< Array< Scalar, Dynamic, 1 > > coeffs ()
 
const Map< const Array< Scalar, Dynamic, 1 > > coeffs () const
 
StorageIndex* innerIndexPtr ()
 
const StorageIndex* innerIndexPtr () const
 
Index innerIndicesAreSorted () const
 
Index innerIndicesAreSorted (Index begin, Index end) const
 
StorageIndex* innerNonZeroPtr ()
 
const StorageIndex* innerNonZeroPtr () const
 
bool isCompressed () const
 
Index nonZeros () const
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator= (const EigenBase< OtherDerived > &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator= (const ReturnByValue< OtherDerived > &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator= (const SparseMatrix< Scalar_, Options_, StorageIndex_ > &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator= (const SparseMatrixBase< OtherDerived > &other)
 
StorageIndex* outerIndexPtr ()
 
const StorageIndex* outerIndexPtr () const
 
void sortInnerIndices ()
 
void sortInnerIndices (Index begin, Index end)
 
Scalar* valuePtr ()
 
const Scalar* valuePtr () const
 
- Public Member Functions inherited from Eigen::SparseMatrixBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > >
const AdjointReturnType adjoint () const
 
RealScalar blueNorm () const
 
Index cols () const
 
const SparseMatrixBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > >::template CwiseProductDenseReturnType< OtherDerived >::Type cwiseProduct (const MatrixBase< OtherDerived > &other) const
 
const CwiseProductDenseReturnType< OtherDerived >::Type cwiseProduct (const MatrixBase< OtherDerived > &other) const
 
internal::traits< SparseMatrix< Scalar_, Options_, StorageIndex_ > >::Scalar dot (const MatrixBase< OtherDerived > &other) const
 
Scalar dot (const MatrixBase< OtherDerived > &other) const
 
internal::traits< SparseMatrix< Scalar_, Options_, StorageIndex_ > >::Scalar dot (const SparseMatrixBase< OtherDerived > &other) const
 
Scalar dot (const SparseMatrixBase< OtherDerived > &other) const
 
const internal::eval< SparseMatrix< Scalar_, Options_, StorageIndex_ > >::type eval () const
 
Index innerSize () const
 
bool isApprox (const MatrixBase< OtherDerived > &other, const RealScalar &prec=NumTraits< Scalar >::dummy_precision()) const
 
bool isApprox (const SparseMatrixBase< OtherDerived > &other, const RealScalar &prec=NumTraits< Scalar >::dummy_precision()) const
 
bool isRValue () const
 
bool isVector () const
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & markAsRValue ()
 
RealScalar norm () const
 
const Product< SparseMatrix< Scalar_, Options_, StorageIndex_ >, OtherDerived > operator* (const DiagonalBase< OtherDerived > &other) const
 
const Product< SparseMatrix< Scalar_, Options_, StorageIndex_ >, OtherDerived > operator* (const MatrixBase< OtherDerived > &other) const
 
const Product< SparseMatrix< Scalar_, Options_, StorageIndex_ >, OtherDerived, AliasFreeProduct > operator* (const SparseMatrixBase< OtherDerived > &other) const
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator*= (const Scalar &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator*= (const SparseMatrixBase< OtherDerived > &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator+= (const DiagonalBase< OtherDerived > &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator+= (const EigenBase< OtherDerived > &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator+= (const SparseMatrixBase< OtherDerived > &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator-= (const DiagonalBase< OtherDerived > &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator-= (const EigenBase< OtherDerived > &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator-= (const SparseMatrixBase< OtherDerived > &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator/= (const Scalar &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator= (const EigenBase< OtherDerived > &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator= (const ReturnByValue< OtherDerived > &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator= (const SparseMatrix< Scalar_, Options_, StorageIndex_ > &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator= (const SparseMatrixBase< OtherDerived > &other)
 
Index outerSize () const
 
const SparseView< SparseMatrix< Scalar_, Options_, StorageIndex_ > > pruned (const Scalar &reference=Scalar(0), const RealScalar &epsilon=NumTraits< Scalar >::dummy_precision()) const
 
Index rows () const
 
SelfAdjointViewReturnType< UpLo >::Type selfadjointView ()
 
SparseMatrixBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > >::template SelfAdjointViewReturnType< UpLo >::Type selfadjointView ()
 
ConstSelfAdjointViewReturnType< UpLo >::Type selfadjointView () const
 
SparseMatrixBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > >::template ConstSelfAdjointViewReturnType< UpLo >::Type selfadjointView () const
 
Index size () const
 
 SparseMatrixBase ()
 
RealScalar squaredNorm () const
 
Scalar sum () const
 
DenseMatrixType toDense () const
 
TransposeReturnType transpose ()
 
const ConstTransposeReturnType transpose () const
 
const TriangularView< const SparseMatrix< Scalar_, Options_, StorageIndex_ >, Mode > triangularView () const
 
SparseSymmetricPermutationProduct< SparseMatrix< Scalar_, Options_, StorageIndex_ >, Upper|Lower > twistedBy (const PermutationMatrix< Dynamic, Dynamic, StorageIndex > &perm) const
 
- Public Member Functions inherited from Eigen::EigenBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > >
void addTo (Dest &dst) const
 
void applyThisOnTheLeft (Dest &dst) const
 
void applyThisOnTheRight (Dest &dst) const
 
EIGEN_CONSTEXPR Index cols () const EIGEN_NOEXCEPT
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & const_cast_derived () const
 
const SparseMatrix< Scalar_, Options_, StorageIndex_ > & const_derived () const
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & derived ()
 
const SparseMatrix< Scalar_, Options_, StorageIndex_ > & derived () const
 
void evalTo (Dest &dst) const
 
EIGEN_CONSTEXPR Index rows () const EIGEN_NOEXCEPT
 
EIGEN_CONSTEXPR Index size () const EIGEN_NOEXCEPT
 
void subTo (Dest &dst) const
 

Protected Types

typedef SparseMatrix< Scalar, IsRowMajor ? ColMajor : RowMajor, StorageIndex > TransposedSparseMatrix
 
- Protected Types inherited from Eigen::SparseCompressedBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > >
typedef Base::IndexVector IndexVector
 

Protected Member Functions

template<typename DiagXpr , typename Func >
void assignDiagonal (const DiagXpr diagXpr, const Func &assignFunc)
 
template<typename Other >
void initAssignment (const Other &other)
 
Scalar& insertAtByOuterInner (Index outer, Index inner, Index dst)
 
EIGEN_DEPRECATED EIGEN_DONT_INLINE Scalar& insertCompressed (Index row, Index col)
 
Scalar& insertCompressedAtByOuterInner (Index outer, Index inner, Index dst)
 
EIGEN_DEPRECATED EIGEN_DONT_INLINE Scalar& insertUncompressed (Index row, Index col)
 
Scalar& insertUncompressedAtByOuterInner (Index outer, Index inner, Index dst)
 
template<class SizesType >
void reserveInnerVectors (const SizesType &reserveSizes)
 
- Protected Member Functions inherited from Eigen::SparseCompressedBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > >
Eigen::Map< IndexVector > innerNonZeros ()
 
const Eigen::Map< const IndexVector > innerNonZeros () const
 
internal::LowerBoundIndex lower_bound (Index row, Index col) const
 
 SparseCompressedBase ()
 
- Protected Member Functions inherited from Eigen::SparseMatrixBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > >
SparseMatrix< Scalar_, Options_, StorageIndex_ > & assign (const OtherDerived &other)
 
void assignGeneric (const OtherDerived &other)
 

Protected Attributes

Storage m_data
 
StorageIndex* m_innerNonZeros
 
Index m_innerSize
 
StorageIndex* m_outerIndex
 
Index m_outerSize
 
- Protected Attributes inherited from Eigen::SparseMatrixBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > >
bool m_isRValue
 

Private Types

typedef SparseCompressedBase< SparseMatrix > Base
 

Private Member Functions

 EIGEN_STATIC_ASSERT ((Options &(ColMajor|RowMajor))==Options, INVALID_MATRIX_TEMPLATE_PARAMETERS) struct default_prunning_func
 

Additional Inherited Members

- Static Protected Member Functions inherited from Eigen::SparseMatrixBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > >
static StorageIndex convert_index (const Index idx)
 

Detailed Description

template<typename Scalar_, int Options_, typename StorageIndex_>
class Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >

A versatile sparse matrix representation.
 
This class implements a more versatile variant of the common compressed row/column storage format. Each column's (resp. row's) non-zeros are stored as pairs of values and associated row (resp. column) indices. All the non-zeros are stored in a single large buffer. Unlike the compressed format, there might be extra space in between the non-zeros of two successive columns (resp. rows) such that insertion of a new non-zero can be done with limited memory reallocation and copies.
 
A call to the function makeCompressed() turns the matrix into the standard compressed format compatible with many libraries.
 
More details on this storage scheme are given in the manual pages.

Template Parameters
Scalar_the scalar type, i.e. the type of the coefficients
Options_Union of bit flags controlling the storage scheme. Currently the only possibility is ColMajor or RowMajor. The default is 0 which means column-major.
StorageIndex_the type of the indices. It has to be a signed type (e.g., short, int, std::ptrdiff_t). Default is int.
Warning
In Eigen 3.2, the undocumented type SparseMatrix::Index was improperly defined as the storage index type (e.g., int), whereas it is now (starting from Eigen 3.3) deprecated and always defined as Eigen::Index. Code making use of SparseMatrix::Index thus likely has to be changed to use SparseMatrix::StorageIndex instead.

This class can be extended with the help of the plugin mechanism described on the page Extending MatrixBase (and other classes) by defining the preprocessor symbol EIGEN_SPARSEMATRIX_PLUGIN.

Definition at line 123 of file SparseMatrix.h.

Member Typedef Documentation

◆ Base

template<typename Scalar_ , int Options_, typename StorageIndex_ >
typedef SparseCompressedBase<SparseMatrix> Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::Base
private

Definition at line 126 of file SparseMatrix.h.

◆ ConstDiagonalReturnType

template<typename Scalar_ , int Options_, typename StorageIndex_ >
typedef Diagonal<const SparseMatrix> Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::ConstDiagonalReturnType

Definition at line 140 of file SparseMatrix.h.

◆ DiagonalReturnType

template<typename Scalar_ , int Options_, typename StorageIndex_ >
typedef Diagonal<SparseMatrix> Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::DiagonalReturnType

Definition at line 139 of file SparseMatrix.h.

◆ IndexVector

template<typename Scalar_ , int Options_, typename StorageIndex_ >
typedef Base::IndexVector Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::IndexVector

Definition at line 151 of file SparseMatrix.h.

◆ Map

template<typename Scalar_ , int Options_, typename StorageIndex_ >
typedef Eigen::Map<SparseMatrix<Scalar,Options_,StorageIndex> > Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::Map

Definition at line 138 of file SparseMatrix.h.

◆ ScalarVector

template<typename Scalar_ , int Options_, typename StorageIndex_ >
typedef Base::ScalarVector Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::ScalarVector

Definition at line 152 of file SparseMatrix.h.

◆ Storage

template<typename Scalar_ , int Options_, typename StorageIndex_ >
typedef internal::CompressedStorage<Scalar,StorageIndex> Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::Storage

Definition at line 146 of file SparseMatrix.h.

◆ TransposedSparseMatrix

template<typename Scalar_ , int Options_, typename StorageIndex_ >
typedef SparseMatrix<Scalar, IsRowMajor ? ColMajor : RowMajor, StorageIndex> Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::TransposedSparseMatrix
protected

Definition at line 154 of file SparseMatrix.h.

Member Enumeration Documentation

◆ anonymous enum

template<typename Scalar_ , int Options_, typename StorageIndex_ >
anonymous enum
Enumerator
Options 

Definition at line 147 of file SparseMatrix.h.

147  {
148  Options = Options_
149  };

Constructor & Destructor Documentation

◆ SparseMatrix() [1/8]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::SparseMatrix ( )
inline

Default constructor yielding an empty 0 x 0 matrix

Definition at line 773 of file SparseMatrix.h.

775  {
776  resize(0, 0);
777  }
StorageIndex * m_outerIndex
Definition: SparseMatrix.h:158
void resize(Index rows, Index cols)
Definition: SparseMatrix.h:739
StorageIndex * m_innerNonZeros
Definition: SparseMatrix.h:159

◆ SparseMatrix() [2/8]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::SparseMatrix ( Index  rows,
Index  cols 
)
inline

Constructs a rows x cols empty matrix

Definition at line 780 of file SparseMatrix.h.

782  {
783  resize(rows, cols);
784  }
Index cols() const
Definition: SparseMatrix.h:167
Index rows() const
Definition: SparseMatrix.h:165

◆ SparseMatrix() [3/8]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<typename OtherDerived >
Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::SparseMatrix ( const SparseMatrixBase< OtherDerived > &  other)
inline

Constructs a sparse matrix from the sparse expression other

Definition at line 788 of file SparseMatrix.h.

790  {
791  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
792  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
793  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
794  if (needToTranspose)
795  *this = other.derived();
796  else
797  {
798  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
799  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
800  #endif
801  internal::call_assignment_no_alias(*this, other.derived());
802  }
803  }
EIGEN_STATIC_ASSERT((Options &(ColMajor|RowMajor))==Options, INVALID_MATRIX_TEMPLATE_PARAMETERS) struct default_prunning_func
const unsigned int RowMajorBit
Definition: Constants.h:68
EIGEN_CONSTEXPR void call_assignment_no_alias(Dst &dst, const Src &src, const Func &func)

◆ SparseMatrix() [4/8]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<typename OtherDerived , unsigned int UpLo>
Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::SparseMatrix ( const SparseSelfAdjointView< OtherDerived, UpLo > &  other)
inline

Constructs a sparse matrix from the sparse selfadjoint view other

Definition at line 807 of file SparseMatrix.h.

809  {
810  Base::operator=(other);
811  }
Derived & operator=(const Derived &other)
Definition: SparseAssign.h:45

◆ SparseMatrix() [5/8]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::SparseMatrix ( SparseMatrix< Scalar_, Options_, StorageIndex_ > &&  other)
inline

Definition at line 813 of file SparseMatrix.h.

814  {
815  *this = other.derived().markAsRValue();
816  }
SparseCompressedBase< SparseMatrix > Base
Definition: SparseMatrix.h:126

◆ SparseMatrix() [6/8]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::SparseMatrix ( const SparseMatrix< Scalar_, Options_, StorageIndex_ > &  other)
inline

Copy constructor (it performs a deep copy)

Definition at line 819 of file SparseMatrix.h.

821  {
822  *this = other.derived();
823  }

◆ SparseMatrix() [7/8]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<typename OtherDerived >
Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::SparseMatrix ( const ReturnByValue< OtherDerived > &  other)
inline

Copy constructor with in-place evaluation.

Definition at line 827 of file SparseMatrix.h.

829  {
830  initAssignment(other);
831  other.evalTo(*this);
832  }
void initAssignment(const Other &other)
Definition: SparseMatrix.h:974

◆ SparseMatrix() [8/8]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<typename OtherDerived >
Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::SparseMatrix ( const DiagonalBase< OtherDerived > &  other)
inlineexplicit

Copy constructor with in-place evaluation.

Definition at line 836 of file SparseMatrix.h.

838  {
839  *this = other.derived();
840  }

◆ ~SparseMatrix()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::~SparseMatrix ( )
inline

Destructor

Definition at line 958 of file SparseMatrix.h.

959  {
960  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_outerIndex, m_outerSize + 1);
961  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
962  }

Member Function Documentation

◆ assignDiagonal()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<typename DiagXpr , typename Func >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::assignDiagonal ( const DiagXpr  diagXpr,
const Func &  assignFunc 
)
inlineprotected

Definition at line 1037 of file SparseMatrix.h.

1038  {
1039 
1040  constexpr StorageIndex kEmptyIndexVal(-1);
1041  typedef typename ScalarVector::AlignedMapType ValueMap;
1042 
1043  Index n = diagXpr.size();
1044 
1045  const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar,Scalar> >::value;
1046  if(overwrite)
1047  {
1048  if((m_outerSize != n) || (m_innerSize != n))
1049  resize(n, n);
1050  }
1051 
1052  if(m_data.size()==0 || overwrite)
1053  {
1054  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
1055  m_innerNonZeros = 0;
1056  resizeNonZeros(n);
1057  ValueMap valueMap(valuePtr(), n);
1058  std::iota(m_outerIndex, m_outerIndex + n + 1, StorageIndex(0));
1059  std::iota(innerIndexPtr(), innerIndexPtr() + n, StorageIndex(0));
1060  valueMap.setZero();
1061  internal::call_assignment_no_alias(valueMap, diagXpr, assignFunc);
1062  }
1063  else
1064  {
1065  internal::evaluator<DiagXpr> diaEval(diagXpr);
1066 
1068  typename IndexVector::AlignedMapType insertionLocations(tmp, n);
1069  insertionLocations.setConstant(kEmptyIndexVal);
1070 
1071  Index deferredInsertions = 0;
1072  Index shift = 0;
1073 
1074  for (Index j = 0; j < n; j++) {
1075  Index begin = m_outerIndex[j];
1076  Index end = isCompressed() ? m_outerIndex[j + 1] : begin + m_innerNonZeros[j];
1077  Index capacity = m_outerIndex[j + 1] - end;
1078  Index dst = m_data.searchLowerIndex(begin, end, j);
1079  // the entry exists: update it now
1080  if (dst != end && m_data.index(dst) == StorageIndex(j)) assignFunc.assignCoeff(m_data.value(dst), diaEval.coeff(j));
1081  // the entry belongs at the back of the vector: push to back
1082  else if (dst == end && capacity > 0)
1083  assignFunc.assignCoeff(insertBackUncompressed(j, j), diaEval.coeff(j));
1084  // the insertion requires a data move, record insertion location and handle in second pass
1085  else {
1086  insertionLocations.coeffRef(j) = StorageIndex(dst);
1087  deferredInsertions++;
1088  // if there is no capacity, all vectors to the right of this are shifted
1089  if (capacity == 0) shift++;
1090  }
1091  }
1092 
1093  if (deferredInsertions > 0) {
1094 
1095  m_data.resize(m_data.size() + shift);
1098  for (Index j = m_outerSize - 1; deferredInsertions > 0; j--) {
1099  Index begin = m_outerIndex[j];
1100  Index end = isCompressed() ? m_outerIndex[j + 1] : begin + m_innerNonZeros[j];
1101  Index capacity = m_outerIndex[j + 1] - end;
1102 
1103  bool doInsertion = insertionLocations(j) >= 0;
1104  bool breakUpCopy = doInsertion && (capacity > 0);
1105  // break up copy for sorted insertion into inactive nonzeros
1106  // optionally, add another criterion, i.e. 'breakUpCopy || (capacity > threshold)'
1107  // where `threshold >= 0` to skip inactive nonzeros in each vector
1108  // this reduces the total number of copied elements, but requires more moveChunk calls
1109  if (breakUpCopy) {
1110  Index copyBegin = m_outerIndex[j + 1];
1111  Index to = copyBegin + shift;
1112  Index chunkSize = copyEnd - copyBegin;
1113  m_data.moveChunk(copyBegin, to, chunkSize);
1114  copyEnd = end;
1115  }
1116 
1117  m_outerIndex[j + 1] += shift;
1118 
1119  if (doInsertion) {
1120  // if there is capacity, shift into the inactive nonzeros
1121  if (capacity > 0) shift++;
1122  Index copyBegin = insertionLocations(j);
1123  Index to = copyBegin + shift;
1124  Index chunkSize = copyEnd - copyBegin;
1125  m_data.moveChunk(copyBegin, to, chunkSize);
1126  Index dst = to - 1;
1127  m_data.index(dst) = StorageIndex(j);
1128  m_data.value(dst) = Scalar(0);
1129  assignFunc.assignCoeff(m_data.value(dst), diaEval.coeff(j));
1130  if (!isCompressed()) m_innerNonZeros[j]++;
1131  shift--;
1132  deferredInsertions--;
1133  copyEnd = copyBegin;
1134  }
1135  }
1136  }
1137  eigen_assert((shift == 0) && (deferredInsertions == 0));
1138  }
1139  }
int n
#define eigen_assert(x)
Definition: Macros.h:902
#define ei_declare_aligned_stack_constructed_variable(TYPE, NAME, SIZE, BUFFER)
Definition: Memory.h:847
Eigen::Map< Derived, AlignedMax > AlignedMapType
internal::traits< SparseMatrix< Scalar_, Options_, StorageIndex_ > >::StorageIndex StorageIndex
internal::traits< SparseMatrix< Scalar_, Options_, StorageIndex_ > >::Scalar Scalar
Scalar & insertBackUncompressed(Index row, Index col)
const Scalar * valuePtr() const
Definition: SparseMatrix.h:177
void resizeNonZeros(Index size)
Definition: SparseMatrix.h:758
const StorageIndex * innerIndexPtr() const
Definition: SparseMatrix.h:186
static const lastp1_t end
Eigen::Index Index
The interface type of indices.
Definition: EigenBase.h:41
std::ptrdiff_t j

◆ coeff()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Scalar Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::coeff ( Index  row,
Index  col 
) const
inline
Returns
the value of the matrix at position i, j This function returns Scalar(0) if the element is an explicit zero

Definition at line 217 of file SparseMatrix.h.

218  {
219  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
220 
221  const Index outer = IsRowMajor ? row : col;
222  const Index inner = IsRowMajor ? col : row;
223  Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
224  return m_data.atInRange(m_outerIndex[outer], end, inner);
225  }
RowXpr row(Index i)
This is the const version of row(). *‍/.
ColXpr col(Index i)
This is the const version of col().

◆ coeffRef()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Scalar& Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::coeffRef ( Index  row,
Index  col 
)
inline
Returns
a non-const reference to the value of the matrix at position i, j

If the element does not exist then it is inserted via the insert(Index,Index) function which itself turns the matrix into a non compressed form if that was not the case.

This is a O(log(nnz_j)) operation (binary search) plus the cost of insert(Index,Index) function if the element does not already exist.

Definition at line 235 of file SparseMatrix.h.

236  {
237  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
238  const Index outer = IsRowMajor ? row : col;
239  const Index inner = IsRowMajor ? col : row;
240  Index start = m_outerIndex[outer];
241  Index end = isCompressed() ? m_outerIndex[outer + 1] : m_outerIndex[outer] + m_innerNonZeros[outer];
242  eigen_assert(end >= start && "you probably called coeffRef on a non finalized matrix");
243  Index dst = start == end ? end : m_data.searchLowerIndex(start, end, inner);
244  if (dst == end) {
245  Index capacity = m_outerIndex[outer + 1] - end;
246  if (capacity > 0) {
247  // implies uncompressed: push to back of vector
248  m_innerNonZeros[outer]++;
249  m_data.index(end) = StorageIndex(inner);
250  m_data.value(end) = Scalar(0);
251  return m_data.value(end);
252  }
253  }
254  if ((dst < end) && (m_data.index(dst) == inner))
 255  // this coefficient exists, return a reference to it
256  return m_data.value(dst);
257  else
258  // insertion will require reconfiguring the buffer
259  return insertAtByOuterInner(outer, inner, dst);
260  }
Scalar & insertAtByOuterInner(Index outer, Index inner, Index dst)

◆ collapseDuplicates()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<typename Derived , typename DupFunctor >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::collapseDuplicates ( DenseBase< Derived > &  wi,
DupFunctor  dup_func = DupFunctor() 
)

Definition at line 1506 of file SparseMatrix.h.

1506  {
1507  // removes duplicate entries and compresses the matrix
1508  // the excess allocated memory is not released
1509  // the inner indices do not need to be sorted, nor is the matrix returned in a sorted state
1510  eigen_assert(wi.size() == m_innerSize);
1511  constexpr StorageIndex kEmptyIndexValue(-1);
1512  wi.setConstant(kEmptyIndexValue);
1513  StorageIndex count = 0;
1514  const bool is_compressed = isCompressed();
1515  // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
1516  for (Index j = 0; j < m_outerSize; ++j) {
1517  const StorageIndex newBegin = count;
1518  const StorageIndex end = is_compressed ? m_outerIndex[j + 1] : m_outerIndex[j] + m_innerNonZeros[j];
1519  for (StorageIndex k = m_outerIndex[j]; k < end; ++k) {
1520  StorageIndex i = m_data.index(k);
1521  if (wi(i) >= newBegin) {
1522  // entry at k is a duplicate
1523  // accumulate it into the primary entry located at wi(i)
1524  m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
1525  } else {
1526  // k is the primary entry in j with inner index i
1527  // shift it to the left and record its location at wi(i)
1528  m_data.index(count) = i;
1529  m_data.value(count) = m_data.value(k);
1530  wi(i) = count;
1531  ++count;
1532  }
1533  }
1534  m_outerIndex[j] = newBegin;
1535  }
1536  m_outerIndex[m_outerSize] = count;
1537  m_data.resize(count);
1538 
1539  // turn the matrix into compressed form (if it is not already)
1540  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
1541  m_innerNonZeros = 0;
1542 }

◆ cols()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Index Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::cols ( void  ) const
inline
Returns
the number of columns of the matrix

Definition at line 167 of file SparseMatrix.h.

167 { return IsRowMajor ? m_innerSize : m_outerSize; }

◆ conservativeResize()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::conservativeResize ( Index  rows,
Index  cols 
)
inline

Resizes the matrix to a rows x cols matrix leaving old values untouched.

If the sizes of the matrix are decreased, then the matrix is turned to uncompressed-mode and the storage of the out of bounds coefficients is kept and reserved. Call makeCompressed() to pack the entries and squeeze extra memory.

See also
reserve(), setZero(), makeCompressed()

Definition at line 686 of file SparseMatrix.h.

686  {
687 
688  // If one dimension is null, then there is nothing to be preserved
689  if (rows == 0 || cols == 0) return resize(rows, cols);
690 
691  Index newOuterSize = IsRowMajor ? rows : cols;
692  Index newInnerSize = IsRowMajor ? cols : rows;
693 
694  Index innerChange = newInnerSize - m_innerSize;
695  Index outerChange = newOuterSize - m_outerSize;
696 
697  if (outerChange != 0) {
698  m_outerIndex = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(
699  m_outerIndex, newOuterSize + 1, m_outerSize + 1);
700 
701  if (!isCompressed())
702  m_innerNonZeros = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(
703  m_innerNonZeros, newOuterSize, m_outerSize);
704 
705  if (outerChange > 0) {
707  std::fill_n(m_outerIndex + m_outerSize, outerChange + 1, lastIdx);
708 
709  if (!isCompressed()) std::fill_n(m_innerNonZeros + m_outerSize, outerChange, StorageIndex(0));
710  }
711  }
712  m_outerSize = newOuterSize;
713 
714  if (innerChange < 0) {
715  for (Index j = 0; j < m_outerSize; j++) {
716  Index start = m_outerIndex[j];
717  Index end = isCompressed() ? m_outerIndex[j + 1] : start + m_innerNonZeros[j];
718  Index lb = m_data.searchLowerIndex(start, end, newInnerSize);
719  if (lb != end) {
720  uncompress();
721  m_innerNonZeros[j] = StorageIndex(lb - start);
722  }
723  }
724  }
725  m_innerSize = newInnerSize;
726 
727  Index newSize = m_outerIndex[m_outerSize];
728  eigen_assert(newSize <= m_data.size());
729  m_data.resize(newSize);
730  }

◆ data() [1/2]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Storage& Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::data ( )
inline

Definition at line 211 of file SparseMatrix.h.

211 { return m_data; }

◆ data() [2/2]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
const Storage& Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::data ( ) const
inline

Definition at line 213 of file SparseMatrix.h.

213 { return m_data; }

◆ diagonal() [1/2]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
DiagonalReturnType Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::diagonal ( )
inline
Returns
a read-write expression of the diagonal coefficients.
Warning
If the diagonal entries are written, then all diagonal entries must already exist, otherwise an assertion will be raised.

Definition at line 770 of file SparseMatrix.h.

770 { return DiagonalReturnType(*this); }
Diagonal< SparseMatrix > DiagonalReturnType
Definition: SparseMatrix.h:139

◆ diagonal() [2/2]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
const ConstDiagonalReturnType Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::diagonal ( ) const
inline
Returns
a const expression of the diagonal coefficients.

Definition at line 764 of file SparseMatrix.h.

764 { return ConstDiagonalReturnType(*this); }
Diagonal< const SparseMatrix > ConstDiagonalReturnType
Definition: SparseMatrix.h:140

◆ EIGEN_STATIC_ASSERT()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::EIGEN_STATIC_ASSERT ( (Options &(ColMajor|RowMajor))  ==Options,
INVALID_MATRIX_TEMPLATE_PARAMETERS   
)
inlineprivate

Definition at line 1148 of file SparseMatrix.h.

1150  {
1151  default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
1152  inline bool operator() (const Index&, const Index&, const Scalar& value) const
1153  {
1154  return !internal::isMuchSmallerThan(value, reference, epsilon);
1155  }
1156  Scalar reference;
1157  RealScalar epsilon;
1158  };
IndexedView_or_Block operator()(const RowIndices &rowIndices, const ColIndices &colIndices)
bool isMuchSmallerThan(const Scalar &x, const OtherScalar &y, const typename NumTraits< Scalar >::Real &precision=NumTraits< Scalar >::dummy_precision())

◆ finalize()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::finalize ( )
inline

Definition at line 457 of file SparseMatrix.h.

458  {
459  if(isCompressed())
460  {
461  StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
462  Index i = m_outerSize;
463  // find the last filled column
464  while (i>=0 && m_outerIndex[i]==0)
465  --i;
466  ++i;
467  while (i<=m_outerSize)
468  {
469  m_outerIndex[i] = size;
470  ++i;
471  }
472  }
473  }

◆ initAssignment()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<typename Other >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::initAssignment ( const Other &  other)
inlineprotected

Definition at line 974 of file SparseMatrix.h.

975  {
976  resize(other.rows(), other.cols());
977  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
978  m_innerNonZeros = 0;
979  }

◆ innerIndexPtr() [1/2]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
StorageIndex* Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::innerIndexPtr ( )
inline
Returns
a non-const pointer to the array of inner indices. This function is aimed at interoperability with other libraries.
See also
valuePtr(), outerIndexPtr()

Definition at line 190 of file SparseMatrix.h.

190 { return m_data.indexPtr(); }

◆ innerIndexPtr() [2/2]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
const StorageIndex* Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::innerIndexPtr ( ) const
inline
Returns
a const pointer to the array of inner indices. This function is aimed at interoperability with other libraries.
See also
valuePtr(), outerIndexPtr()

Definition at line 186 of file SparseMatrix.h.

186 { return m_data.indexPtr(); }

◆ innerNonZeroPtr() [1/2]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
StorageIndex* Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::innerNonZeroPtr ( )
inline
Returns
a non-const pointer to the array of the number of non zeros of the inner vectors. This function is aimed at interoperability with other libraries.
Warning
it returns the null pointer 0 in compressed mode

Definition at line 208 of file SparseMatrix.h.

208 { return m_innerNonZeros; }

◆ innerNonZeroPtr() [2/2]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
const StorageIndex* Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::innerNonZeroPtr ( ) const
inline
Returns
a const pointer to the array of the number of non zeros of the inner vectors. This function is aimed at interoperability with other libraries.
Warning
it returns the null pointer 0 in compressed mode

Definition at line 204 of file SparseMatrix.h.

204 { return m_innerNonZeros; }

◆ innerSize()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Index Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::innerSize ( ) const
inline
Returns
the number of rows (resp. columns) of the matrix if the storage order is column major (resp. row major)

Definition at line 170 of file SparseMatrix.h.

170 { return m_innerSize; }

◆ insert()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
SparseMatrix< Scalar_, Options_, StorageIndex_ >::Scalar & Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insert ( Index  row,
Index  col 
)
inline
Returns
a reference to a novel non zero coefficient with coordinates row x col. The non zero coefficient must not already exist.

If the matrix *this is in compressed mode, then *this is turned into uncompressed mode while reserving room for 2 x this->innerSize() non zeros if reserve(Index) has not been called earlier. In this case, the insertion procedure is optimized for a sequential insertion mode where elements are assumed to be inserted by increasing outer-indices.

If that's not the case, then it is strongly recommended to either use a triplet-list to assemble the matrix, or to first call reserve(const SizesType &) to reserve the appropriate number of non-zero elements per inner vector.

Assuming memory has been appropriately reserved, this function performs a sorted insertion in O(1) if the elements of each inner vector are inserted in increasing inner index order, and in O(nnz_j) for a random insertion.

Definition at line 1620 of file SparseMatrix.h.

1620  {
1622 }
Scalar & insertByOuterInner(Index j, Index i)
Definition: SparseMatrix.h:566

◆ insertAtByOuterInner()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
SparseMatrix< Scalar_, Options_, StorageIndex_ >::Scalar & Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertAtByOuterInner ( Index  outer,
Index  inner,
Index  dst 
)
inlineprotected

Definition at line 1626 of file SparseMatrix.h.

1626  {
1627  // random insertion into compressed matrix is very slow
1628  uncompress();
1629  return insertUncompressedAtByOuterInner(outer, inner, dst);
1630 }
Scalar & insertUncompressedAtByOuterInner(Index outer, Index inner, Index dst)

◆ insertBack()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Scalar& Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertBack ( Index  row,
Index  col 
)
inline

Definition at line 418 of file SparseMatrix.h.

419  {
421  }
Scalar & insertBackByOuterInner(Index outer, Index inner)
Definition: SparseMatrix.h:425

◆ insertBackByOuterInner()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Scalar& Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertBackByOuterInner ( Index  outer,
Index  inner 
)
inline

Definition at line 425 of file SparseMatrix.h.

426  {
427  eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
428  eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
429  StorageIndex p = m_outerIndex[outer+1];
430  ++m_outerIndex[outer+1];
431  m_data.append(Scalar(0), inner);
432  return m_data.value(p);
433  }
float * p

◆ insertBackByOuterInnerUnordered()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Scalar& Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertBackByOuterInnerUnordered ( Index  outer,
Index  inner 
)
inline

Definition at line 437 of file SparseMatrix.h.

438  {
439  StorageIndex p = m_outerIndex[outer+1];
440  ++m_outerIndex[outer+1];
441  m_data.append(Scalar(0), inner);
442  return m_data.value(p);
443  }

◆ insertBackUncompressed()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Scalar& Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertBackUncompressed ( Index  row,
Index  col 
)
inline

Definition at line 1007 of file SparseMatrix.h.

1008  {
1009  const Index outer = IsRowMajor ? row : col;
1010  const Index inner = IsRowMajor ? col : row;
1011 
1013  eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
1014 
1015  Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
1016  m_data.index(p) = StorageIndex(inner);
1017  m_data.value(p) = Scalar(0);
1018  return m_data.value(p);
1019  }

◆ insertByOuterInner()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Scalar& Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertByOuterInner ( Index  j,
Index  i 
)
inline

Definition at line 566 of file SparseMatrix.h.

567  {
568  Index start = m_outerIndex[j];
569  Index end = isCompressed() ? m_outerIndex[j + 1] : start + m_innerNonZeros[j];
570  Index dst = start == end ? end : m_data.searchLowerIndex(start, end, i);
571  if (dst == end) {
572  Index capacity = m_outerIndex[j + 1] - end;
573  if (capacity > 0) {
574  // implies uncompressed: push to back of vector
575  m_innerNonZeros[j]++;
576  m_data.index(end) = StorageIndex(i);
577  m_data.value(end) = Scalar(0);
578  return m_data.value(end);
579  }
580  }
581  eigen_assert((dst == end || m_data.index(dst) != i) &&
582  "you cannot insert an element that already exists, you must call coeffRef to this end");
583  return insertAtByOuterInner(j, i, dst);
584  }

◆ insertCompressed()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
EIGEN_DEPRECATED EIGEN_DONT_INLINE SparseMatrix< Scalar_, Options_, StorageIndex_ >::Scalar & Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertCompressed ( Index  row,
Index  col 
)
protected

Definition at line 1658 of file SparseMatrix.h.

1658  {
1660  Index outer = IsRowMajor ? row : col;
1661  Index inner = IsRowMajor ? col : row;
1662  Index start = m_outerIndex[outer];
1663  Index end = m_outerIndex[outer + 1];
1664  Index dst = start == end ? end : m_data.searchLowerIndex(start, end, inner);
1665  eigen_assert((dst == end || m_data.index(dst) != inner) &&
1666  "you cannot insert an element that already exists, you must call coeffRef to this end");
1667  return insertCompressedAtByOuterInner(outer, inner, dst);
1668 }
Scalar & insertCompressedAtByOuterInner(Index outer, Index inner, Index dst)

◆ insertCompressedAtByOuterInner()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
SparseMatrix< Scalar_, Options_, StorageIndex_ >::Scalar & Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertCompressedAtByOuterInner ( Index  outer,
Index  inner,
Index  dst 
)
protected

Definition at line 1672 of file SparseMatrix.h.

1672  {
1674  // compressed insertion always requires expanding the buffer
1675  // first, check if there is adequate allocated memory
1676  if (m_data.allocatedSize() <= m_data.size()) {
1677  // if there is no capacity for a single insertion, double the capacity
1678  // increase capacity by a minimum of 32
1679  Index minReserve = 32;
1680  Index reserveSize = numext::maxi(minReserve, m_data.allocatedSize());
1681  m_data.reserve(reserveSize);
1682  }
1683  m_data.resize(m_data.size() + 1);
1684  Index chunkSize = m_outerIndex[m_outerSize] - dst;
1685  // shift the existing data to the right if necessary
1686  m_data.moveChunk(dst, dst + 1, chunkSize);
1687  // update nonzero counts
1688  // potentially O(outerSize) bottleneck!
1689  for (Index j = outer; j < m_outerSize; j++) m_outerIndex[j + 1]++;
1690  // initialize the coefficient
1691  m_data.index(dst) = StorageIndex(inner);
1692  m_data.value(dst) = Scalar(0);
1693  // return a reference to the coefficient
1694  return m_data.value(dst);
1695 }
EIGEN_ALWAYS_INLINE T maxi(const T &x, const T &y)

◆ insertEmptyOuterVectors()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertEmptyOuterVectors ( Index  j,
Index  num = 1 
)
inline

Definition at line 509 of file SparseMatrix.h.

509  {
510  EIGEN_USING_STD(fill_n);
511  eigen_assert(num >= 0 && j >= 0 && j < m_outerSize && "Invalid parameters");
512 
513  const Index newRows = IsRowMajor ? m_outerSize + num : rows();
514  const Index newCols = IsRowMajor ? cols() : m_outerSize + num;
515 
516  const Index begin = j;
517  const Index end = m_outerSize;
518  const Index target = j + num;
519 
520  // expand the matrix to the larger size
521  conservativeResize(newRows, newCols);
522 
523  // shift m_outerIndex and m_innerNonZeros [num] to the right
525  // m_outerIndex[begin] == m_outerIndex[target], set all indices in this range to same value
526  fill_n(m_outerIndex + begin, num, m_outerIndex[begin]);
527 
528  if (!isCompressed()) {
530  // set the nonzeros of the newly inserted vectors to 0
531  fill_n(m_innerNonZeros + begin, num, StorageIndex(0));
532  }
533  }
#define EIGEN_USING_STD(FUNC)
Definition: Macros.h:1080
void conservativeResize(Index rows, Index cols)
Definition: SparseMatrix.h:686
void smart_memmove(const T *start, const T *end, T *target)
Definition: Memory.h:625

◆ insertFromSortedTriplets() [1/2]

template<typename Scalar , int Options_, typename StorageIndex_ >
template<typename InputIterators >
void Eigen::SparseMatrix< Scalar, Options_, StorageIndex_ >::insertFromSortedTriplets ( const InputIterators &  begin,
const InputIterators &  end 
)

The same as insertFromTriplets but triplets are assumed to be pre-sorted. This is faster and requires less temporary storage. Two triplets a and b are appropriately ordered if:

ColMajor: ((a.col() != b.col()) ? (a.col() < b.col()) : (a.row() < b.row())
RowMajor: ((a.row() != b.row()) ? (a.row() < b.row()) : (a.col() < b.col())
Array< int, 3, 1 > b
@ ColMajor
Definition: Constants.h:321
@ RowMajor
Definition: Constants.h:323

Definition at line 1482 of file SparseMatrix.h.

1483 {
1484  internal::insert_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_> >(begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
1485 }

◆ insertFromSortedTriplets() [2/2]

template<typename Scalar , int Options_, typename StorageIndex_ >
template<typename InputIterators , typename DupFunctor >
void Eigen::SparseMatrix< Scalar, Options_, StorageIndex_ >::insertFromSortedTriplets ( const InputIterators &  begin,
const InputIterators &  end,
DupFunctor  dup_func 
)

The same as insertFromSortedTriplets but when duplicates are met the functor dup_func is applied:

value = dup_func(OldValue, NewValue)

Here is a C++11 example keeping the latest entry only:

mat.insertFromSortedTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; });

Definition at line 1498 of file SparseMatrix.h.

1499 {
1500  internal::insert_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(begin, end, *this, dup_func);
1501 }

◆ insertFromTriplets() [1/2]

template<typename Scalar , int Options_, typename StorageIndex_ >
template<typename InputIterators >
void Eigen::SparseMatrix< Scalar, Options_, StorageIndex_ >::insertFromTriplets ( const InputIterators &  begin,
const InputIterators &  end 
)

Insert a batch of elements into the matrix *this with the list of triplets defined in the half-open range from begin to end.

A triplet is a tuple (i,j,value) defining a non-zero element. The input list of triplets does not have to be sorted, and may contain duplicated elements. In any case, the result is a sorted and compressed sparse matrix where the duplicates have been summed up. This is an O(n) operation, with n the number of triplet elements. The initial contents of *this are preserved (except for the summation of duplicate elements). The matrix *this must be properly sized beforehand. The sizes are not extracted from the triplet list.

The InputIterators value_type must provide the following interface:

Scalar value() const; // the value
IndexType row() const; // the row index i
IndexType col() const; // the column index j

See for instance the Eigen::Triplet template class.

Here is a typical usage example:

SparseMatrixType m(rows,cols); // m contains nonzero entries
typedef Triplet<double> T;
std::vector<T> tripletList;
tripletList.reserve(estimation_of_entries);
for(...)
{
// ...
tripletList.push_back(T(i,j,v_ij));
}
m.insertFromTriplets(tripletList.begin(), tripletList.end());
// m is ready to go!
Matrix3f m
Eigen::Triplet< double > T
Warning
The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather be explicitly stored into a std::vector for instance.

Definition at line 1452 of file SparseMatrix.h.

1453 {
1454  internal::insert_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_> >(begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
1455 }

◆ insertFromTriplets() [2/2]

template<typename Scalar , int Options_, typename StorageIndex_ >
template<typename InputIterators , typename DupFunctor >
void Eigen::SparseMatrix< Scalar, Options_, StorageIndex_ >::insertFromTriplets ( const InputIterators &  begin,
const InputIterators &  end,
DupFunctor  dup_func 
)

The same as insertFromTriplets but when duplicates are met the functor dup_func is applied:

value = dup_func(OldValue, NewValue)

Here is a C++11 example keeping the latest entry only:

mat.insertFromTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; });

Definition at line 1468 of file SparseMatrix.h.

1469 {
1470  internal::insert_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(begin, end, *this, dup_func);
1471 }

◆ insertUncompressed()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
EIGEN_DEPRECATED EIGEN_DONT_INLINE SparseMatrix< Scalar_, Options_, StorageIndex_ >::Scalar & Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertUncompressed ( Index  row,
Index  col 
)
protected

Definition at line 1634 of file SparseMatrix.h.

1634  {
1636  Index outer = IsRowMajor ? row : col;
1637  Index inner = IsRowMajor ? col : row;
1638  Index start = m_outerIndex[outer];
1639  Index end = start + m_innerNonZeros[outer];
1640  Index dst = start == end ? end : m_data.searchLowerIndex(start, end, inner);
1641  if (dst == end) {
1642  Index capacity = m_outerIndex[outer + 1] - end;
1643  if (capacity > 0) {
1644  // implies uncompressed: push to back of vector
1645  m_innerNonZeros[outer]++;
1646  m_data.index(end) = StorageIndex(inner);
1647  m_data.value(end) = Scalar(0);
1648  return m_data.value(end);
1649  }
1650  }
1651  eigen_assert((dst == end || m_data.index(dst) != inner) &&
1652  "you cannot insert an element that already exists, you must call coeffRef to this end");
1653  return insertUncompressedAtByOuterInner(outer, inner, dst);
1654 }

◆ insertUncompressedAtByOuterInner()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
SparseMatrix< Scalar_, Options_, StorageIndex_ >::Scalar & Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertUncompressedAtByOuterInner ( Index  outer,
Index  inner,
Index  dst 
)
protected

Definition at line 1699 of file SparseMatrix.h.

1699  {
1701  // find a vector with capacity, starting at `outer` and searching to the left and right
1702  for (Index leftTarget = outer - 1, rightTarget = outer; (leftTarget >= 0) || (rightTarget < m_outerSize);) {
1703  if (rightTarget < m_outerSize) {
1704  Index start = m_outerIndex[rightTarget];
1705  Index end = start + m_innerNonZeros[rightTarget];
1706  Index nextStart = m_outerIndex[rightTarget + 1];
1707  Index capacity = nextStart - end;
1708  if (capacity > 0) {
1709  // move [dst, end) to dst+1 and insert at dst
1710  Index chunkSize = end - dst;
1711  if (chunkSize > 0) m_data.moveChunk(dst, dst + 1, chunkSize);
1712  m_innerNonZeros[outer]++;
1713  for (Index j = outer; j < rightTarget; j++) m_outerIndex[j + 1]++;
1714  m_data.index(dst) = StorageIndex(inner);
1715  m_data.value(dst) = Scalar(0);
1716  return m_data.value(dst);
1717  }
1718  rightTarget++;
1719  }
1720  if (leftTarget >= 0) {
1721  Index start = m_outerIndex[leftTarget];
1722  Index end = start + m_innerNonZeros[leftTarget];
1723  Index nextStart = m_outerIndex[leftTarget + 1];
1724  Index capacity = nextStart - end;
1725  if (capacity > 0) {
1726  // tricky: dst is a lower bound, so we must insert at dst-1 when shifting left
1727  // move [nextStart, dst) to nextStart-1 and insert at dst-1
1728  Index chunkSize = dst - nextStart;
1729  if (chunkSize > 0) m_data.moveChunk(nextStart, nextStart - 1, chunkSize);
1730  m_innerNonZeros[outer]++;
1731  for (Index j = leftTarget; j < outer; j++) m_outerIndex[j + 1]--;
1732  m_data.index(dst - 1) = StorageIndex(inner);
1733  m_data.value(dst - 1) = Scalar(0);
1734  return m_data.value(dst - 1);
1735  }
1736  leftTarget--;
1737  }
1738  }
1739 
1740  // no room for interior insertion
1741  // nonZeros() == m_data.size()
1742  // record offset as outerIndxPtr will change
1743  Index dst_offset = dst - m_outerIndex[outer];
1744  // allocate space for random insertion
1745  if (m_data.allocatedSize() == 0) {
1746  // fast method to allocate space for one element per vector in empty matrix
1747  m_data.resize(m_outerSize);
1748  std::iota(m_outerIndex, m_outerIndex + m_outerSize + 1, StorageIndex(0));
1749  } else {
1750  // check for integer overflow: if maxReserveSize == 0, insertion is not possible
1751  Index maxReserveSize = static_cast<Index>(NumTraits<StorageIndex>::highest()) - m_data.allocatedSize();
1752  eigen_assert(maxReserveSize > 0);
1753  if (m_outerSize <= maxReserveSize) {
1754  // allocate space for one additional element per vector
1755  reserveInnerVectors(IndexVector::Constant(m_outerSize, 1));
1756  } else {
1757  // handle the edge case where StorageIndex is insufficient to reserve outerSize additional elements
1758  // allocate space for one additional element in the interval [outer,maxReserveSize)
1759  typedef internal::sparse_reserve_op<StorageIndex> ReserveSizesOp;
1760  typedef CwiseNullaryOp<ReserveSizesOp, IndexVector> ReserveSizesXpr;
1761  ReserveSizesXpr reserveSizesXpr(m_outerSize, 1, ReserveSizesOp(outer, m_outerSize, maxReserveSize));
1762  reserveInnerVectors(reserveSizesXpr);
1763  }
1764  }
1765  // insert element at `dst` with new outer indices
1766  Index start = m_outerIndex[outer];
1767  Index end = start + m_innerNonZeros[outer];
1768  Index new_dst = start + dst_offset;
1769  Index chunkSize = end - new_dst;
1770  if (chunkSize > 0) m_data.moveChunk(new_dst, new_dst + 1, chunkSize);
1771  m_innerNonZeros[outer]++;
1772  m_data.index(new_dst) = StorageIndex(inner);
1773  m_data.value(new_dst) = Scalar(0);
1774  return m_data.value(new_dst);
1775 }
void reserveInnerVectors(const SizesType &reserveSizes)
Definition: SparseMatrix.h:332

◆ isCompressed()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
bool Eigen::SparseCompressedBase< Derived >::isCompressed
inline
Returns
whether *this is in compressed form.

Definition at line 112 of file SparseCompressedBase.h.

112 { return innerNonZeroPtr()==0; }
const StorageIndex * innerNonZeroPtr() const
Definition: SparseMatrix.h:204

◆ makeCompressed()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::makeCompressed ( )
inline

Turns the matrix into the compressed format.

Definition at line 588 of file SparseMatrix.h.

589  {
590  if (isCompressed()) return;
591 
593 
594  StorageIndex start = m_outerIndex[1];
596  // try to move fewer, larger contiguous chunks
597  Index copyStart = start;
598  Index copyTarget = m_innerNonZeros[0];
599  for (Index j = 1; j < m_outerSize; j++)
600  {
601  StorageIndex end = start + m_innerNonZeros[j];
602  StorageIndex nextStart = m_outerIndex[j + 1];
603  // don't forget to move the last chunk!
604  bool breakUpCopy = (end != nextStart) || (j == m_outerSize - 1);
605  if (breakUpCopy)
606  {
607  Index chunkSize = end - copyStart;
608  if(chunkSize > 0) m_data.moveChunk(copyStart, copyTarget, chunkSize);
609  copyStart = nextStart;
610  copyTarget += chunkSize;
611  }
612  start = nextStart;
614  }
616 
617  // release as much memory as possible
618  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
619  m_innerNonZeros = 0;
620  m_data.squeeze();
621  }
#define eigen_internal_assert(x)
Definition: Macros.h:908

◆ nonZeros()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Index Eigen::SparseCompressedBase< Derived >::nonZeros
inline
Returns
the number of non zero coefficients

Definition at line 61 of file SparseCompressedBase.h.

62  {
63  if (Derived::IsVectorAtCompileTime && outerIndexPtr() == 0)
64  return derived().nonZeros();
65  else if (derived().outerSize() == 0)
66  return 0;
67  else if (isCompressed())
68  return outerIndexPtr()[derived().outerSize()] - outerIndexPtr()[0];
69  else
70  return innerNonZeros().sum();
71  }
Index outerSize() const
Definition: SparseMatrix.h:172
const StorageIndex * outerIndexPtr() const
Definition: SparseMatrix.h:195
SparseMatrix< Scalar_, Options_, StorageIndex_ > & derived()
Definition: EigenBase.h:48

◆ operator=() [1/4]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
SparseMatrix& Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::operator= ( const SparseMatrix< Scalar_, Options_, StorageIndex_ > &  other)
inline

Definition at line 869 of file SparseMatrix.h.

870  {
871  if (other.isRValue())
872  {
873  swap(other.const_cast_derived());
874  }
875  else if(this!=&other)
876  {
877  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
878  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
879  #endif
880  initAssignment(other);
881  if(other.isCompressed())
882  {
883  internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
884  m_data = other.m_data;
885  }
886  else
887  {
888  Base::operator=(other);
889  }
890  }
891  return *this;
892  }
void swap(SparseMatrix &other)
Definition: SparseMatrix.h:844
void smart_copy(const T *start, const T *end, T *target)
Definition: Memory.h:601

◆ operator=() [2/4]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<typename OtherDerived >
EIGEN_DONT_INLINE SparseMatrix& Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::operator= ( const SparseMatrixBase< OtherDerived > &  other)

◆ operator=() [3/4]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<typename OtherDerived >
EIGEN_DONT_INLINE SparseMatrix<Scalar,Options_,StorageIndex_>& Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::operator= ( const SparseMatrixBase< OtherDerived > &  other)

Definition at line 1547 of file SparseMatrix.h.

1548 {
1549  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
1550  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
1551 
1552  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1553  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1554  #endif
1555 
1556  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
1557  if (needToTranspose)
1558  {
1559  #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1560  EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1561  #endif
1562  // two passes algorithm:
1563  // 1 - compute the number of coeffs per dest inner vector
1564  // 2 - do the actual copy/eval
1565  // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
1566  typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
1567  typedef internal::remove_all_t<OtherCopy> OtherCopy_;
1568  typedef internal::evaluator<OtherCopy_> OtherCopyEval;
1569  OtherCopy otherCopy(other.derived());
1570  OtherCopyEval otherCopyEval(otherCopy);
1571 
1572  SparseMatrix dest(other.rows(),other.cols());
1573  Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();
1574 
1575  // pass 1
1576  // FIXME the above copy could be merged with that pass
1577  for (Index j=0; j<otherCopy.outerSize(); ++j)
1578  for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1579  ++dest.m_outerIndex[it.index()];
1580 
1581  // prefix sum
1582  StorageIndex count = 0;
1583  IndexVector positions(dest.outerSize());
1584  for (Index j=0; j<dest.outerSize(); ++j)
1585  {
1586  StorageIndex tmp = dest.m_outerIndex[j];
1587  dest.m_outerIndex[j] = count;
1588  positions[j] = count;
1589  count += tmp;
1590  }
1591  dest.m_outerIndex[dest.outerSize()] = count;
1592  // alloc
1593  dest.m_data.resize(count);
1594  // pass 2
1595  for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
1596  {
1597  for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1598  {
1599  Index pos = positions[it.index()]++;
1600  dest.m_data.index(pos) = j;
1601  dest.m_data.value(pos) = it.value();
1602  }
1603  }
1604  this->swap(dest);
1605  return *this;
1606  }
1607  else
1608  {
1609  if(other.isRValue())
1610  {
1611  initAssignment(other.derived());
1612  }
1613  // there is no special optimization
1614  return Base::operator=(other.derived());
1615  }
1616 }
A matrix or vector expression mapping an existing array of data.
Definition: Map.h:98
Base::IndexVector IndexVector
Definition: SparseMatrix.h:151

◆ operator=() [4/4]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
SparseMatrix& Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::operator= ( SparseMatrix< Scalar_, Options_, StorageIndex_ > &&  other)
inline

Definition at line 894 of file SparseMatrix.h.

894  {
895  return *this = other.derived().markAsRValue();
896  }

◆ outerIndexPtr() [1/2]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
StorageIndex* Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::outerIndexPtr ( )
inline
Returns
a non-const pointer to the array of the starting positions of the inner vectors. This function is aimed at interoperability with other libraries.
See also
valuePtr(), innerIndexPtr()

Definition at line 199 of file SparseMatrix.h.

199 { return m_outerIndex; }

◆ outerIndexPtr() [2/2]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
const StorageIndex* Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::outerIndexPtr ( ) const
inline
Returns
a const pointer to the array of the starting positions of the inner vectors. This function is aimed at interoperability with other libraries.
See also
valuePtr(), innerIndexPtr()

Definition at line 195 of file SparseMatrix.h.

195 { return m_outerIndex; }

◆ outerSize()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Index Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::outerSize ( ) const
inline
Returns
the number of columns (resp. rows) of the matrix if the storage order is column major (resp. row major)

Definition at line 172 of file SparseMatrix.h.

172 { return m_outerSize; }

◆ prune() [1/2]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<typename KeepFunc >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::prune ( const KeepFunc &  keep = KeepFunc())
inline

Turns the matrix into compressed format, and suppresses all nonzeros which do not satisfy the predicate keep. The functor type KeepFunc must implement the following function:

bool operator() (const Index& row, const Index& col, const Scalar& value) const;
See also
prune(Scalar,RealScalar)

Definition at line 648 of file SparseMatrix.h.

649  {
650  StorageIndex k = 0;
651  for(Index j=0; j<m_outerSize; ++j)
652  {
653  StorageIndex previousStart = m_outerIndex[j];
654  if (isCompressed())
655  m_outerIndex[j] = k;
656  else
657  k = m_outerIndex[j];
658  StorageIndex end = isCompressed() ? m_outerIndex[j+1] : previousStart + m_innerNonZeros[j];
659  for(StorageIndex i=previousStart; i<end; ++i)
660  {
663  bool keepEntry = keep(row, col, m_data.value(i));
664  if (keepEntry) {
665  m_data.value(k) = m_data.value(i);
666  m_data.index(k) = m_data.index(i);
667  ++k;
668  } else if (!isCompressed())
669  m_innerNonZeros[j]--;
670  }
671  }
672  if (isCompressed()) {
674  m_data.resize(k, 0);
675  }
676  }

◆ prune() [2/2]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::prune ( const Scalar reference,
const RealScalar &  epsilon = NumTraits<RealScalar>::dummy_precision() 
)
inline

Suppresses all nonzeros which are much smaller than reference under the tolerance epsilon

Definition at line 635 of file SparseMatrix.h.

636  {
637  prune(default_prunning_func(reference,epsilon));
638  }
void prune(const Scalar &reference, const RealScalar &epsilon=NumTraits< RealScalar >::dummy_precision())
Definition: SparseMatrix.h:635

◆ removeOuterVectors()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::removeOuterVectors ( Index  j,
Index  num = 1 
)
inline

Definition at line 476 of file SparseMatrix.h.

476  {
477  eigen_assert(num >= 0 && j >= 0 && j + num <= m_outerSize && "Invalid parameters");
478 
479  const Index newRows = IsRowMajor ? m_outerSize - num : rows();
480  const Index newCols = IsRowMajor ? cols() : m_outerSize - num;
481 
482  const Index begin = j + num;
483  const Index end = m_outerSize;
484  const Index target = j;
485 
486  // if the removed vectors are not empty, uncompress the matrix
487  if (m_outerIndex[j + num] > m_outerIndex[j]) uncompress();
488 
489  // shift m_outerIndex and m_innerNonZeros [num] to the left
491  if (!isCompressed())
493 
494  // if m_outerIndex[0] > 0, shift the data within the first vector while it is easy to do so
495  if (m_outerIndex[0] > StorageIndex(0)) {
496  uncompress();
497  const Index from = internal::convert_index<Index>(m_outerIndex[0]);
498  const Index to = Index(0);
499  const Index chunkSize = internal::convert_index<Index>(m_innerNonZeros[0]);
500  m_data.moveChunk(from, to, chunkSize);
501  m_outerIndex[0] = StorageIndex(0);
502  }
503 
504  // truncate the matrix to the smaller size
505  conservativeResize(newRows, newCols);
506  }

◆ reserve() [1/2]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<class SizesType >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::reserve ( const SizesType &  reserveSizes)
inline

Preallocates reserveSizes[j] non zeros for each column (resp. row) j.

This function turns the matrix in non-compressed mode.

The type SizesType must expose the following interface:

for i in the [0,this->outerSize()[ range. Typical choices include std::vector<int>, Eigen::VectorXi, Eigen::VectorXi::Constant, etc.

◆ reserve() [2/2]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::reserve ( Index  reserveSize)
inline

Preallocates reserveSize non zeros.

Precondition: the matrix must be in compressed mode.

Definition at line 300 of file SparseMatrix.h.

301  {
302  eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
303  m_data.reserve(reserveSize);
304  }

◆ reserveInnerVectors()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<class SizesType >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::reserveInnerVectors ( const SizesType &  reserveSizes)
inlineprotected

Definition at line 332 of file SparseMatrix.h.

333  {
334  if(isCompressed())
335  {
336  Index totalReserveSize = 0;
337  for (Index j = 0; j < m_outerSize; ++j) totalReserveSize += internal::convert_index<Index>(reserveSizes[j]);
338 
339  // if reserveSizes is empty, don't do anything!
340  if (totalReserveSize == 0) return;
341 
342  // turn the matrix into non-compressed mode
343  m_innerNonZeros = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize);
344 
345  // temporarily use m_innerSizes to hold the new starting points.
346  StorageIndex* newOuterIndex = m_innerNonZeros;
347 
348  Index count = 0;
349  for(Index j=0; j<m_outerSize; ++j)
350  {
351  newOuterIndex[j] = internal::convert_index<StorageIndex>(count);
352  Index reserveSize = internal::convert_index<Index>(reserveSizes[j]);
353  count += reserveSize + internal::convert_index<Index>(m_outerIndex[j+1]-m_outerIndex[j]);
354  }
355 
356  m_data.reserve(totalReserveSize);
357  StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
358  for(Index j=m_outerSize-1; j>=0; --j)
359  {
360  StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
361  StorageIndex begin = m_outerIndex[j];
362  StorageIndex end = begin + innerNNZ;
363  StorageIndex target = newOuterIndex[j];
365  internal::smart_memmove(valuePtr() + begin, valuePtr() + end, valuePtr() + target);
366  previousOuterIndex = m_outerIndex[j];
367  m_outerIndex[j] = newOuterIndex[j];
368  m_innerNonZeros[j] = innerNNZ;
369  }
370  if(m_outerSize>0)
371  m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + internal::convert_index<StorageIndex>(reserveSizes[m_outerSize-1]);
372 
374  }
375  else
376  {
377  StorageIndex* newOuterIndex = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize + 1);
378 
379  Index count = 0;
380  for(Index j=0; j<m_outerSize; ++j)
381  {
382  newOuterIndex[j] = internal::convert_index<StorageIndex>(count);
383  Index alreadyReserved = internal::convert_index<Index>(m_outerIndex[j+1] - m_outerIndex[j] - m_innerNonZeros[j]);
384  Index reserveSize = internal::convert_index<Index>(reserveSizes[j]);
385  Index toReserve = numext::maxi(reserveSize, alreadyReserved);
386  count += toReserve + internal::convert_index<Index>(m_innerNonZeros[j]);
387  }
388  newOuterIndex[m_outerSize] = internal::convert_index<StorageIndex>(count);
389 
390  m_data.resize(count);
391  for(Index j=m_outerSize-1; j>=0; --j)
392  {
393  StorageIndex innerNNZ = m_innerNonZeros[j];
394  StorageIndex begin = m_outerIndex[j];
395  StorageIndex target = newOuterIndex[j];
396  m_data.moveChunk(begin, target, innerNNZ);
397  }
398 
399  std::swap(m_outerIndex, newOuterIndex);
400  internal::conditional_aligned_delete_auto<StorageIndex, true>(newOuterIndex, m_outerSize + 1);
401  }
402 
403  }
void swap(scoped_array< T > &a, scoped_array< T > &b)
Definition: Memory.h:788

◆ resize()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::resize ( Index  rows,
Index  cols 
)
inline

Resizes the matrix to a rows x cols matrix and initializes it to zero.

This function does not free the currently allocated memory. To release as much memory as possible, call

mat.data().squeeze();

after resizing it.

See also
reserve(), setZero()

Definition at line 739 of file SparseMatrix.h.

740  {
741  const Index outerSize = IsRowMajor ? rows : cols;
743  m_data.clear();
744 
745  if ((m_outerIndex == 0) || (m_outerSize != outerSize)) {
746  m_outerIndex = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(m_outerIndex, outerSize + 1, m_outerSize + 1);
748  }
749 
750  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
751  m_innerNonZeros = 0;
752 
753  std::fill_n(m_outerIndex, m_outerSize + 1, StorageIndex(0));
754  }

◆ resizeNonZeros()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::resizeNonZeros ( Index  size)
inline

Definition at line 758 of file SparseMatrix.h.

759  {
760  m_data.resize(size);
761  }

◆ rows()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Index Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::rows ( void  ) const
inline
Returns
the number of rows of the matrix

Definition at line 165 of file SparseMatrix.h.

165 { return IsRowMajor ? m_outerSize : m_innerSize; }

◆ setFromSortedTriplets() [1/2]

template<typename Scalar , int Options_, typename StorageIndex_ >
template<typename InputIterators >
void Eigen::SparseMatrix< Scalar, Options_, StorageIndex_ >::setFromSortedTriplets ( const InputIterators &  begin,
const InputIterators &  end 
)

The same as setFromTriplets but triplets are assumed to be pre-sorted. This is faster and requires less temporary storage. Two triplets a and b are appropriately ordered if:

ColMajor: ((a.col() != b.col()) ? (a.col() < b.col()) : (a.row() < b.row()))
RowMajor: ((a.row() != b.row()) ? (a.row() < b.row()) : (a.col() < b.col()))

Definition at line 1392 of file SparseMatrix.h.

1393 {
1394  internal::set_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_> >(begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
1395 }

◆ setFromSortedTriplets() [2/2]

template<typename Scalar , int Options_, typename StorageIndex_ >
template<typename InputIterators , typename DupFunctor >
void Eigen::SparseMatrix< Scalar, Options_, StorageIndex_ >::setFromSortedTriplets ( const InputIterators &  begin,
const InputIterators &  end,
DupFunctor  dup_func 
)

The same as setFromSortedTriplets but when duplicates are met the functor dup_func is applied:

value = dup_func(OldValue, NewValue)

Here is a C++11 example keeping the latest entry only:

mat.setFromSortedTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; });

Definition at line 1408 of file SparseMatrix.h.

1409 {
1410  internal::set_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(begin, end, *this, dup_func);
1411 }

◆ setFromTriplets() [1/2]

template<typename Scalar , int Options_, typename StorageIndex_ >
template<typename InputIterators >
void Eigen::SparseMatrix< Scalar, Options_, StorageIndex_ >::setFromTriplets ( const InputIterators &  begin,
const InputIterators &  end 
)

Fill the matrix *this with the list of triplets defined in the half-open range from begin to end.

A triplet is a tuple (i,j,value) defining a non-zero element. The input list of triplets does not have to be sorted, and may contain duplicated elements. In any case, the result is a sorted and compressed sparse matrix where the duplicates have been summed up. This is an O(n) operation, with n the number of triplet elements. The initial contents of *this are destroyed. The matrix *this must be properly resized beforehand using the SparseMatrix(Index,Index) constructor, or the resize(Index,Index) method. The sizes are not extracted from the triplet list.

The InputIterators value_type must provide the following interface:

Scalar value() const; // the value
IndexType row() const; // the row index i
IndexType col() const; // the column index j

See for instance the Eigen::Triplet template class.

Here is a typical usage example:

typedef Triplet<double> T;
std::vector<T> tripletList;
tripletList.reserve(estimation_of_entries);
for(...)
{
// ...
tripletList.push_back(T(i,j,v_ij));
}
SparseMatrixType m(rows,cols);
m.setFromTriplets(tripletList.begin(), tripletList.end());
// m is ready to go!
Warning
The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather be explicitly stored into a std::vector for instance.

Definition at line 1362 of file SparseMatrix.h.

1363 {
1364  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,Options_,StorageIndex_> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
1365 }

◆ setFromTriplets() [2/2]

template<typename Scalar , int Options_, typename StorageIndex_ >
template<typename InputIterators , typename DupFunctor >
void Eigen::SparseMatrix< Scalar, Options_, StorageIndex_ >::setFromTriplets ( const InputIterators &  begin,
const InputIterators &  end,
DupFunctor  dup_func 
)

The same as setFromTriplets but when duplicates are met the functor dup_func is applied:

value = dup_func(OldValue, NewValue)

Here is a C++11 example keeping the latest entry only:

mat.setFromTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; });

Definition at line 1378 of file SparseMatrix.h.

1379 {
1380  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(begin, end, *this, dup_func);
1381 }

◆ setIdentity()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::setIdentity ( )
inline

Sets *this to the identity matrix. This function also turns the matrix into compressed mode, and drops any reserved memory.

Definition at line 856 of file SparseMatrix.h.

857  {
858  eigen_assert(m_outerSize == m_innerSize && "ONLY FOR SQUARED MATRICES");
859  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
860  m_innerNonZeros = 0;
861  m_data.resize(m_outerSize);
862  // is it necessary to squeeze?
863  m_data.squeeze();
864  std::iota(m_outerIndex, m_outerIndex + m_outerSize + 1, StorageIndex(0));
865  std::iota(innerIndexPtr(), innerIndexPtr() + m_outerSize, StorageIndex(0));
866  std::fill_n(valuePtr(), m_outerSize, Scalar(1));
867  }

◆ setZero()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::setZero ( )
inline

Removes all non zeros but keep allocated memory

This function does not free the currently allocated memory. To release as much memory as possible, call

mat.data().squeeze();

after resizing it.

See also
resize(Index,Index), data()

Definition at line 288 of file SparseMatrix.h.

289  {
290  m_data.clear();
291  std::fill_n(m_outerIndex, m_outerSize + 1, StorageIndex(0));
292  if(m_innerNonZeros) {
293  std::fill_n(m_innerNonZeros, m_outerSize, StorageIndex(0));
294  }
295  }

◆ startVec()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::startVec ( Index  outer)
inline

Definition at line 447 of file SparseMatrix.h.

448  {
449  eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
450  eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
451  m_outerIndex[outer+1] = m_outerIndex[outer];
452  }

◆ sum()

template<typename Scalar_ , int Options_, typename Index_ >
internal::traits< SparseMatrix< Scalar_, Options_, Index_ > >::Scalar Eigen::SparseMatrix< Scalar_, Options_, Index_ >::sum

Overloaded for performance

Definition at line 32 of file SparseRedux.h.

33 {
34  eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix");
35  if(this->isCompressed())
36  return Matrix<Scalar,1,Dynamic>::Map(m_data.valuePtr(), m_data.size()).sum();
37  else
38  return Base::sum();
39 }
Scalar sum() const
Definition: SparseRedux.h:19

◆ swap()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::swap ( SparseMatrix< Scalar_, Options_, StorageIndex_ > &  other)
inline

Swaps the content of two sparse matrices of the same type. This is a fast operation that simply swaps the underlying pointers and parameters.

Definition at line 844 of file SparseMatrix.h.

845  {
846  //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
847  std::swap(m_outerIndex, other.m_outerIndex);
848  std::swap(m_innerSize, other.m_innerSize);
849  std::swap(m_outerSize, other.m_outerSize);
850  std::swap(m_innerNonZeros, other.m_innerNonZeros);
851  m_data.swap(other.m_data);
852  }

◆ uncompress()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::uncompress ( )
inline

Turns the matrix into uncompressed mode.

Definition at line 624 of file SparseMatrix.h.

625  {
626  if (!isCompressed()) return;
627  m_innerNonZeros = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize);
628  if (m_outerIndex[m_outerSize] == 0)
629  std::fill_n(m_innerNonZeros, m_outerSize, StorageIndex(0));
630  else
631  for (Index j = 0; j < m_outerSize; j++) m_innerNonZeros[j] = m_outerIndex[j + 1] - m_outerIndex[j];
632  }

◆ valuePtr() [1/2]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Scalar* Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::valuePtr ( )
inline
Returns
a non-const pointer to the array of values. This function is aimed at interoperability with other libraries.
See also
innerIndexPtr(), outerIndexPtr()

Definition at line 181 of file SparseMatrix.h.

181 { return m_data.valuePtr(); }

◆ valuePtr() [2/2]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
const Scalar* Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::valuePtr ( ) const
inline
Returns
a const pointer to the array of values. This function is aimed at interoperability with other libraries.
See also
innerIndexPtr(), outerIndexPtr()

Definition at line 177 of file SparseMatrix.h.

177 { return m_data.valuePtr(); }

Member Data Documentation

◆ m_data

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Storage Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_data
protected

Definition at line 160 of file SparseMatrix.h.

◆ m_innerNonZeros

template<typename Scalar_ , int Options_, typename StorageIndex_ >
StorageIndex* Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_innerNonZeros
protected

Definition at line 159 of file SparseMatrix.h.

◆ m_innerSize

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Index Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_innerSize
protected

Definition at line 157 of file SparseMatrix.h.

◆ m_outerIndex

template<typename Scalar_ , int Options_, typename StorageIndex_ >
StorageIndex* Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerIndex
protected

Definition at line 158 of file SparseMatrix.h.

◆ m_outerSize

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Index Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerSize
protected

Definition at line 156 of file SparseMatrix.h.


The documentation for this class was generated from the following files: