#ifndef EIGEN_SPARSEVECTOR_H
#define EIGEN_SPARSEVECTOR_H
template <typename Scalar_, int Options_, typename StorageIndex_>
struct traits<SparseVector<Scalar_, Options_, StorageIndex_> > {
  typedef Scalar_ Scalar;
  typedef StorageIndex_ StorageIndex;
  typedef Sparse StorageKind;
  typedef MatrixXpr XprKind;
  enum {
    IsColVector = (Options_ & RowMajorBit) ? 0 : 1,

    RowsAtCompileTime = IsColVector ? Dynamic : 1,
    ColsAtCompileTime = IsColVector ? 1 : Dynamic,
    MaxRowsAtCompileTime = RowsAtCompileTime,
    MaxColsAtCompileTime = ColsAtCompileTime,
    // ...
  };
};
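// Usage sketch (illustrative, not part of this header; assumes
// <Eigen/SparseCore> is included): the traits above make a SparseVector a
// column vector by default and a row vector when the RowMajor option is set.
static_assert(Eigen::SparseVector<double>::ColsAtCompileTime == 1,
              "default SparseVector is a column vector");
static_assert(Eigen::SparseVector<double, Eigen::RowMajor>::RowsAtCompileTime == 1,
              "a RowMajor SparseVector is a row vector");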
template <typename Dest, typename Src,
          int AssignmentKind = !bool(Src::IsVectorAtCompileTime)   ? SVA_RuntimeSwitch
                               : Src::InnerSizeAtCompileTime == 1 ? SVA_Outer
                                                                  : SVA_Inner>
struct sparse_vector_assign_selector;
template <typename Scalar_, int Options_, typename StorageIndex_>
class SparseVector : public SparseCompressedBase<SparseVector<Scalar_, Options_, StorageIndex_> > {
  // ...
 public:
  typedef internal::CompressedStorage<Scalar, StorageIndex> Storage;
  enum { IsColVector = internal::traits<SparseVector>::IsColVector };
EIGEN_STRONG_INLINE const Scalar* valuePtr() const { return m_data.valuePtr(); }
typedef typename Base::InnerIterator InnerIterator;
typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
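// Usage sketch (illustrative, not part of this header): InnerIterator visits
// the stored nonzeros of a SparseVector in increasing index order.
#include <Eigen/SparseCore>
#include <iostream>
void iterate_example() {
  Eigen::SparseVector<double> v(8);
  v.insert(1) = 3.0;
  v.insert(5) = -1.0;
  for (Eigen::SparseVector<double>::InnerIterator it(v); it; ++it)
    std::cout << it.index() << " -> " << it.value() << '\n';
}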
// Both insertBack(Index i) and insertBackUnordered(Index i) end identically:
// append a zero entry for index i and return a reference to it.
m_data.append(0, i);
return m_data.value(m_data.size() - 1);
// Body of insert(Index i): grow the storage by one entry, then shift the
// trailing entries right until the sorted position for index i is found.
m_data.resize(p + 2, 1);

while ((p >= startId) && (m_data.index(p) > i)) {
  m_data.index(p + 1) = m_data.index(p);
  m_data.value(p + 1) = m_data.value(p);
  --p;
}
m_data.index(p + 1) = convert_index(i);
m_data.value(p + 1) = 0;
return m_data.value(p + 1);
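// Usage sketch (illustrative, assumes <Eigen/SparseCore> is included):
// insert() performs a sorted insertion, so each call may shift existing
// entries; reserving capacity and inserting in increasing index order keeps
// the cost low.
void insert_example() {
  Eigen::SparseVector<double> v(10);
  v.reserve(3);
  v.insert(2) = 1.0;     // returns Scalar&; the new entry is zero-initialized
  v.insert(5) = 2.0;
  v.coeffRef(5) += 1.0;  // coeffRef() inserts the entry if it is missing
}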
inline void reserve(Index reserveSize) { m_data.reserve(reserveSize); }
// Body of prune(F&& keep_predicate): stable in-place compaction that keeps
// only the entries whose value satisfies the predicate.
if (keep_predicate(m_data.value(i))) {
  m_data.value(k) = std::move(m_data.value(i));
  m_data.index(k) = m_data.index(i);
  ++k;
}
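// Usage sketch (illustrative, assumes <Eigen/SparseCore> and <cmath> are
// included): drop all stored entries whose magnitude is below a threshold.
void prune_example(Eigen::SparseVector<double>& v) {
  v.prune([](const double& x) { return std::abs(x) > 1e-12; });
}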
// Inside conservativeResize(Index newSize): locate the first stored entry
// whose index no longer fits, so the tail can be truncated.
while (i < m_data.size() && m_data.index(i) < newSize) ++i;
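// Usage sketch (illustrative): conservativeResize(n) keeps the entries with
// index < n, whereas resize(n) discards all stored entries.
void resize_example() {
  Eigen::SparseVector<double> u(10);
  u.insert(2) = 1.0;
  u.insert(7) = 2.0;
  u.conservativeResize(5);  // keeps (2, 1.0), drops (7, 2.0)
}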
template <typename OtherDerived>
inline SparseVector(const SparseMatrixBase<OtherDerived>& other) : m_size(0) {
#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
#endif
  *this = other.derived();
}
// Inside swap(SparseVector& other):
m_data.swap(other.m_data);

template <int OtherOptions>
inline void swap(SparseMatrix<Scalar, OtherOptions, StorageIndex>& other) {
  eigen_assert(other.outerSize() == 1);
  // ...
  m_data.swap(other.m_data);
}
// Inside operator=(const SparseVector& other): unless other is an r-value
// (in which case the storages are swapped), resize and copy the storage.
m_data = other.m_data;
template <typename OtherDerived>
inline SparseVector& operator=(const SparseMatrixBase<OtherDerived>& other) {
  SparseVector tmp(other.size());
  internal::sparse_vector_assign_selector<SparseVector, OtherDerived>::run(tmp, other.derived());
  this->swap(tmp);
  return *this;
}
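// Usage sketch (illustrative): any one-dimensional sparse expression can be
// assigned to a SparseVector; the copy is built into a temporary via
// sparse_vector_assign_selector and then swapped in.
void assign_example() {
  Eigen::SparseMatrix<double> A(10, 10);  // column-major by default
  A.insert(3, 4) = 1.5;
  Eigen::SparseVector<double> c = A.col(4);  // a single outer slice of A
}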
#ifndef EIGEN_PARSED_BY_DOXYGEN
template <typename Lhs, typename Rhs>
inline SparseVector& operator=(const SparseSparseProduct<Lhs, Rhs>& product) {
  return Base::operator=(product);
}
#endif
friend std::ostream& operator<<(std::ostream& s, const SparseVector& m) {
  for (Index i = 0; i < m.nonZeros(); ++i)
    s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
  s << std::endl;
  return s;
}
// Tail of the deprecated fill(Index i), which appends like insertBack():
return m_data.value(m_data.size() - 1);
#ifdef EIGEN_SPARSEVECTOR_PLUGIN
#include EIGEN_SPARSEVECTOR_PLUGIN
#endif
template <typename Scalar_, int Options_, typename Index_>
struct evaluator<SparseVector<Scalar_, Options_, Index_> >
    : evaluator_base<SparseVector<Scalar_, Options_, Index_> > {
  typedef SparseVector<Scalar_, Options_, Index_> SparseVectorType;
  typedef evaluator_base<SparseVectorType> Base;
  typedef typename SparseVectorType::InnerIterator InnerIterator;
  typedef typename SparseVectorType::ReverseInnerIterator ReverseInnerIterator;

  enum {
    CoeffReadCost = NumTraits<Scalar_>::ReadCost,
    Flags = SparseVectorType::Flags
  };

  evaluator() : Base() {}

  explicit evaluator(const SparseVectorType& mat) : m_matrix(&mat) {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  inline Index nonZerosEstimate() const { return m_matrix->nonZeros(); }

  operator SparseVectorType&() { return m_matrix->const_cast_derived(); }
  operator const SparseVectorType&() const { return *m_matrix; }

  const SparseVectorType* m_matrix;
};
template <typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest, Src, SVA_Inner> {
  static void run(Dest& dst, const Src& src) {
    eigen_internal_assert(src.innerSize() == src.size());
    typedef internal::evaluator<Src> SrcEvaluatorType;
    SrcEvaluatorType srcEval(src);
    for (typename SrcEvaluatorType::InnerIterator it(srcEval, 0); it; ++it)
      dst.insert(it.index()) = it.value();
  }
};
template <typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest, Src, SVA_Outer> {
  static void run(Dest& dst, const Src& src) {
    eigen_internal_assert(src.outerSize() == src.size());
    typedef internal::evaluator<Src> SrcEvaluatorType;
    SrcEvaluatorType srcEval(src);
    for (Index i = 0; i < src.size(); ++i) {
      typename SrcEvaluatorType::InnerIterator it(srcEval, i);
      if (it) dst.insert(i) = it.value();
    }
  }
};
template <typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest, Src, SVA_RuntimeSwitch> {
  static void run(Dest& dst, const Src& src) {
    if (src.outerSize() == 1)
      sparse_vector_assign_selector<Dest, Src, SVA_Inner>::run(dst, src);
    else
      sparse_vector_assign_selector<Dest, Src, SVA_Outer>::run(dst, src);
  }
};
// Serialization of a SparseVector: a fixed-size header followed by the raw
// inner-index and value arrays.
template <typename Scalar, int Options, typename StorageIndex>
class Serializer<SparseVector<Scalar, Options, StorageIndex>, void> {
 public:
  typedef SparseVector<Scalar, Options, StorageIndex> SparseMat;

  struct Header {
    typename SparseMat::Index size;
    Index num_non_zeros;
  };

  EIGEN_DEVICE_FUNC size_t size(const SparseMat& value) const {
    return sizeof(Header) + (sizeof(Scalar) + sizeof(StorageIndex)) * value.nonZeros();
  }

  EIGEN_DEVICE_FUNC uint8_t* serialize(uint8_t* dest, uint8_t* end, const SparseMat& value) {
    // ... bounds checks elided ...
    const size_t header_bytes = sizeof(Header);
    Header header = {value.size(), value.nonZeros()};
    memcpy(dest, &header, header_bytes);
    dest += header_bytes;

    // Inner indices.
    std::size_t data_bytes = sizeof(StorageIndex) * header.num_non_zeros;
    memcpy(dest, value.innerIndexPtr(), data_bytes);
    dest += data_bytes;

    // Values.
    data_bytes = sizeof(Scalar) * header.num_non_zeros;
    memcpy(dest, value.valuePtr(), data_bytes);
    dest += data_bytes;
    return dest;
  }

  EIGEN_DEVICE_FUNC const uint8_t* deserialize(const uint8_t* src, const uint8_t* end, SparseMat& value) const {
    // ... bounds checks elided ...
    const size_t header_bytes = sizeof(Header);
    Header header;
    memcpy(&header, src, header_bytes);
    src += header_bytes;

    value.resize(header.size);
    value.resizeNonZeros(header.num_non_zeros);

    // Inner indices.
    std::size_t data_bytes = sizeof(StorageIndex) * header.num_non_zeros;
    memcpy(value.innerIndexPtr(), src, data_bytes);
    src += data_bytes;

    // Values.
    data_bytes = sizeof(Scalar) * header.num_non_zeros;
    memcpy(value.valuePtr(), src, data_bytes);
    src += data_bytes;
    return src;
  }
};
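// Usage sketch (illustrative, assumes <Eigen/SparseCore> and <vector> are
// included): round-trip a SparseVector through the Serializer specialization
// above; size() gives the exact buffer size needed by serialize().
void serialize_example() {
  using SpVec = Eigen::SparseVector<double>;
  SpVec v(8);
  v.insert(1) = 3.0;
  v.insert(6) = -2.0;

  Eigen::Serializer<SpVec> serializer;
  std::vector<uint8_t> buffer(serializer.size(v));
  uint8_t* end = serializer.serialize(buffer.data(), buffer.data() + buffer.size(), v);

  SpVec w;
  const uint8_t* read_end = serializer.deserialize(buffer.data(), end, w);
  // w now equals v; serialize/deserialize return nullptr on buffer overrun.
  (void)read_end;
}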