#ifndef EIGEN_SPARSEMATRIX_H
#define EIGEN_SPARSEMATRIX_H
template<typename Scalar_, int Options_, typename StorageIndex_>
struct traits<SparseMatrix<Scalar_, Options_, StorageIndex_> >
  typedef Scalar_ Scalar;
  typedef StorageIndex_ StorageIndex;
  typedef Sparse StorageKind;
  typedef MatrixXpr XprKind;
template<typename Scalar_, int Options_, typename StorageIndex_, int DiagIndex>
struct traits<Diagonal<SparseMatrix<Scalar_, Options_, StorageIndex_>, DiagIndex> >
  typedef SparseMatrix<Scalar_, Options_, StorageIndex_> MatrixType;
  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
  typedef std::remove_reference_t<MatrixTypeNested> MatrixTypeNested_;
  typedef Scalar_ Scalar;
  typedef Dense StorageKind;
  typedef StorageIndex_ StorageIndex;
  typedef MatrixXpr XprKind;
    ColsAtCompileTime = 1,
    MaxColsAtCompileTime = 1,
template<typename Scalar_, int Options_, typename StorageIndex_, int DiagIndex>
struct traits<Diagonal<const SparseMatrix<Scalar_, Options_, StorageIndex_>, DiagIndex> >
    : public traits<Diagonal<SparseMatrix<Scalar_, Options_, StorageIndex_>, DiagIndex> >
template <typename StorageIndex>
struct sparse_reserve_op {
  m_end = begin + range;
  m_val = StorageIndex(size / range);
  m_remainder = StorageIndex(size % range);
  template <typename IndexType>
    if ((i >= m_begin) && (i < m_end))
      return m_val + ((i - m_begin) < m_remainder ? 1 : 0);
  StorageIndex m_val, m_remainder;
  Index m_begin, m_end;
template <typename Scalar>
struct functor_traits<sparse_reserve_op<Scalar>> {
  enum { Cost = 1, PacketAccess = false, IsRepeatable = true };
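// Illustration of what sparse_reserve_op computes: it spreads `size` reserve slots
// as evenly as possible over a `range` of consecutive outer vectors starting at
// m_begin, giving the first `size % range` vectors one extra slot. For example,
// with m_begin = 0, range = 4 and size = 10, m_val = 2 and m_remainder = 2, so
// the per-vector reserve sizes are 3, 3, 2, 2 (summing back to 10).
// The constructor argument order below is an assumption, inferred from the
// ReserveSizesOp(outer, m_outerSize, maxReserveSize) call further down this file:
//
//   Eigen::internal::sparse_reserve_op<int> op(/*begin=*/0, /*end=*/4, /*size=*/10);
//   // op(0) == 3, op(1) == 3, op(2) == 2, op(3) == 2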
template<typename Scalar_, int Options_, typename StorageIndex_>
  template<typename, typename, typename, typename, typename>
  friend struct internal::Assignment;
  using Base::operator+=;
  using Base::operator-=;
  typedef typename Base::InnerIterator InnerIterator;
  typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
  typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
    eigen_assert(end >= start && "you probably called coeffRef on a non finalized matrix");
    if ((dst < end) && (m_data.index(dst) == inner))
    m_data.reserve(reserveSize);
#ifdef EIGEN_PARSED_BY_DOXYGEN
  template<class SizesType>
  inline void reserve(const SizesType& reserveSizes);
  template<class SizesType>
  inline void reserve(const SizesType& reserveSizes,
                      const typename SizesType::value_type& enableif = typename SizesType::value_type())
  template<class SizesType>
    Index totalReserveSize = 0;
    for (Index j = 0; j < m_outerSize; ++j) totalReserveSize += internal::convert_index<Index>(reserveSizes[j]);
    if (totalReserveSize == 0) return;
      newOuterIndex[j] = internal::convert_index<StorageIndex>(count);
      Index reserveSize = internal::convert_index<Index>(reserveSizes[j]);
      m_data.reserve(totalReserveSize);
      newOuterIndex[j] = internal::convert_index<StorageIndex>(count);
      Index reserveSize = internal::convert_index<Index>(reserveSizes[j]);
      newOuterIndex[m_outerSize] = internal::convert_index<StorageIndex>(count);
      m_data.moveChunk(begin, target, innerNNZ);
      internal::conditional_aligned_delete_auto<StorageIndex, true>(newOuterIndex, m_outerSize + 1);
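// Typical use of the per-vector reserve overload declared above: pass any
// vector-like object whose operator[] yields the expected number of nonzeros
// for each inner vector (each column in the default column-major storage).
// A minimal sketch:
//
//   Eigen::SparseMatrix<double> A(1000, 2000);
//   A.reserve(Eigen::VectorXi::Constant(A.cols(), 6));  // ~6 nonzeros per column
//   // ... fill A with insert()/coeffRef() ...
//   A.makeCompressed();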
    const Index begin = j + num;
    m_data.moveChunk(from, to, chunkSize);
    const Index target = j + num;
  template<typename InputIterators>
  template<typename InputIterators, typename DupFunctor>
  template<typename Derived, typename DupFunctor>
  template<typename InputIterators>
  template<typename InputIterators, typename DupFunctor>
  template<typename InputIterators>
  template<typename InputIterators, typename DupFunctor>
  template<typename InputIterators>
  template<typename InputIterators, typename DupFunctor>
582 "you cannot insert an element that already exists, you must call coeffRef to this end");
597 Index copyStart = start;
608 if(chunkSize > 0)
m_data.moveChunk(copyStart, copyTarget, chunkSize);
609 copyStart = nextStart;
610 copyTarget += chunkSize;
    prune(default_prunning_func(reference, epsilon));
  template<typename KeepFunc>
  void prune(const KeepFunc& keep = KeepFunc())
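// Usage sketch for the two prune() overloads referenced above: the
// (reference, epsilon) form drops coefficients that are negligible relative to
// `reference`, while the functor form keeps only the entries for which
// keep(row, col, value) returns true (this is the documented KeepFunc
// convention for the sparse module):
//
//   A.prune(1.0);  // drop entries that are ~0 relative to 1.0
//   A.prune([](const Eigen::Index& row, const Eigen::Index& col, const double& v) {
//     return row == col || std::abs(v) > 1e-12;  // keep the diagonal and large entries
//   });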
    if (outerChange != 0) {
      m_outerIndex = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(
      m_innerNonZeros = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(
      if (outerChange > 0) {
    if (innerChange < 0) {
  template<typename OtherDerived>
      YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
    EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
  template<typename OtherDerived, unsigned int UpLo>
    *this = other.derived().markAsRValue();
  template<typename OtherDerived>
  template<typename OtherDerived>
    else if (this != &other)
#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
      EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
    return *this = other.derived().markAsRValue();
#ifndef EIGEN_PARSED_BY_DOXYGEN
  template<typename OtherDerived>
  template<typename Lhs, typename Rhs>
  template<typename OtherDerived>
      s << "Nonzero entries:\n";
        for (Index i=0; i<m.nonZeros(); ++i)
          s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
        for (Index i=0; i<m.outerSize(); ++i)
          Index p = m.m_outerIndex[i];
          Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
            s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
          for (; k<m.m_outerIndex[i+1]; ++k) {
      s << "Outer pointers:\n";
        s << m.m_outerIndex[i] << " ";
      s << " $" << std::endl;
      if (!m.isCompressed())
        s << "Inner non zeros:\n";
        for (Index i=0; i<m.outerSize(); ++i) {
          s << m.m_innerNonZeros[i] << " ";
        s << " $" << std::endl;
      s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
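// The stream operator above prints the raw storage (value,index) pairs, the
// outer pointer array, and the inner non-zero counts for uncompressed matrices
// when the sparse debugging output (EIGEN_DBG_SPARSE) is enabled, then falls
// back to the dense-style printout of SparseMatrixBase. A quick way to
// exercise it:
//
//   Eigen::SparseMatrix<double> A(3, 3);
//   A.insert(0, 0) = 1.0;
//   A.insert(2, 1) = 2.0;
//   A.makeCompressed();
//   std::cout << A << std::endl;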
    internal::conditional_aligned_delete_auto<StorageIndex, true>(m_outerIndex, m_outerSize + 1);
    internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
#   ifdef EIGEN_SPARSEMATRIX_PLUGIN
#     include EIGEN_SPARSEMATRIX_PLUGIN
  template<typename Other>
    resize(other.rows(), other.cols());
    internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
    eigen_assert(m_innerNonZeros[outer] <= (m_outerIndex[outer+1] - m_outerIndex[outer]));
    Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
    return m_data.value(p);
  template<typename DiagXpr, typename Func>
    Index n = diagXpr.size();
    const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar,Scalar> >::value;
    if ((m_outerSize != n) || (m_innerSize != n))
    if (m_data.size() == 0 || overwrite)
      internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
      m_innerNonZeros = 0;
      ValueMap valueMap(valuePtr(), n);
      std::iota(m_outerIndex, m_outerIndex + n + 1, StorageIndex(0));
      std::iota(innerIndexPtr(), innerIndexPtr() + n, StorageIndex(0));
      internal::evaluator<DiagXpr> diaEval(diagXpr);
      insertionLocations.setConstant(kEmptyIndexVal);
      Index deferredInsertions = 0;
        Index begin = m_outerIndex[j];
        Index end = isCompressed() ? m_outerIndex[j + 1] : begin + m_innerNonZeros[j];
        Index capacity = m_outerIndex[j + 1] - end;
        Index dst = m_data.searchLowerIndex(begin, end, j);
        if (dst != end && m_data.index(dst) == StorageIndex(j)) assignFunc.assignCoeff(m_data.value(dst), diaEval.coeff(j));
        else if (dst == end && capacity > 0)
          assignFunc.assignCoeff(insertBackUncompressed(j, j), diaEval.coeff(j));
          deferredInsertions++;
          if (capacity == 0) shift++;
      if (deferredInsertions > 0) {
        m_data.resize(m_data.size() + shift);
        Index copyEnd = isCompressed() ? m_outerIndex[m_outerSize]
                                       : m_outerIndex[m_outerSize - 1] + m_innerNonZeros[m_outerSize - 1];
        for (Index j = m_outerSize - 1; deferredInsertions > 0; j--) {
          Index begin = m_outerIndex[j];
          Index end = isCompressed() ? m_outerIndex[j + 1] : begin + m_innerNonZeros[j];
          Index capacity = m_outerIndex[j + 1] - end;
          bool doInsertion = insertionLocations(j) >= 0;
          bool breakUpCopy = doInsertion && (capacity > 0);
            Index copyBegin = m_outerIndex[j + 1];
            Index to = copyBegin + shift;
            Index chunkSize = copyEnd - copyBegin;
            m_data.moveChunk(copyBegin, to, chunkSize);
            m_outerIndex[j + 1] += shift;
            if (capacity > 0) shift++;
            Index copyBegin = insertionLocations(j);
            Index to = copyBegin + shift;
            Index chunkSize = copyEnd - copyBegin;
            m_data.moveChunk(copyBegin, to, chunkSize);
            m_data.value(dst) = Scalar(0);
            assignFunc.assignCoeff(m_data.value(dst), diaEval.coeff(j));
            if (!isCompressed()) m_innerNonZeros[j]++;
            deferredInsertions--;
            copyEnd = copyBegin;
        eigen_assert((shift == 0) && (deferredInsertions == 0));
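// assignDiagonal is the machinery behind writable diagonal access on a
// SparseMatrix: for an empty matrix (or plain assignment) it builds the
// diagonal storage directly, otherwise it updates existing entries in place
// and defers the insertion of missing ones. From the caller's point of view,
// a sketch of the supported usage looks like this:
//
//   const Eigen::Index n = 5;
//   Eigen::SparseMatrix<double> A(n, n);
//   Eigen::VectorXd d = Eigen::VectorXd::Ones(n);
//   A.diagonal() = d;    // overwrite the diagonal
//   A.diagonal() += d;   // accumulate into existing/new diagonal entries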
    struct default_prunning_func {
      default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
                       DupFunctor dup_func) {
  constexpr bool IsRowMajor = SparseMatrixType::IsRowMajor;
  using StorageIndex = typename SparseMatrixType::StorageIndex;
  if (begin == end) return;
  TransposedSparseMatrix trmat(mat.rows(), mat.cols());
  for (InputIterator it(begin); it != end; ++it) {
    eigen_assert(it->row() >= 0 && it->row() < mat.rows() && it->col() >= 0 && it->col() < mat.cols());
    StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
    trmat.outerIndexPtr()[j + 1]++;
  std::partial_sum(trmat.outerIndexPtr(), trmat.outerIndexPtr() + trmat.outerSize() + 1, trmat.outerIndexPtr());
  eigen_assert(nonZeros == trmat.outerIndexPtr()[trmat.outerSize()]);
  trmat.resizeNonZeros(nonZeros);
  smart_copy(trmat.outerIndexPtr(), trmat.outerIndexPtr() + trmat.outerSize(), tmp);
  for (InputIterator it(begin); it != end; ++it) {
    StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
    StorageIndex i = convert_index<StorageIndex>(IsRowMajor ? it->row() : it->col());
    StorageIndex k = tmp[j];
    trmat.data().index(k) = i;
    trmat.data().value(k) = it->value();
  IndexMap wi(tmp, trmat.innerSize());
  trmat.collapseDuplicates(wi, dup_func);
template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
                              DupFunctor dup_func) {
  constexpr bool IsRowMajor = SparseMatrixType::IsRowMajor;
  using StorageIndex = typename SparseMatrixType::StorageIndex;
  if (begin == end) return;
  constexpr StorageIndex kEmptyIndexValue(-1);
  StorageIndex previous_j = kEmptyIndexValue;
  StorageIndex previous_i = kEmptyIndexValue;
  for (InputIterator it(begin); it != end; ++it) {
    eigen_assert(it->row() >= 0 && it->row() < mat.rows() && it->col() >= 0 && it->col() < mat.cols());
    StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->row() : it->col());
    StorageIndex i = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
    bool duplicate = (previous_j == j) && (previous_i == i);
      mat.outerIndexPtr()[j + 1]++;
  std::partial_sum(mat.outerIndexPtr(), mat.outerIndexPtr() + mat.outerSize() + 1, mat.outerIndexPtr());
  mat.resizeNonZeros(nonZeros);
  previous_i = kEmptyIndexValue;
  previous_j = kEmptyIndexValue;
  for (InputIterator it(begin); it != end; ++it) {
    StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->row() : it->col());
    StorageIndex i = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
    bool duplicate = (previous_j == j) && (previous_i == i);
      mat.data().value(back - 1) = dup_func(mat.data().value(back - 1), it->value());
      mat.data().index(back) = i;
      mat.data().value(back) = it->value();
template<typename DupFunctor, typename LhsScalar, typename RhsScalar = LhsScalar>
struct scalar_disjunction_op
  using result_type = typename result_of<DupFunctor(LhsScalar, RhsScalar)>::type;
  scalar_disjunction_op(const DupFunctor& op) : m_functor(op) {}
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const DupFunctor& functor() const { return m_functor; }
  const DupFunctor& m_functor;
template <typename DupFunctor, typename LhsScalar, typename RhsScalar>
struct functor_traits<scalar_disjunction_op<DupFunctor, LhsScalar, RhsScalar>> : public functor_traits<DupFunctor> {};
template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
                          DupFunctor dup_func) {
  using Scalar = typename SparseMatrixType::Scalar;
  SparseMatrixType trips(mat.rows(), mat.cols());
  SrcXprType src = mat.binaryExpr(trips, scalar_disjunction_op<DupFunctor, Scalar>(dup_func));
  assign_sparse_to_sparse<SparseMatrixType, SrcXprType>(mat, src);
template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
                                 DupFunctor dup_func) {
  using Scalar = typename SparseMatrixType::Scalar;
  SparseMatrixType trips(mat.rows(), mat.cols());
  SrcXprType src = mat.binaryExpr(trips, scalar_disjunction_op<DupFunctor, Scalar>(dup_func));
  assign_sparse_to_sparse<SparseMatrixType, SrcXprType>(mat, src);
template<typename Scalar, int Options_, typename StorageIndex_>
template<typename InputIterators>
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,Options_,StorageIndex_> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
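// Canonical setFromTriplets usage corresponding to the definition above:
// duplicates are combined with scalar_sum_op, i.e. repeated (i,j) entries are
// summed. Sketch:
//
//   std::vector<Eigen::Triplet<double>> triplets;
//   triplets.emplace_back(0, 0, 1.0);
//   triplets.emplace_back(2, 1, 2.0);
//   triplets.emplace_back(2, 1, 3.0);            // duplicate: summed to 5.0
//   Eigen::SparseMatrix<double> A(3, 3);
//   A.setFromTriplets(triplets.begin(), triplets.end());
//   // The DupFunctor overload lets you choose another policy, e.g. keep the last value:
//   A.setFromTriplets(triplets.begin(), triplets.end(),
//                     [](const double&, const double& b) { return b; });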
template<typename Scalar, int Options_, typename StorageIndex_>
template<typename InputIterators, typename DupFunctor>
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(begin, end, *this, dup_func);
template<typename Scalar, int Options_, typename StorageIndex_>
template<typename InputIterators>
  internal::set_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_> >(begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
template<typename Scalar, int Options_, typename StorageIndex_>
template<typename InputIterators, typename DupFunctor>
  internal::set_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(begin, end, *this, dup_func);
template<typename Scalar, int Options_, typename StorageIndex_>
template<typename InputIterators>
  internal::insert_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_> >(begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
template<typename Scalar, int Options_, typename StorageIndex_>
template<typename InputIterators, typename DupFunctor>
  internal::insert_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(begin, end, *this, dup_func);
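// insertFromTriplets differs from setFromTriplets in that it folds new
// triplets into the current contents of the matrix instead of rebuilding it
// from scratch: the definitions above route through
// internal::insert_from_triplets, which combines the existing matrix and the
// new triplets via a binary expression using the duplicate functor. A sketch,
// where `moreTriplets` is a hypothetical second std::vector<Eigen::Triplet<double>>:
//
//   A.insertFromTriplets(moreTriplets.begin(), moreTriplets.end());  // summed with existing entries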
template<typename Scalar, int Options_, typename StorageIndex_>
template<typename InputIterators>
  internal::insert_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_> >(begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
template<typename Scalar, int Options_, typename StorageIndex_>
template<typename InputIterators, typename DupFunctor>
  internal::insert_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(begin, end, *this, dup_func);
template <typename Scalar_, int Options_, typename StorageIndex_>
template <typename Derived, typename DupFunctor>
  const bool is_compressed = isCompressed();
  for (Index j = 0; j < m_outerSize; ++j) {
    const StorageIndex end = is_compressed ? m_outerIndex[j + 1] : m_outerIndex[j] + m_innerNonZeros[j];
      if (wi(i) >= newBegin) {
        m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
        m_data.index(count) = i;
        m_data.value(count) = m_data.value(k);
    m_outerIndex[j] = newBegin;
  m_outerIndex[m_outerSize] = count;
  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
  m_innerNonZeros = 0;
template<typename Scalar, int Options_, typename StorageIndex_>
template<typename OtherDerived>
      YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
  if (needToTranspose)
#ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
    EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
    typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
    typedef internal::evaluator<OtherCopy_> OtherCopyEval;
    OtherCopy otherCopy(other.derived());
    OtherCopyEval otherCopyEval(otherCopy);
    for (Index j=0; j<otherCopy.outerSize(); ++j)
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
      positions[j] = count;
    dest.m_data.resize(count);
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
        Index pos = positions[it.index()]++;
        dest.m_data.value(pos) = it.value();
    initAssignment(other.derived());
    return Base::operator=(other.derived());
template <typename Scalar_, int Options_, typename StorageIndex_>
  return insertByOuterInner(IsRowMajor ? row : col, IsRowMajor ? col : row);
template <typename Scalar_, int Options_, typename StorageIndex_>
  return insertUncompressedAtByOuterInner(outer, inner, dst);
template <typename Scalar_, int Options_, typename StorageIndex_>
  Index start = m_outerIndex[outer];
  Index end = start + m_innerNonZeros[outer];
  Index dst = start == end ? end : m_data.searchLowerIndex(start, end, inner);
  Index capacity = m_outerIndex[outer + 1] - end;
    m_innerNonZeros[outer]++;
    return m_data.value(end);
               "you cannot insert an element that already exists, you must call coeffRef to this end");
  return insertUncompressedAtByOuterInner(outer, inner, dst);
template <typename Scalar_, int Options_, typename StorageIndex_>
  Index start = m_outerIndex[outer];
  Index end = m_outerIndex[outer + 1];
  Index dst = start == end ? end : m_data.searchLowerIndex(start, end, inner);
               "you cannot insert an element that already exists, you must call coeffRef to this end");
  return insertCompressedAtByOuterInner(outer, inner, dst);
template <typename Scalar_, int Options_, typename StorageIndex_>
  if (m_data.allocatedSize() <= m_data.size()) {
    Index minReserve = 32;
    m_data.reserve(reserveSize);
  m_data.resize(m_data.size() + 1);
  Index chunkSize = m_outerIndex[m_outerSize] - dst;
    m_data.moveChunk(dst, dst + 1, chunkSize);
  for (Index j = outer; j < m_outerSize; j++) m_outerIndex[j + 1]++;
  m_data.value(dst) = Scalar(0);
  return m_data.value(dst);
template <typename Scalar_, int Options_, typename StorageIndex_>
  for (Index leftTarget = outer - 1, rightTarget = outer; (leftTarget >= 0) || (rightTarget < m_outerSize);) {
    if (rightTarget < m_outerSize) {
      Index start = m_outerIndex[rightTarget];
      Index end = start + m_innerNonZeros[rightTarget];
      Index nextStart = m_outerIndex[rightTarget + 1];
        if (chunkSize > 0) m_data.moveChunk(dst, dst + 1, chunkSize);
        m_innerNonZeros[outer]++;
        for (Index j = outer; j < rightTarget; j++) m_outerIndex[j + 1]++;
        m_data.value(dst) = Scalar(0);
        return m_data.value(dst);
    if (leftTarget >= 0) {
      Index start = m_outerIndex[leftTarget];
      Index end = start + m_innerNonZeros[leftTarget];
      Index nextStart = m_outerIndex[leftTarget + 1];
        Index chunkSize = dst - nextStart;
        if (chunkSize > 0) m_data.moveChunk(nextStart, nextStart - 1, chunkSize);
        m_innerNonZeros[outer]++;
        for (Index j = leftTarget; j < outer; j++) m_outerIndex[j + 1]--;
        m_data.value(dst - 1) = Scalar(0);
        return m_data.value(dst - 1);
  Index dst_offset = dst - m_outerIndex[outer];
  if (m_data.allocatedSize() == 0) {
    m_data.resize(m_outerSize);
    std::iota(m_outerIndex, m_outerIndex + m_outerSize + 1, StorageIndex(0));
    if (m_outerSize <= maxReserveSize) {
      reserveInnerVectors(IndexVector::Constant(m_outerSize, 1));
      typedef internal::sparse_reserve_op<StorageIndex> ReserveSizesOp;
      ReserveSizesXpr reserveSizesXpr(m_outerSize, 1, ReserveSizesOp(outer, m_outerSize, maxReserveSize));
      reserveInnerVectors(reserveSizesXpr);
  Index start = m_outerIndex[outer];
  Index end = start + m_innerNonZeros[outer];
  Index new_dst = start + dst_offset;
  if (chunkSize > 0) m_data.moveChunk(new_dst, new_dst + 1, chunkSize);
  m_innerNonZeros[outer]++;
  m_data.value(new_dst) = Scalar(0);
  return m_data.value(new_dst);
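// The insert*AtByOuterInner helpers above implement the element-wise insert()
// API. From user code, the usual pattern is to reserve space per inner vector
// first so that each insert only shifts entries within its own vector instead
// of triggering repeated reallocations, then compress once at the end. The
// names rows, cols, estimatedNnzPerCol, i, j and v_ij below are placeholders:
//
//   Eigen::SparseMatrix<double> A(rows, cols);
//   A.reserve(Eigen::VectorXi::Constant(cols, estimatedNnzPerCol));
//   // for each entry: (i, j) must not already exist; use coeffRef() otherwise
//   A.insert(i, j) = v_ij;
//   A.makeCompressed();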
template <typename Scalar_, int Options_, typename StorageIndex_>
struct evaluator<SparseMatrix<Scalar_, Options_, StorageIndex_>>
    : evaluator<SparseCompressedBase<SparseMatrix<Scalar_, Options_, StorageIndex_>>> {
  typedef evaluator<SparseCompressedBase<SparseMatrix<Scalar_, Options_, StorageIndex_>>> Base;
  evaluator() : Base() {}
  explicit evaluator(const SparseMatrixType& mat) : Base(mat) {}
template <typename Scalar, int Options, typename StorageIndex>
    num_storage_indices += value.outerSize() + 1;
      num_storage_indices += inner_buffer_size;
    std::size_t num_values = inner_buffer_size;
    return sizeof(Header) + sizeof(Scalar) * num_values + sizeof(StorageIndex) * num_storage_indices;
    const size_t header_bytes = sizeof(Header);
    memcpy(dest, &header, header_bytes);
    dest += header_bytes;
    if (!header.compressed) {
      std::size_t data_bytes = sizeof(StorageIndex) * header.outer_size;
    std::size_t data_bytes = sizeof(StorageIndex) * (header.outer_size + 1);
    data_bytes = sizeof(StorageIndex) * header.inner_buffer_size;
    data_bytes = sizeof(Scalar) * header.inner_buffer_size;
    memcpy(dest, value.valuePtr(), data_bytes);
    const size_t header_bytes = sizeof(Header);
    memcpy(&header, src, header_bytes);
    src += header_bytes;
    value.resize(header.rows, header.cols);
    if (header.compressed) {
      value.data().resize(header.inner_buffer_size);
    if (!header.compressed) {
      std::size_t data_bytes = sizeof(StorageIndex) * header.outer_size;
    std::size_t data_bytes = sizeof(StorageIndex) * (header.outer_size + 1);
    data_bytes = sizeof(StorageIndex) * header.inner_buffer_size;
    data_bytes = sizeof(Scalar) * header.inner_buffer_size;
    memcpy(value.valuePtr(), src, data_bytes);
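// The fragments above belong to the Serializer specialization for SparseMatrix:
// size() reports the number of bytes required (header + index arrays + value
// array), serialize() writes the header followed by the raw outer/inner index
// and value buffers, and deserialize() restores them into a matrix. The exact
// class name, namespace and calling convention below are assumptions inferred
// from the member signatures visible in this file; treat it as a sketch only:
//
//   Eigen::Serializer<Eigen::SparseMatrix<double>> s;
//   std::vector<std::uint8_t> buffer(s.size(A));
//   s.serialize(buffer.data(), buffer.data() + buffer.size(), A);
//   Eigen::SparseMatrix<double> B;
//   s.deserialize(buffer.data(), buffer.data() + buffer.size(), B);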