SparseBlock.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSE_BLOCK_H
#define EIGEN_SPARSE_BLOCK_H

#include "./InternalHeaderCheck.h"

namespace Eigen {
// Block spanning a contiguous subset of inner vectors, i.e. a subset of
// complete columns (column-major) or rows (row-major). This generic
// implementation is read-only; the SparseMatrix specialization below is writable.
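// Illustrative sketch (not part of the original header; A, c, panel, s are
// placeholder names): typical expressions that resolve to this specialization.
//
//   #include <Eigen/SparseCore>
//   Eigen::SparseMatrix<double> A(10,10);     // column-major by default
//   auto c     = (A + A).col(2);              // single inner vector of an expression
//   auto panel = (A + A).middleCols(2,3);     // contiguous set of inner vectors
//   double s   = panel.sum();                 // read access via coeff()/iterators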
template<typename XprType, int BlockRows, int BlockCols>
class BlockImpl<XprType,BlockRows,BlockCols,true,Sparse>
  : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,true> >
{
    typedef internal::remove_all_t<typename XprType::Nested> MatrixTypeNested_;
    typedef Block<XprType, BlockRows, BlockCols, true> BlockType;
public:
    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
protected:
    enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
    typedef SparseMatrixBase<BlockType> Base;
    using Base::convert_index;
public:
    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)

    inline BlockImpl(XprType& xpr, Index i)
      : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)
    {}

    inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
      : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))
    {}

    EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
    EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }

    Index nonZeros() const
    {
      typedef internal::evaluator<XprType> EvaluatorType;
      EvaluatorType matEval(m_matrix);
      Index nnz = 0;
      Index end = m_outerStart + m_outerSize.value();
      for(Index j=m_outerStart; j<end; ++j)
        for(typename EvaluatorType::InnerIterator it(matEval, j); it; ++it)
          ++nnz;
      return nnz;
    }
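    // Note: the count above is obtained by iterating over every stored entry
    // of the selected inner vectors, so nonZeros() costs O(nnz of the block)
    // here, whereas the SparseMatrix specialization below inherits the cheaper
    // pointer-based count from SparseCompressedBase.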

    inline const Scalar coeff(Index row, Index col) const
    {
      return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
    }

    inline const Scalar coeff(Index index) const
    {
      return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
    }

    inline const XprType& nestedExpression() const { return m_matrix; }
    inline XprType& nestedExpression() { return m_matrix; }
    Index startRow() const { return IsRowMajor ? m_outerStart : 0; }
    Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }
    Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
    Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }

  protected:

    typename internal::ref_selector<XprType>::non_const_type m_matrix;
    Index m_outerStart;
    const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;

  protected:
    // Disable assignment with a clear error message.
    // Note that simply removing operator= yields compilation errors with ICC+MSVC.
    template<typename T>
    BlockImpl& operator=(const T&)
    {
      EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY);
      return *this;
    }
};
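// Illustrative sketch (not part of the original header; A and B are
// placeholders): writing into a block of a generic sparse *expression* is
// rejected at compile time by the static assert above; only the SparseMatrix
// specializations further down are writable.
//
//   Eigen::SparseMatrix<double> A(10,10), B(10,3);
//   // (A + A).middleCols(2,3) = B;  // error: THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY
//   A.middleCols(2,3) = B;           // OK, handled by sparse_matrix_block_impl below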

/***************************************************************************
* specialization for SparseMatrix
***************************************************************************/

namespace internal {

template<typename SparseMatrixType, int BlockRows, int BlockCols>
class sparse_matrix_block_impl
  : public SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> >
{
    typedef internal::remove_all_t<typename SparseMatrixType::Nested> MatrixTypeNested_;
    typedef Block<SparseMatrixType, BlockRows, BlockCols, true> BlockType;
    typedef SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> > Base;
    using Base::convert_index;
public:
    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
protected:
    typedef typename Base::IndexVector IndexVector;
    enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
public:

    inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index i)
      : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)
    {}

    inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
      : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))
    {}

    template<typename OtherDerived>
    inline BlockType& operator=(const SparseMatrixBase<OtherDerived>& other)
    {
      typedef internal::remove_all_t<typename SparseMatrixType::Nested> NestedMatrixType_;
      NestedMatrixType_& matrix = m_matrix;
      // This assignment is slow if this vector set is not empty
      // and/or it is not at the end of the nonzeros of the underlying matrix.

      // 1 - eval to a temporary to avoid transposition and/or aliasing issues
      Ref<const SparseMatrix<Scalar, IsRowMajor ? RowMajor : ColMajor, StorageIndex> > tmp(other.derived());
      eigen_internal_assert(tmp.outerSize()==m_outerSize.value());

      // 2 - let's check whether there is enough allocated memory
      Index nnz        = tmp.nonZeros();
      Index start      = m_outerStart==0 ? 0 : m_matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block
      Index end        = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()];   // ending position of the current block
      Index block_size = end - start;                                                  // available room in the current block
      Index tail_size  = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end;

      Index free_size  = m_matrix.isCompressed()
                       ? Index(matrix.data().allocatedSize()) + block_size
                       : block_size;

      Index tmp_start = tmp.outerIndexPtr()[0];

      bool update_trailing_pointers = false;
      if(nnz>free_size)
      {
        // realloc manually to reduce copies
        typename SparseMatrixType::Storage newdata(m_matrix.data().allocatedSize() - block_size + nnz);

        // copy the part of the matrix preceding the block
        internal::smart_copy(m_matrix.valuePtr(), m_matrix.valuePtr() + start, newdata.valuePtr());
        internal::smart_copy(m_matrix.innerIndexPtr(), m_matrix.innerIndexPtr() + start, newdata.indexPtr());

        // copy the new block from the temporary
        internal::smart_copy(tmp.valuePtr() + tmp_start, tmp.valuePtr() + tmp_start + nnz, newdata.valuePtr() + start);
        internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz, newdata.indexPtr() + start);

        // copy the tail following the block
        internal::smart_copy(matrix.valuePtr()+end, matrix.valuePtr()+end + tail_size, newdata.valuePtr()+start+nnz);
        internal::smart_copy(matrix.innerIndexPtr()+end, matrix.innerIndexPtr()+end + tail_size, newdata.indexPtr()+start+nnz);

        newdata.resize(m_matrix.outerIndexPtr()[m_matrix.outerSize()] - block_size + nnz);

        matrix.data().swap(newdata);

        update_trailing_pointers = true;
      }
      else
      {
        if(m_matrix.isCompressed() && nnz!=block_size)
        {
          // no need to realloc, simply copy the tail at its respective position and insert tmp
          matrix.data().resize(start + nnz + tail_size);

          internal::smart_memmove(matrix.valuePtr()+end,      matrix.valuePtr() + end+tail_size,      matrix.valuePtr() + start+nnz);
          internal::smart_memmove(matrix.innerIndexPtr()+end, matrix.innerIndexPtr() + end+tail_size, matrix.innerIndexPtr() + start+nnz);

          update_trailing_pointers = true;
        }

        internal::smart_copy(tmp.valuePtr() + tmp_start,      tmp.valuePtr() + tmp_start + nnz,      matrix.valuePtr() + start);
        internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz, matrix.innerIndexPtr() + start);
      }

      // update outer index pointers and innerNonZeros
      if(IsVectorAtCompileTime)
      {
        if(!m_matrix.isCompressed())
          matrix.innerNonZeroPtr()[m_outerStart] = StorageIndex(nnz);
        matrix.outerIndexPtr()[m_outerStart] = StorageIndex(start);
      }
      else
      {
        StorageIndex p = StorageIndex(start);
        for(Index k=0; k<m_outerSize.value(); ++k)
        {
          StorageIndex nnz_k = internal::convert_index<StorageIndex>(tmp.innerVector(k).nonZeros());
          if(!m_matrix.isCompressed())
            matrix.innerNonZeroPtr()[m_outerStart+k] = nnz_k;
          matrix.outerIndexPtr()[m_outerStart+k] = p;
          p += nnz_k;
        }
      }

      if(update_trailing_pointers)
      {
        StorageIndex offset = internal::convert_index<StorageIndex>(nnz - block_size);
        for(Index k = m_outerStart + m_outerSize.value(); k<=matrix.outerSize(); ++k)
        {
          matrix.outerIndexPtr()[k] += offset;
        }
      }

      return derived();
    }

    inline BlockType& operator=(const BlockType& other)
    {
      return operator=<BlockType>(other);
    }

    inline const Scalar* valuePtr() const
    { return m_matrix.valuePtr(); }
    inline Scalar* valuePtr()
    { return m_matrix.valuePtr(); }

    inline const StorageIndex* innerIndexPtr() const
    { return m_matrix.innerIndexPtr(); }
    inline StorageIndex* innerIndexPtr()
    { return m_matrix.innerIndexPtr(); }

    inline const StorageIndex* outerIndexPtr() const
    { return m_matrix.outerIndexPtr() + m_outerStart; }
    inline StorageIndex* outerIndexPtr()
    { return m_matrix.outerIndexPtr() + m_outerStart; }

    inline const StorageIndex* innerNonZeroPtr() const
    { return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }
    inline StorageIndex* innerNonZeroPtr()
    { return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }

    bool isCompressed() const { return m_matrix.innerNonZeroPtr()==0; }

    inline Scalar& coeffRef(Index row, Index col)
    {
      return m_matrix.coeffRef(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
    }

    inline const Scalar coeff(Index row, Index col) const
    {
      return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
    }

    inline const Scalar coeff(Index index) const
    {
      return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
    }

    const Scalar& lastCoeff() const
    {
      EIGEN_STATIC_ASSERT_VECTOR_ONLY(sparse_matrix_block_impl);
      eigen_assert(Base::nonZeros()>0);
      if(m_matrix.isCompressed())
        return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1];
      else
        return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1];
    }

    EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
    EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }

    inline const SparseMatrixType& nestedExpression() const { return m_matrix; }
    inline SparseMatrixType& nestedExpression() { return m_matrix; }
    Index startRow() const { return IsRowMajor ? m_outerStart : 0; }
    Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }
    Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
    Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }

  protected:

    typename internal::ref_selector<SparseMatrixType>::non_const_type m_matrix;
    Index m_outerStart;
    const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;

};
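// Illustrative sketch (not part of the original header; A and B are
// placeholders): operator= above lets a panel of complete inner vectors be
// replaced in place, growing or shrinking it. The storage tail is shifted and
// the trailing outer index pointers are offset accordingly.
//
//   Eigen::SparseMatrix<double> A(8,8), B(8,2);
//   B.insert(0,0) = 1.0;
//   B.insert(3,1) = 2.0;
//   A.innerVectors(2,2) = B;   // replaces columns 2 and 3 of a column-major A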

} // namespace internal

template<typename Scalar_, int Options_, typename StorageIndex_, int BlockRows, int BlockCols>
class BlockImpl<SparseMatrix<Scalar_, Options_, StorageIndex_>,BlockRows,BlockCols,true,Sparse>
  : public internal::sparse_matrix_block_impl<SparseMatrix<Scalar_, Options_, StorageIndex_>,BlockRows,BlockCols>
{
public:
    typedef StorageIndex_ StorageIndex;
    typedef SparseMatrix<Scalar_, Options_, StorageIndex_> SparseMatrixType;
    typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;
    inline BlockImpl(SparseMatrixType& xpr, Index i)
      : Base(xpr, i)
    {}

    inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
      : Base(xpr, startRow, startCol, blockRows, blockCols)
    {}

    using Base::operator=;
};

template<typename Scalar_, int Options_, typename StorageIndex_, int BlockRows, int BlockCols>
class BlockImpl<const SparseMatrix<Scalar_, Options_, StorageIndex_>,BlockRows,BlockCols,true,Sparse>
  : public internal::sparse_matrix_block_impl<const SparseMatrix<Scalar_, Options_, StorageIndex_>,BlockRows,BlockCols>
{
public:
    typedef StorageIndex_ StorageIndex;
    typedef const SparseMatrix<Scalar_, Options_, StorageIndex_> SparseMatrixType;
    typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;
    inline BlockImpl(SparseMatrixType& xpr, Index i)
      : Base(xpr, i)
    {}

    inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
      : Base(xpr, startRow, startCol, blockRows, blockCols)
    {}

    using Base::operator=;
private:
    template<typename Derived> BlockImpl(const SparseMatrixBase<Derived>& xpr, Index i);
    template<typename Derived> BlockImpl(const SparseMatrixBase<Derived>& xpr);
};

//----------

/** Generic implementation of sparse Block expression.
  * Read-only.
  */
template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
class BlockImpl<XprType,BlockRows,BlockCols,InnerPanel,Sparse>
  : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,InnerPanel> >, internal::no_assignment_operator
{
    typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
    typedef SparseMatrixBase<BlockType> Base;
    using Base::convert_index;
public:
    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)

    typedef internal::remove_all_t<typename XprType::Nested> MatrixTypeNested_;

    /** Column or Row constructor
      */
    inline BlockImpl(XprType& xpr, Index i)
      : m_matrix(xpr),
        m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? convert_index(i) : 0),
        m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? convert_index(i) : 0),
        m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
        m_blockCols(BlockCols==1 ? 1 : xpr.cols())
    {}

    /** Dynamic-size constructor
      */
    inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
      : m_matrix(xpr), m_startRow(convert_index(startRow)), m_startCol(convert_index(startCol)), m_blockRows(convert_index(blockRows)), m_blockCols(convert_index(blockCols))
    {}

    inline Index rows() const { return m_blockRows.value(); }
    inline Index cols() const { return m_blockCols.value(); }

    inline Scalar& coeffRef(Index row, Index col)
    {
      return m_matrix.coeffRef(row + m_startRow.value(), col + m_startCol.value());
    }

    inline const Scalar coeff(Index row, Index col) const
    {
      return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value());
    }

    inline Scalar& coeffRef(Index index)
    {
      return m_matrix.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
                               m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
    }

    inline const Scalar coeff(Index index) const
    {
      return m_matrix.coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
                            m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
    }

    inline const XprType& nestedExpression() const { return m_matrix; }
    inline XprType& nestedExpression() { return m_matrix; }
    Index startRow() const { return m_startRow.value(); }
    Index startCol() const { return m_startCol.value(); }
    Index blockRows() const { return m_blockRows.value(); }
    Index blockCols() const { return m_blockCols.value(); }

  protected:
//    friend class internal::GenericSparseBlockInnerIteratorImpl<XprType,BlockRows,BlockCols,InnerPanel>;
    friend struct internal::unary_evaluator<Block<XprType,BlockRows,BlockCols,InnerPanel>, internal::IteratorBased, Scalar >;

    Index nonZeros() const { return Dynamic; }

    typename internal::ref_selector<XprType>::non_const_type m_matrix;
    const internal::variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
    const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
    const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows;
    const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;

  protected:
    // Disable assignment with a clear error message.
    // Note that simply removing operator= yields compilation errors with ICC+MSVC.
    template<typename T>
    BlockImpl& operator=(const T&)
    {
      EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY);
      return *this;
    }

};
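// Illustrative sketch (not part of the original header; A, blk, C are
// placeholders): a generic (non inner-panel) sparse block is read-only as a
// whole, but its coefficients remain accessible, and it can be copied out
// through the iterator-based evaluators defined below.
//
//   Eigen::SparseMatrix<double> A(10,10);
//   auto blk = A.block(1,1,4,4);          // generic sparse block
//   double v = blk.coeff(0,0);            // per-coefficient read access
//   Eigen::SparseMatrix<double> C = blk;  // evaluated via InnerVectorInnerIterator
//   // blk = C;                           // error: read-only subexpression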

namespace internal {

template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased >
  : public evaluator_base<Block<ArgType,BlockRows,BlockCols,InnerPanel> >
{
    class InnerVectorInnerIterator;
    class OuterVectorInnerIterator;
  public:
    typedef Block<ArgType,BlockRows,BlockCols,InnerPanel> XprType;
    typedef typename XprType::StorageIndex StorageIndex;
    typedef typename XprType::Scalar Scalar;

    enum {
      IsRowMajor = XprType::IsRowMajor,
      OuterVector = (BlockCols == 1 && ArgType::IsRowMajor) || (BlockRows == 1 && !ArgType::IsRowMajor),
      CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
      Flags = XprType::Flags
    };

    typedef std::conditional_t<OuterVector,OuterVectorInnerIterator,InnerVectorInnerIterator> InnerIterator;

    explicit unary_evaluator(const XprType& op)
      : m_argImpl(op.nestedExpression()), m_block(op)
    {}

    inline Index nonZerosEstimate() const {
      const Index nnz = m_block.nonZeros();
      if(nnz < 0) {
        // Scale the non-zero estimate for the underlying expression linearly with block size.
        // Return zero if the underlying block is empty.
        const Index nested_sz = m_block.nestedExpression().size();
        return nested_sz == 0 ? 0 : m_argImpl.nonZerosEstimate() * m_block.size() / nested_sz;
      }
      return nnz;
    }

  protected:
    typedef typename evaluator<ArgType>::InnerIterator EvalIterator;

    evaluator<ArgType> m_argImpl;
    const XprType &m_block;
};

template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::InnerVectorInnerIterator
  : public EvalIterator
{
  // NOTE MSVC fails to compile if we don't explicitly "import" IsRowMajor from unary_evaluator
  // because the base class EvalIterator has a private IsRowMajor enum too. (bug #1786)
  // NOTE We cannot call it IsRowMajor because it would shadow unary_evaluator::IsRowMajor
  enum { XprIsRowMajor = unary_evaluator::IsRowMajor };
  const XprType& m_block;
  Index m_end;
public:

  EIGEN_STRONG_INLINE InnerVectorInnerIterator(const unary_evaluator& aEval, Index outer)
    : EvalIterator(aEval.m_argImpl, outer + (XprIsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol())),
      m_block(aEval.m_block),
      m_end(XprIsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows())
  {
    // skip the entries located before the block's inner range
    while( (EvalIterator::operator bool()) && (EvalIterator::index() < (XprIsRowMajor ? m_block.startCol() : m_block.startRow())) )
      EvalIterator::operator++();
  }

  inline StorageIndex index() const { return EvalIterator::index() - convert_index<StorageIndex>(XprIsRowMajor ? m_block.startCol() : m_block.startRow()); }
  inline Index outer() const { return EvalIterator::outer() - (XprIsRowMajor ? m_block.startRow() : m_block.startCol()); }
  inline Index row() const { return EvalIterator::row() - m_block.startRow(); }
  inline Index col() const { return EvalIterator::col() - m_block.startCol(); }

  inline operator bool() const { return EvalIterator::operator bool() && EvalIterator::index() < m_end; }
};

template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::OuterVectorInnerIterator
{
  // NOTE see above
  enum { XprIsRowMajor = unary_evaluator::IsRowMajor };
  const unary_evaluator& m_eval;
  Index m_outerPos;
  const Index m_innerIndex;
  Index m_end;
  EvalIterator m_it;
public:

  EIGEN_STRONG_INLINE OuterVectorInnerIterator(const unary_evaluator& aEval, Index outer)
    : m_eval(aEval),
      m_outerPos( (XprIsRowMajor ? aEval.m_block.startCol() : aEval.m_block.startRow()) ),
      m_innerIndex(XprIsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol()),
      m_end(XprIsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows()),
      m_it(m_eval.m_argImpl, m_outerPos)
  {
    EIGEN_UNUSED_VARIABLE(outer);
    eigen_assert(outer==0);

    while(m_it && m_it.index() < m_innerIndex) ++m_it;
    if((!m_it) || (m_it.index()!=m_innerIndex))
      ++(*this);
  }

  inline StorageIndex index() const { return convert_index<StorageIndex>(m_outerPos - (XprIsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow())); }
  inline Index outer() const { return 0; }
  inline Index row() const { return XprIsRowMajor ? 0 : index(); }
  inline Index col() const { return XprIsRowMajor ? index() : 0; }

  inline Scalar value() const { return m_it.value(); }
  inline Scalar& valueRef() { return m_it.valueRef(); }

  inline OuterVectorInnerIterator& operator++()
  {
    // search next non-zero entry
    while(++m_outerPos<m_end)
    {
      // Restart iterator at the next inner-vector:
      internal::destroy_at(&m_it);
      internal::construct_at(&m_it, m_eval.m_argImpl, m_outerPos);
      // search for the key m_innerIndex in the current outer-vector
      while(m_it && m_it.index() < m_innerIndex) ++m_it;
      if(m_it && m_it.index()==m_innerIndex) break;
    }
    return *this;
  }

  inline operator bool() const { return m_outerPos < m_end; }
};
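// Note: OuterVectorInnerIterator traverses a row of a column-major expression
// (or a column of a row-major one) by restarting an inner iterator on each
// outer vector in the range and scanning it for the single target inner index.
// Iterating such a block therefore costs up to O(nnz of the traversed panel),
// not O(nnz of the block).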

template<typename Scalar_, int Options_, typename StorageIndex_, int BlockRows, int BlockCols>
struct unary_evaluator<Block<SparseMatrix<Scalar_, Options_, StorageIndex_>,BlockRows,BlockCols,true>, IteratorBased>
  : evaluator<SparseCompressedBase<Block<SparseMatrix<Scalar_, Options_, StorageIndex_>,BlockRows,BlockCols,true> > >
{
  typedef Block<SparseMatrix<Scalar_, Options_, StorageIndex_>,BlockRows,BlockCols,true> XprType;
  typedef evaluator<SparseCompressedBase<XprType> > Base;
  explicit unary_evaluator(const XprType &xpr) : Base(xpr) {}
};

template<typename Scalar_, int Options_, typename StorageIndex_, int BlockRows, int BlockCols>
struct unary_evaluator<Block<const SparseMatrix<Scalar_, Options_, StorageIndex_>,BlockRows,BlockCols,true>, IteratorBased>
  : evaluator<SparseCompressedBase<Block<const SparseMatrix<Scalar_, Options_, StorageIndex_>,BlockRows,BlockCols,true> > >
{
  typedef Block<const SparseMatrix<Scalar_, Options_, StorageIndex_>,BlockRows,BlockCols,true> XprType;
  typedef evaluator<SparseCompressedBase<XprType> > Base;
  explicit unary_evaluator(const XprType &xpr) : Base(xpr) {}
};

} // end namespace internal


} // end namespace Eigen

#endif // EIGEN_SPARSE_BLOCK_H