BlockSparseMatrix.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2013 Desire Nuentsa <desire.nuentsa_wakam@inria.fr>
// Copyright (C) 2013 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSEBLOCKMATRIX_H
#define EIGEN_SPARSEBLOCKMATRIX_H

#include "./InternalHeaderCheck.h"

namespace Eigen {
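
/**
 * \class BlockSparseMatrix
 *
 * \brief A versatile sparse matrix representation where each element is a block.
 *
 * In outline: either all blocks share one size, fixed at compile time through
 * the _BlockAtCompileTime template parameter or at runtime through
 * setBlockSize(), or each block row/column has its own size, declared with
 * setBlockLayout(). The matrix is then filled with setFromTriplets(), or
 * assigned from a regular SparseMatrix with the same storage order.
 *
 * A minimal usage sketch, assuming fixed 3x3 blocks:
 * \code
 * BlockSparseMatrix<double, 3, ColMajor> bmat(b_rows, b_cols);
 * // bmat is a b_rows x b_cols matrix of blocks, each block a 3x3 dense matrix
 * \endcode
 */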
template<typename Scalar_, int _BlockAtCompileTime=Dynamic, int Options_=ColMajor, typename StorageIndex_=int> class BlockSparseMatrix;

template<typename BlockSparseMatrixT> class BlockSparseMatrixView;

namespace internal {
template<typename Scalar_, int _BlockAtCompileTime, int Options_, typename Index_>
struct traits<BlockSparseMatrix<Scalar_,_BlockAtCompileTime,Options_, Index_> >
{
  typedef Scalar_ Scalar;
  typedef Index_ Index;
  typedef Sparse StorageKind; // FIXME Where is it used ??
  typedef MatrixXpr XprKind;
  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = Dynamic,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = Dynamic,
    BlockSize = _BlockAtCompileTime,
    Flags = Options_ | NestByRefBit | LvalueBit,
    CoeffReadCost = NumTraits<Scalar>::ReadCost,
    SupportedAccessPatterns = InnerRandomAccessPattern
  };
};

template<typename BlockSparseMatrixT>
struct traits<BlockSparseMatrixView<BlockSparseMatrixT> >
{
  typedef Ref<Matrix<typename BlockSparseMatrixT::Scalar, BlockSparseMatrixT::BlockSize, BlockSparseMatrixT::BlockSize> > Scalar;
  typedef Ref<Matrix<typename BlockSparseMatrixT::RealScalar, BlockSparseMatrixT::BlockSize, BlockSparseMatrixT::BlockSize> > RealScalar;
};

// Function object to sort a triplet list
template<typename Iterator, bool IsColMajor>
struct TripletComp
{
  typedef typename Iterator::value_type Triplet;
  bool operator()(const Triplet& a, const Triplet& b)
  {
    if(IsColMajor)
      return ((a.col() == b.col() && a.row() < b.row()) || (a.col() < b.col()));
    else
      return ((a.row() == b.row() && a.col() < b.col()) || (a.row() < b.row()));
  }
};
} // end namespace internal

/* Proxy to view the block sparse matrix as a regular sparse matrix */
template<typename BlockSparseMatrixT>
class BlockSparseMatrixView : public SparseMatrixBase<BlockSparseMatrixT>
{
  public:
    typedef Ref<typename BlockSparseMatrixT::BlockScalar> Scalar;
    typedef Ref<typename BlockSparseMatrixT::BlockRealScalar> RealScalar;
    typedef typename BlockSparseMatrixT::Index Index;
    typedef BlockSparseMatrixT Nested;
    enum {
      Flags = BlockSparseMatrixT::Options,
      Options = BlockSparseMatrixT::Options,
      RowsAtCompileTime = BlockSparseMatrixT::RowsAtCompileTime,
      ColsAtCompileTime = BlockSparseMatrixT::ColsAtCompileTime,
      MaxColsAtCompileTime = BlockSparseMatrixT::MaxColsAtCompileTime,
      MaxRowsAtCompileTime = BlockSparseMatrixT::MaxRowsAtCompileTime
    };
  public:
    BlockSparseMatrixView(const BlockSparseMatrixT& spblockmat)
     : m_spblockmat(spblockmat)
    {}

    Index outerSize() const
    {
      return (Flags & RowMajorBit) ? this->rows() : this->cols();
    }
    Index cols() const
    {
      return m_spblockmat.blockCols();
    }
    Index rows() const
    {
      return m_spblockmat.blockRows();
    }
    Scalar coeff(Index row, Index col)
    {
      return m_spblockmat.coeff(row, col);
    }
    Scalar coeffRef(Index row, Index col)
    {
      return m_spblockmat.coeffRef(row, col);
    }
    // Wrapper to iterate over all blocks
    class InnerIterator : public BlockSparseMatrixT::BlockInnerIterator
    {
      public:
        InnerIterator(const BlockSparseMatrixView& mat, Index outer)
         : BlockSparseMatrixT::BlockInnerIterator(mat.m_spblockmat, outer)
        {}
    };

  protected:
    const BlockSparseMatrixT& m_spblockmat;
};

// Proxy to view a regular vector as a block vector
template<typename BlockSparseMatrixT, typename VectorType>
class BlockVectorView
{
  public:
    enum {
      BlockSize = BlockSparseMatrixT::BlockSize,
      ColsAtCompileTime = VectorType::ColsAtCompileTime,
      RowsAtCompileTime = VectorType::RowsAtCompileTime,
      Flags = VectorType::Flags
    };
    typedef Ref<const Matrix<typename BlockSparseMatrixT::Scalar, (RowsAtCompileTime==1)? 1 : BlockSize, (ColsAtCompileTime==1)? 1 : BlockSize> > Scalar;
    typedef typename BlockSparseMatrixT::Index Index;
  public:
    BlockVectorView(const BlockSparseMatrixT& spblockmat, const VectorType& vec)
     : m_spblockmat(spblockmat), m_vec(vec)
    { }
    inline Index cols() const
    {
      return m_vec.cols();
    }
    inline Index size() const
    {
      return m_spblockmat.blockRows();
    }
    inline Scalar coeff(Index bi) const
    {
      Index startRow = m_spblockmat.blockRowsIndex(bi);
      Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow;
      return m_vec.middleRows(startRow, rowSize);
    }
    inline Scalar coeff(Index bi, Index j) const
    {
      Index startRow = m_spblockmat.blockRowsIndex(bi);
      Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow;
      return m_vec.block(startRow, j, rowSize, 1);
    }
  protected:
    const BlockSparseMatrixT& m_spblockmat;
    const VectorType& m_vec;
};

template<typename VectorType, typename Index> class BlockVectorReturn;

// Proxy to view a regular vector as a block vector
template<typename BlockSparseMatrixT, typename VectorType>
class BlockVectorReturn
{
  public:
    enum {
      ColsAtCompileTime = VectorType::ColsAtCompileTime,
      RowsAtCompileTime = VectorType::RowsAtCompileTime,
      Flags = VectorType::Flags
    };
    typedef Ref<Matrix<typename VectorType::Scalar, RowsAtCompileTime, ColsAtCompileTime> > Scalar;
    typedef typename BlockSparseMatrixT::Index Index;
  public:
    BlockVectorReturn(const BlockSparseMatrixT& spblockmat, VectorType& vec)
     : m_spblockmat(spblockmat), m_vec(vec)
    { }
    inline Index size() const
    {
      return m_spblockmat.blockRows();
    }
    inline Scalar coeffRef(Index bi)
    {
      Index startRow = m_spblockmat.blockRowsIndex(bi);
      Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow;
      return m_vec.middleRows(startRow, rowSize);
    }
    inline Scalar coeffRef(Index bi, Index j)
    {
      Index startRow = m_spblockmat.blockRowsIndex(bi);
      Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow;
      return m_vec.block(startRow, j, rowSize, 1);
    }

  protected:
    const BlockSparseMatrixT& m_spblockmat;
    VectorType& m_vec;
};

// Block version of the sparse dense product
template<typename Lhs, typename Rhs>
class BlockSparseTimeDenseProduct;

namespace internal {

template<typename BlockSparseMatrixT, typename VecType>
struct traits<BlockSparseTimeDenseProduct<BlockSparseMatrixT, VecType> >
{
  typedef Dense StorageKind;
  typedef MatrixXpr XprKind;
  typedef typename BlockSparseMatrixT::Scalar Scalar;
  typedef typename BlockSparseMatrixT::Index Index;
  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = Dynamic,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = Dynamic,
    Flags = 0,
    CoeffReadCost = internal::traits<BlockSparseMatrixT>::CoeffReadCost
  };
};
} // end namespace internal

template<typename Lhs, typename Rhs>
class BlockSparseTimeDenseProduct
  : public ProductBase<BlockSparseTimeDenseProduct<Lhs,Rhs>, Lhs, Rhs>
{
  public:
    EIGEN_PRODUCT_PUBLIC_INTERFACE(BlockSparseTimeDenseProduct)

    BlockSparseTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
    {}

    template<typename Dest> void scaleAndAddTo(Dest& dest, const typename Rhs::Scalar& alpha) const
    {
      BlockVectorReturn<Lhs,Dest> tmpDest(m_lhs, dest);
      internal::sparse_time_dense_product(BlockSparseMatrixView<Lhs>(m_lhs), BlockVectorView<Lhs, Rhs>(m_lhs, m_rhs), tmpDest, alpha);
    }

  private:
    BlockSparseTimeDenseProduct& operator=(const BlockSparseTimeDenseProduct&);
};

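// A minimal usage sketch, assuming a filled BlockSparseMatrix bmat and a
// dense VectorXd x with bmat.cols() entries; operator* (defined below in
// BlockSparseMatrix) builds this expression and scaleAndAddTo() evaluates it:
//   VectorXd y = bmat * x;
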
template<typename Scalar_, int _BlockAtCompileTime, int Options_, typename StorageIndex_>
class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<Scalar_,_BlockAtCompileTime, Options_,StorageIndex_> >
{
  public:
    typedef Scalar_ Scalar;
    typedef typename NumTraits<Scalar>::Real RealScalar;
    typedef StorageIndex_ StorageIndex;
    typedef typename internal::ref_selector<BlockSparseMatrix<Scalar_, _BlockAtCompileTime, Options_, StorageIndex_> >::type Nested;

    enum {
      Options = Options_,
      Flags = Options,
      BlockSize = _BlockAtCompileTime,
      RowsAtCompileTime = Dynamic,
      ColsAtCompileTime = Dynamic,
      MaxRowsAtCompileTime = Dynamic,
      MaxColsAtCompileTime = Dynamic,
      IsColMajor = Flags&RowMajorBit ? 0 : 1
    };
    typedef Matrix<Scalar, _BlockAtCompileTime, _BlockAtCompileTime, IsColMajor ? ColMajor : RowMajor> BlockScalar;
    typedef Matrix<RealScalar, _BlockAtCompileTime, _BlockAtCompileTime, IsColMajor ? ColMajor : RowMajor> BlockRealScalar;
    typedef std::conditional_t<_BlockAtCompileTime==Dynamic, Scalar, BlockScalar> BlockScalarReturnType;
    typedef BlockSparseMatrix<Scalar, BlockSize, IsColMajor ? ColMajor : RowMajor, StorageIndex> PlainObject;
  public:
    // Default constructor
    BlockSparseMatrix()
    : m_innerBSize(0), m_outerBSize(0), m_innerOffset(0), m_outerOffset(0),
      m_nonzerosblocks(0), m_nonzeros(0), m_values(0), m_blockPtr(0),
      m_indices(0), m_outerIndex(0), m_blockSize(BlockSize)
    { }

    /**
     * \brief Construct and resize
     */
    BlockSparseMatrix(Index brow, Index bcol)
      : m_innerBSize(IsColMajor ? brow : bcol),
        m_outerBSize(IsColMajor ? bcol : brow),
        m_innerOffset(0), m_outerOffset(0), m_nonzerosblocks(0), m_nonzeros(0),
        m_values(0), m_blockPtr(0), m_indices(0),
        m_outerIndex(0), m_blockSize(BlockSize)
    { }

    /**
     * \brief Copy-constructor
     */
    BlockSparseMatrix(const BlockSparseMatrix& other)
      : m_innerBSize(other.m_innerBSize), m_outerBSize(other.m_outerBSize),
        m_nonzerosblocks(other.m_nonzerosblocks), m_nonzeros(other.m_nonzeros),
        m_blockPtr(0), m_blockSize(other.m_blockSize)
    {
      // should we allow copying between variable-size blocks and fixed-size blocks ??
      eigen_assert(m_blockSize == BlockSize && " CAN NOT COPY BETWEEN FIXED-SIZE AND VARIABLE-SIZE BLOCKS");

      std::copy(other.m_innerOffset, other.m_innerOffset+m_innerBSize+1, m_innerOffset);
      std::copy(other.m_outerOffset, other.m_outerOffset+m_outerBSize+1, m_outerOffset);
      std::copy(other.m_values, other.m_values+m_nonzeros, m_values);

      if(m_blockSize != Dynamic)
        std::copy(other.m_blockPtr, other.m_blockPtr+m_nonzerosblocks, m_blockPtr);

      std::copy(other.m_indices, other.m_indices+m_nonzerosblocks, m_indices);
      std::copy(other.m_outerIndex, other.m_outerIndex+m_outerBSize, m_outerIndex);
    }

    friend void swap(BlockSparseMatrix& first, BlockSparseMatrix& second)
    {
      std::swap(first.m_innerBSize, second.m_innerBSize);
      std::swap(first.m_outerBSize, second.m_outerBSize);
      std::swap(first.m_innerOffset, second.m_innerOffset);
      std::swap(first.m_outerOffset, second.m_outerOffset);
      std::swap(first.m_nonzerosblocks, second.m_nonzerosblocks);
      std::swap(first.m_nonzeros, second.m_nonzeros);
      std::swap(first.m_values, second.m_values);
      std::swap(first.m_blockPtr, second.m_blockPtr);
      std::swap(first.m_indices, second.m_indices);
      std::swap(first.m_outerIndex, second.m_outerIndex);
      std::swap(first.m_blockSize, second.m_blockSize);
    }

    BlockSparseMatrix& operator=(BlockSparseMatrix other)
    {
      //Copy-and-swap paradigm ... avoid leaked data if thrown
      swap(*this, other);
      return *this;
    }

    // Destructor
    ~BlockSparseMatrix()
    {
      delete[] m_outerIndex;
      delete[] m_innerOffset;
      delete[] m_outerOffset;
      delete[] m_indices;
      delete[] m_blockPtr;
      delete[] m_values;
    }

    /**
     * \brief Constructor from a sparse matrix
     */
    template<typename MatrixType>
    inline BlockSparseMatrix(const MatrixType& spmat) : m_blockSize(BlockSize)
    {
      EIGEN_STATIC_ASSERT((m_blockSize != Dynamic), THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE);

      *this = spmat;
    }

    /**
     * \brief Assignment from a sparse matrix with the same storage order
     *
     * The block layout must have been set up beforehand, either with
     * setBlockSize() for fixed-size blocks or setBlockLayout() for
     * variable-size blocks.
     */
    template<typename MatrixType>
    inline BlockSparseMatrix& operator=(const MatrixType& spmat)
    {
      eigen_assert((m_innerBSize != 0 && m_outerBSize != 0)
                   && "Trying to assign to a zero-size matrix, call resize() first");
      eigen_assert(((MatrixType::Options&RowMajorBit) != IsColMajor) && "Wrong storage order");
      typedef SparseMatrix<bool, IsColMajor ? ColMajor : RowMajor, StorageIndex> MatrixPatternType;
      MatrixPatternType blockPattern(blockRows(), blockCols());
      m_nonzeros = 0;

      // First, compute the number of nonzero blocks and their locations
      for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
      {
        // Browse each outer block and compute the structure
        std::vector<bool> nzblocksFlag(m_innerBSize, false); // Record the existing blocks
        blockPattern.startVec(bj);
        for(StorageIndex j = blockOuterIndex(bj); j < blockOuterIndex(bj+1); ++j)
        {
          typename MatrixType::InnerIterator it_spmat(spmat, j);
          for(; it_spmat; ++it_spmat)
          {
            StorageIndex bi = innerToBlock(it_spmat.index()); // Index of the current nonzero block
            if(!nzblocksFlag[bi])
            {
              // Save the index of this nonzero block
              nzblocksFlag[bi] = true;
              blockPattern.insertBackByOuterInnerUnordered(bj, bi) = true;
              // Compute the total number of nonzeros (including explicit zeros in blocks)
              m_nonzeros += blockOuterSize(bj) * blockInnerSize(bi);
            }
          }
        } // end current outer block
      }
      blockPattern.finalize();

      // Allocate the internal arrays
      setBlockStructure(blockPattern);

      for(StorageIndex nz = 0; nz < m_nonzeros; ++nz) m_values[nz] = Scalar(0);
      for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
      {
        // Now copy the values
        for(StorageIndex j = blockOuterIndex(bj); j < blockOuterIndex(bj+1); ++j)
        {
          // Browse the outer block column by column (for column-major matrices)
          typename MatrixType::InnerIterator it_spmat(spmat, j);
          for(; it_spmat; ++it_spmat)
          {
            StorageIndex idx = 0; // Position of this block in the column block
            StorageIndex bi = innerToBlock(it_spmat.index()); // Index of the current nonzero block
            // Go to the inner block where this element belongs to
            while(bi > m_indices[m_outerIndex[bj]+idx]) ++idx; // Not expensive for ordered blocks
            StorageIndex idxVal; // Position in the array of values for this element
            if(m_blockSize == Dynamic)
            {
              // Offset from all blocks before ...
              idxVal = m_blockPtr[m_outerIndex[bj]+idx];
              // ... and offset inside the block
              idxVal += (j - blockOuterIndex(bj)) * blockOuterSize(bj) + it_spmat.index() - m_innerOffset[bi];
            }
            else
            {
              // All blocks before
              idxVal = (m_outerIndex[bj] + idx) * m_blockSize * m_blockSize;
              // ... and offset inside the block
              idxVal += (j - blockOuterIndex(bj)) * m_blockSize + (it_spmat.index()%m_blockSize);
            }
            // Insert the value
            m_values[idxVal] = it_spmat.value();
          } // end of this column
        } // end of this block
      } // end of this outer block

      return *this;
    }

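    // A minimal conversion sketch, assuming a filled scalar SparseMatrix whose
    // dimensions are an exact multiple of the block size (here 3):
    //   SparseMatrix<double> spmat(9, 9);         // filled elsewhere
    //   BlockSparseMatrix<double, 3> bmat(3, 3);  // 3 x 3 grid of 3x3 blocks
    //   bmat = spmat;                             // groups scalars into blocks
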
    /**
     * \brief Set the nonzero block pattern of the matrix
     *
     * Given a pattern matrix with one entry per block, this sets the block
     * structure, allocates the internal arrays and initializes all values to zero.
     */
    template<typename MatrixType>
    void setBlockStructure(const MatrixType& blockPattern)
    {
      resize(blockPattern.rows(), blockPattern.cols());
      reserve(blockPattern.nonZeros());

      // Browse the block pattern and set up the various pointers
      m_outerIndex[0] = 0;
      if(m_blockSize == Dynamic) m_blockPtr[0] = 0;
      for(StorageIndex nz = 0; nz < m_nonzeros; ++nz) m_values[nz] = Scalar(0);
      for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
      {
        // Browse each outer block

        // First, copy and save the indices of nonzero blocks
        // FIXME : find a way to avoid this ...
        std::vector<int> nzBlockIdx;
        typename MatrixType::InnerIterator it(blockPattern, bj);
        for(; it; ++it)
        {
          nzBlockIdx.push_back(it.index());
        }
        std::sort(nzBlockIdx.begin(), nzBlockIdx.end());

        // Now, fill block indices and (eventually) pointers to blocks
        for(StorageIndex idx = 0; idx < nzBlockIdx.size(); ++idx)
        {
          StorageIndex offset = m_outerIndex[bj]+idx; // offset in m_indices
          m_indices[offset] = nzBlockIdx[idx];
          if(m_blockSize == Dynamic)
            m_blockPtr[offset] = m_blockPtr[offset-1] + blockInnerSize(nzBlockIdx[idx]) * blockOuterSize(bj);
          // There is no blockPtr for fixed-size blocks... not needed !???
        }
        // Save the pointer to the next outer block
        m_outerIndex[bj+1] = m_outerIndex[bj] + nzBlockIdx.size();
      }
    }
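
    // A minimal pattern sketch, assuming fixed 3x3 blocks: the boolean entries
    // mark which blocks exist, and coeffRef() then fills their values:
    //   SparseMatrix<bool> pattern(nbrow, nbcol); // one entry per block
    //   // ... insert true at each nonzero block position ...
    //   bmat.setBlockStructure(pattern);
    //   bmat.coeffRef(0, 0) = Matrix3d::Identity();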

    /**
     * \brief Set the number of rows and columns blocks
     */
    inline void resize(Index brow, Index bcol)
    {
      m_innerBSize = IsColMajor ? brow : bcol;
      m_outerBSize = IsColMajor ? bcol : brow;
    }

    /**
     * \brief Set the block size at runtime for fixed-size block layout
     */
    inline void setBlockSize(Index blockSize)
    {
      m_blockSize = blockSize;
    }

    /**
     * \brief Set the row and column block layouts
     *
     * This routine is for variable-size blocks: \a rowBlocks and \a colBlocks
     * give the number of rows (resp. columns) of each block row (resp. block column).
     */
    inline void setBlockLayout(const VectorXi& rowBlocks, const VectorXi& colBlocks)
    {
      const VectorXi& innerBlocks = IsColMajor ? rowBlocks : colBlocks;
      const VectorXi& outerBlocks = IsColMajor ? colBlocks : rowBlocks;
      eigen_assert(m_innerBSize == innerBlocks.size() && "CHECK THE NUMBER OF ROW OR COLUMN BLOCKS");
      eigen_assert(m_outerBSize == outerBlocks.size() && "CHECK THE NUMBER OF ROW OR COLUMN BLOCKS");
      m_outerBSize = outerBlocks.size();
      // Starting index of each block... cumulative sums
      m_innerOffset = new StorageIndex[m_innerBSize+1];
      m_outerOffset = new StorageIndex[m_outerBSize+1];
      m_innerOffset[0] = 0;
      m_outerOffset[0] = 0;
      std::partial_sum(&innerBlocks[0], &innerBlocks[m_innerBSize-1]+1, &m_innerOffset[1]);
      std::partial_sum(&outerBlocks[0], &outerBlocks[m_outerBSize-1]+1, &m_outerOffset[1]);

      // Compute the total number of nonzeros
      m_nonzeros = 0;
      for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
        for(StorageIndex bi = 0; bi < m_innerBSize; ++bi)
          m_nonzeros += outerBlocks[bj] * innerBlocks[bi];
    }
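
    // A minimal layout sketch for variable-size blocks: block rows of heights
    // 2 and 3, block columns of widths 2 and 3, i.e. a 5x5 scalar matrix:
    //   VectorXi rowBlocks(2); rowBlocks << 2, 3;
    //   VectorXi colBlocks(2); colBlocks << 2, 3;
    //   BlockSparseMatrix<double, Dynamic> bmat(2, 2);
    //   bmat.setBlockLayout(rowBlocks, colBlocks);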

    /**
     * \brief Allocate the internal array of pointers to blocks and their inner indices
     *
     * \note For fixed-size blocks, call setBlockSize() first; for variable-size
     * blocks, call setBlockLayout() before calling this function.
     */
    inline void reserve(const Index nonzerosblocks)
    {
      eigen_assert((m_innerBSize != 0 && m_outerBSize != 0) &&
                   "TRYING TO RESERVE ZERO-SIZE MATRICES, CALL resize() first");

      // FIXME Should free if already allocated
      m_outerIndex = new StorageIndex[m_outerBSize+1];

      m_nonzerosblocks = nonzerosblocks;
      if(m_blockSize != Dynamic)
      {
        m_nonzeros = nonzerosblocks * (m_blockSize * m_blockSize);
        m_blockPtr = 0;
      }
      else
      {
        // m_nonzeros is already computed in setBlockLayout()
        m_blockPtr = new StorageIndex[m_nonzerosblocks+1];
      }
      m_indices = new StorageIndex[m_nonzerosblocks+1];
      m_values = new Scalar[m_nonzeros];
    }


    /**
     * \brief Fill values in a matrix from a triplet list
     *
     * Each triplet item carries a block row index, a block column index and a
     * dense block as value. For fixed-size blocks, every block must have the
     * size given at compile time or through setBlockSize(); for variable-size
     * blocks, the sizes must be consistent within each block row and block
     * column, and the layout is deduced automatically.
     */
    template<typename InputIterator>
    void setFromTriplets(const InputIterator& begin, const InputIterator& end)
    {
      eigen_assert((m_innerBSize != 0 && m_outerBSize != 0) && "ZERO BLOCKS, PLEASE CALL resize() before");

      /* First, sort the triplet list
       * FIXME This can be unnecessarily expensive since only the inner indices have to be sorted
       * The best approach is like in SparseMatrix::setFromTriplets()
       */
      internal::TripletComp<InputIterator, IsColMajor> tripletcomp;
      std::sort(begin, end, tripletcomp);

      /* Count the number of row and column blocks,
       * and the number of nonzero blocks per outer dimension
       */
      VectorXi rowBlocks(m_innerBSize); // Size of each block row
      VectorXi colBlocks(m_outerBSize); // Size of each block column
      rowBlocks.setZero(); colBlocks.setZero();
      VectorXi nzblock_outer(m_outerBSize); // Number of nz blocks per outer vector
      VectorXi nz_outer(m_outerBSize); // Number of nz per outer vector... for variable-size blocks
      nzblock_outer.setZero();
      nz_outer.setZero();
      for(InputIterator it(begin); it != end; ++it)
      {
        eigen_assert(it->row() >= 0 && it->row() < this->blockRows() && it->col() >= 0 && it->col() < this->blockCols());
        eigen_assert((it->value().rows() == it->value().cols() && (it->value().rows() == m_blockSize))
                     || (m_blockSize == Dynamic));

        if(m_blockSize == Dynamic)
        {
          eigen_assert((rowBlocks[it->row()] == 0 || rowBlocks[it->row()] == it->value().rows()) &&
                       "NON CORRESPONDING SIZES FOR ROW BLOCKS");
          eigen_assert((colBlocks[it->col()] == 0 || colBlocks[it->col()] == it->value().cols()) &&
                       "NON CORRESPONDING SIZES FOR COLUMN BLOCKS");
          rowBlocks[it->row()] = it->value().rows();
          colBlocks[it->col()] = it->value().cols();
        }
        nz_outer(IsColMajor ? it->col() : it->row()) += it->value().rows() * it->value().cols();
        nzblock_outer(IsColMajor ? it->col() : it->row())++;
      }
      // Allocate member arrays
      if(m_blockSize == Dynamic) setBlockLayout(rowBlocks, colBlocks);
      StorageIndex nzblocks = nzblock_outer.sum();
      reserve(nzblocks);

      // Temporary markers
      VectorXi block_id(m_outerBSize); // To be used as a block marker during insertion

      // Setup outer index pointers and markers
      m_outerIndex[0] = 0;
      if(m_blockSize == Dynamic) m_blockPtr[0] = 0;
      for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
      {
        m_outerIndex[bj+1] = m_outerIndex[bj] + nzblock_outer(bj);
        block_id(bj) = m_outerIndex[bj];
        if(m_blockSize == Dynamic)
        {
          m_blockPtr[m_outerIndex[bj+1]] = m_blockPtr[m_outerIndex[bj]] + nz_outer(bj);
        }
      }

      // Fill the matrix
      for(InputIterator it(begin); it != end; ++it)
      {
        StorageIndex outer = IsColMajor ? it->col() : it->row();
        StorageIndex inner = IsColMajor ? it->row() : it->col();
        m_indices[block_id(outer)] = inner;
        StorageIndex block_size = it->value().rows()*it->value().cols();
        StorageIndex nz_marker = blockPtr(block_id[outer]);
        memcpy(&(m_values[nz_marker]), it->value().data(), block_size * sizeof(Scalar));
        if(m_blockSize == Dynamic)
        {
          m_blockPtr[block_id(outer)+1] = m_blockPtr[block_id(outer)] + block_size;
        }
        block_id(outer)++;
      }

      // An alternative when the outer indices are sorted... no need to use an array of markers
//      for(Index bcol = 0; bcol < m_outerBSize; ++bcol)
//      {
//        Index id = 0, id_nz = 0, id_nzblock = 0;
//        for(InputIterator it(begin); it!=end; ++it)
//        {
//          while (id < bcol) // one pass should do the job unless there are empty columns
//          {
//            id++;
//            m_outerIndex[id+1] = m_outerIndex[id];
//          }
//          m_outerIndex[id+1] += 1;
//          m_indices[id_nzblock] = brow;
//          Index block_size = it->value().rows()*it->value().cols();
//          m_blockPtr[id_nzblock+1] = m_blockPtr[id_nzblock] + block_size;
//          id_nzblock++;
//          memcpy(&(m_values[id_nz]),it->value().data(), block_size*sizeof(Scalar));
//          id_nz += block_size;
//        }
//        while(id < m_outerBSize-1) // Empty columns at the end
//        {
//          id++;
//          m_outerIndex[id+1] = m_outerIndex[id];
//        }
//      }
    }

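    // A minimal fill sketch, assuming fixed 3x3 blocks; Eigen::Triplet can
    // carry a dense block as its value type:
    //   typedef Triplet<Matrix3d, int> BlockTriplet;
    //   std::vector<BlockTriplet> triplets;
    //   triplets.push_back(BlockTriplet(0, 0, Matrix3d::Random()));
    //   triplets.push_back(BlockTriplet(1, 2, Matrix3d::Random()));
    //   BlockSparseMatrix<double, 3> bmat(2, 3);  // 2 x 3 block grid
    //   bmat.setFromTriplets(triplets.begin(), triplets.end());
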

    /**
     * \returns the number of rows
     */
    inline Index rows() const
    {
//      return blockRows();
      return (IsColMajor ? innerSize() : outerSize());
    }

    /**
     * \returns the number of cols
     */
    inline Index cols() const
    {
//      return blockCols();
      return (IsColMajor ? outerSize() : innerSize());
    }

    inline Index innerSize() const
    {
      if(m_blockSize == Dynamic) return m_innerOffset[m_innerBSize];
      else return (m_innerBSize * m_blockSize);
    }

    inline Index outerSize() const
    {
      if(m_blockSize == Dynamic) return m_outerOffset[m_outerBSize];
      else return (m_outerBSize * m_blockSize);
    }
    /** \returns the number of rows grouped by blocks */
    inline Index blockRows() const
    {
      return (IsColMajor ? m_innerBSize : m_outerBSize);
    }
    /** \returns the number of columns grouped by blocks */
    inline Index blockCols() const
    {
      return (IsColMajor ? m_outerBSize : m_innerBSize);
    }

    inline Index outerBlocks() const { return m_outerBSize; }
    inline Index innerBlocks() const { return m_innerBSize; }

    /** \returns the block index where outer belongs to */
    inline Index outerToBlock(Index outer) const
    {
      eigen_assert(outer < outerSize() && "OUTER INDEX OUT OF BOUNDS");

      if(m_blockSize != Dynamic)
        return (outer / m_blockSize); // Integer division

      StorageIndex b_outer = 0;
      while(m_outerOffset[b_outer] <= outer) ++b_outer;
      return b_outer - 1;
    }
    /** \returns the block index where inner belongs to */
    inline Index innerToBlock(Index inner) const
    {
      eigen_assert(inner < innerSize() && "INNER INDEX OUT OF BOUNDS");

      if(m_blockSize != Dynamic)
        return (inner / m_blockSize); // Integer division

      StorageIndex b_inner = 0;
      while(m_innerOffset[b_inner] <= inner) ++b_inner;
      return b_inner - 1;
    }

    /**
     * \returns a reference to the (i,j) block as an Eigen Dense Matrix
     */
    Ref<BlockScalar> coeffRef(Index brow, Index bcol)
    {
      eigen_assert(brow < blockRows() && "BLOCK ROW INDEX OUT OF BOUNDS");
      eigen_assert(bcol < blockCols() && "BLOCK COLUMN INDEX OUT OF BOUNDS");

      StorageIndex rsize = IsColMajor ? blockInnerSize(brow) : blockOuterSize(bcol);
      StorageIndex csize = IsColMajor ? blockOuterSize(bcol) : blockInnerSize(brow);
      StorageIndex inner = IsColMajor ? brow : bcol;
      StorageIndex outer = IsColMajor ? bcol : brow;
      StorageIndex offset = m_outerIndex[outer];
      while(offset < m_outerIndex[outer+1] && m_indices[offset] != inner)
        offset++;
      if(m_indices[offset] == inner)
      {
        return Map<BlockScalar>(&(m_values[blockPtr(offset)]), rsize, csize);
      }
      else
      {
        // FIXME the block does not exist, insert it !!!!!!!!!
        eigen_assert(false && "DYNAMIC INSERTION IS NOT YET SUPPORTED");
      }
    }

    /**
     * \returns the value of the (i,j) block as an Eigen Dense Matrix
     */
    Map<const BlockScalar> coeff(Index brow, Index bcol) const
    {
      eigen_assert(brow < blockRows() && "BLOCK ROW INDEX OUT OF BOUNDS");
      eigen_assert(bcol < blockCols() && "BLOCK COLUMN INDEX OUT OF BOUNDS");

      StorageIndex rsize = IsColMajor ? blockInnerSize(brow) : blockOuterSize(bcol);
      StorageIndex csize = IsColMajor ? blockOuterSize(bcol) : blockInnerSize(brow);
      StorageIndex inner = IsColMajor ? brow : bcol;
      StorageIndex outer = IsColMajor ? bcol : brow;
      StorageIndex offset = m_outerIndex[outer];
      while(offset < m_outerIndex[outer+1] && m_indices[offset] != inner) offset++;
      if(m_indices[offset] == inner)
      {
        return Map<const BlockScalar>(&(m_values[blockPtr(offset)]), rsize, csize);
      }
      else
//        return BlockScalar::Zero(rsize, csize);
        eigen_assert(false && "NOT YET SUPPORTED");
    }

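    // A minimal access sketch, assuming fixed 3x3 blocks and an existing
    // (0,0) block: coeffRef() returns a writable view of the stored block,
    // coeff() a read-only one:
    //   bmat.coeffRef(0, 0) = Matrix3d::Identity();
    //   Matrix3d b = bmat.coeff(0, 0);
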
    // Block Matrix times vector product
    template<typename VecType>
    BlockSparseTimeDenseProduct<BlockSparseMatrix, VecType> operator*(const VecType& lhs) const
    {
      return BlockSparseTimeDenseProduct<BlockSparseMatrix, VecType>(*this, lhs);
    }

    /** \returns the number of nonzero blocks */
    inline Index nonZerosBlocks() const { return m_nonzerosblocks; }
    /** \returns the total number of nonzero elements, including the explicit zeros stored within blocks */
    inline Index nonZeros() const { return m_nonzeros; }

    inline BlockScalarReturnType *valuePtr() {return static_cast<BlockScalarReturnType *>(m_values);}
//    inline Scalar *valuePtr(){ return m_values; }
    inline StorageIndex *innerIndexPtr() {return m_indices; }
    inline const StorageIndex *innerIndexPtr() const {return m_indices; }
    inline StorageIndex *outerIndexPtr() {return m_outerIndex; }
    inline const StorageIndex* outerIndexPtr() const {return m_outerIndex; }

    /** \brief for compatibility purposes with the SparseMatrix class */
    inline bool isCompressed() const {return true;}
    /**
     * \returns the starting index of the bi row block
     */
    inline Index blockRowsIndex(Index bi) const
    {
      return IsColMajor ? blockInnerIndex(bi) : blockOuterIndex(bi);
    }

    /**
     * \returns the starting index of the bj col block
     */
    inline Index blockColsIndex(Index bj) const
    {
      return IsColMajor ? blockOuterIndex(bj) : blockInnerIndex(bj);
    }

    inline Index blockOuterIndex(Index bj) const
    {
      return (m_blockSize == Dynamic) ? m_outerOffset[bj] : (bj * m_blockSize);
    }
    inline Index blockInnerIndex(Index bi) const
    {
      return (m_blockSize == Dynamic) ? m_innerOffset[bi] : (bi * m_blockSize);
    }

    // Not needed ???
    inline Index blockInnerSize(Index bi) const
    {
      return (m_blockSize == Dynamic) ? (m_innerOffset[bi+1] - m_innerOffset[bi]) : m_blockSize;
    }
    inline Index blockOuterSize(Index bj) const
    {
      return (m_blockSize == Dynamic) ? (m_outerOffset[bj+1] - m_outerOffset[bj]) : m_blockSize;
    }

    class InnerIterator;      // Browse column by column
    class BlockInnerIterator; // Browse block by block

    friend std::ostream & operator << (std::ostream & s, const BlockSparseMatrix& m)
    {
      for(StorageIndex j = 0; j < m.outerBlocks(); ++j)
      {
        BlockInnerIterator itb(m, j);
        for(; itb; ++itb)
        {
          s << "(" << itb.row() << ", " << itb.col() << ")\n";
          s << itb.value() << "\n";
        }
      }
      s << std::endl;
      return s;
    }

    /**
     * \returns the starting position of the block \p id in the array of values
     */
    Index blockPtr(Index id) const
    {
      if(m_blockSize == Dynamic) return m_blockPtr[id];
      else return id * m_blockSize * m_blockSize;
      //return blockDynIdx(id, std::conditional_t<(BlockSize==Dynamic), internal::true_type, internal::false_type>());
    }

  protected:
//    inline Index blockDynIdx(Index id, internal::true_type) const
//    {
//      return m_blockPtr[id];
//    }
//    inline Index blockDynIdx(Index id, internal::false_type) const
//    {
//      return id * BlockSize * BlockSize;
//    }

    // To be implemented
    // Insert a block at a particular location... need to make room for that
    Map<BlockScalar> insert(Index brow, Index bcol);

    Index m_innerBSize;          // Number of block rows
    Index m_outerBSize;          // Number of block columns
    StorageIndex *m_innerOffset; // Starting index of each inner block (size m_innerBSize+1)
    StorageIndex *m_outerOffset; // Starting index of each outer block (size m_outerBSize+1)
    Index m_nonzerosblocks;      // Total number of nonzero blocks (at most m_innerBSize x m_outerBSize)
    Index m_nonzeros;            // Total number of nonzero elements
    Scalar *m_values;            // Values stored block column after block column (size m_nonzeros)
    StorageIndex *m_blockPtr;    // Pointer to the beginning of each block in m_values, size m_nonzerosblocks ... null for fixed-size blocks
    StorageIndex *m_indices;     // Inner block indices, size m_nonzerosblocks
    StorageIndex *m_outerIndex;  // Starting pointer of each block column in m_indices (size m_outerBSize)
    Index m_blockSize;           // Size of a block for fixed-size blocks, otherwise -1
};

template<typename Scalar_, int _BlockAtCompileTime, int Options_, typename StorageIndex_>
class BlockSparseMatrix<Scalar_, _BlockAtCompileTime, Options_, StorageIndex_>::BlockInnerIterator
{
  public:

    enum {
      Flags = Options_
    };

    BlockInnerIterator(const BlockSparseMatrix& mat, const Index outer)
    : m_mat(mat), m_outer(outer),
      m_id(mat.m_outerIndex[outer]),
      m_end(mat.m_outerIndex[outer+1])
    {
    }

    inline BlockInnerIterator& operator++() {m_id++; return *this; }

    inline const Map<const BlockScalar> value() const
    {
      return Map<const BlockScalar>(&(m_mat.m_values[m_mat.blockPtr(m_id)]),
                                    rows(), cols());
    }
    inline Map<BlockScalar> valueRef()
    {
      return Map<BlockScalar>(&(m_mat.m_values[m_mat.blockPtr(m_id)]),
                              rows(), cols());
    }
    // Block inner index
    inline Index index() const {return m_mat.m_indices[m_id]; }
    inline Index outer() const { return m_outer; }
    // Block row index
    inline Index row() const {return index(); }
    // Block column index
    inline Index col() const {return outer(); }
    // FIXME Number of rows in the current block
    inline Index rows() const { return (m_mat.m_blockSize==Dynamic) ? (m_mat.m_innerOffset[index()+1] - m_mat.m_innerOffset[index()]) : m_mat.m_blockSize; }
    // Number of columns in the current block
    inline Index cols() const { return (m_mat.m_blockSize==Dynamic) ? (m_mat.m_outerOffset[m_outer+1] - m_mat.m_outerOffset[m_outer]) : m_mat.m_blockSize; }
    inline operator bool() const { return (m_id < m_end); }

  protected:
    const BlockSparseMatrix<Scalar_, _BlockAtCompileTime, Options_, StorageIndex>& m_mat;
    const Index m_outer;
    Index m_id;
    Index m_end;
};
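
// A minimal block-iteration sketch, assuming a filled column-major bmat:
// walk the nonzero blocks of block column bj.
//   for(BlockSparseMatrix<double, 3>::BlockInnerIterator it(bmat, bj); it; ++it)
//     std::cout << "block (" << it.row() << ", " << it.col() << ")\n"
//               << it.value() << "\n";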

template<typename Scalar_, int _BlockAtCompileTime, int Options_, typename StorageIndex_>
class BlockSparseMatrix<Scalar_, _BlockAtCompileTime, Options_, StorageIndex_>::InnerIterator
{
  public:
    InnerIterator(const BlockSparseMatrix& mat, Index outer)
    : m_mat(mat), m_outer(outer), m_outerB(mat.outerToBlock(outer)),
      itb(mat, mat.outerToBlock(outer)),
      m_offset(outer - mat.blockOuterIndex(m_outerB))
    {
      if (itb)
      {
        m_id = m_mat.blockInnerIndex(itb.index());
        m_start = m_id;
        m_end = m_mat.blockInnerIndex(itb.index()+1);
      }
    }
    inline InnerIterator& operator++()
    {
      m_id++;
      if (m_id >= m_end)
      {
        ++itb;
        if (itb)
        {
          m_id = m_mat.blockInnerIndex(itb.index());
          m_start = m_id;
          m_end = m_mat.blockInnerIndex(itb.index()+1);
        }
      }
      return *this;
    }
    inline const Scalar& value() const
    {
      return itb.value().coeff(m_id - m_start, m_offset);
    }
    inline Scalar& valueRef()
    {
      return itb.valueRef().coeffRef(m_id - m_start, m_offset);
    }
    inline Index index() const { return m_id; }
    inline Index outer() const { return m_outer; }
    inline Index col() const { return outer(); }
    inline Index row() const { return index(); }
    inline operator bool() const
    {
      return itb;
    }
  protected:
    const BlockSparseMatrix& m_mat;
    const Index m_outer;
    const Index m_outerB;
    BlockInnerIterator itb; // Iterator through the blocks
    const Index m_offset;   // Position of this column in the block
    Index m_start;          // Starting inner index of this block
    Index m_id;             // Current inner index in the block
    Index m_end;            // Starting inner index of the next block
};
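
// A minimal coefficient-wise sketch, assuming a filled bmat: InnerIterator
// mimics SparseMatrix::InnerIterator over the scalar entries of one column.
//   double sum = 0;
//   for(Eigen::Index j = 0; j < bmat.cols(); ++j)
//     for(BlockSparseMatrix<double, 3>::InnerIterator it(bmat, j); it; ++it)
//       sum += it.value();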
} // end namespace Eigen

#endif // EIGEN_SPARSEBLOCKMATRIX_H