AmbiVector.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_AMBIVECTOR_H
#define EIGEN_AMBIVECTOR_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {

/** \internal
  * Hybrid sparse/dense vector class designed for intensive read-write operations.
  *
  * See BasicSparseLLT and SparseProduct for usage examples.
  */
template<typename Scalar_, typename StorageIndex_>
class AmbiVector
{
  public:
    typedef Scalar_ Scalar;
    typedef StorageIndex_ StorageIndex;
    typedef typename NumTraits<Scalar>::Real RealScalar;

    explicit AmbiVector(Index size)
      : m_buffer(0), m_zero(0), m_size(0), m_end(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)
    {
      resize(size);
    }

    void init(double estimatedDensity);
    void init(int mode);

    Index nonZeros() const;

    /** Specifies a sub-vector to work on */
    void setBounds(Index start, Index end) { m_start = convert_index(start); m_end = convert_index(end); }

    void setZero();

    void restart();
    Scalar& coeffRef(Index i);
    Scalar& coeff(Index i);

    class Iterator;

    ~AmbiVector() { delete[] m_buffer; }
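    // Note: the class owns m_buffer but does not disable copying; copying an
    // AmbiVector would double-delete the buffer (internal callers are assumed
    // never to copy one).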

    void resize(Index size)
    {
      if (m_allocatedSize < size)
        reallocate(size);
      m_size = convert_index(size);
    }

    StorageIndex size() const { return m_size; }

  protected:
    StorageIndex convert_index(Index idx)
    {
      return internal::convert_index<StorageIndex>(idx);
    }

    void reallocate(Index size)
    {
      // If the vector is not too large, allocate a bit more than needed so
      // that we can handle a dense vector even in sparse mode.
      delete[] m_buffer;
      if (size<1000)
      {
        Index allocSize = (size * sizeof(ListEl) + sizeof(Scalar) - 1)/sizeof(Scalar);
        m_allocatedElements = convert_index((allocSize*sizeof(Scalar))/sizeof(ListEl));
        m_buffer = new Scalar[allocSize];
        m_allocatedSize = convert_index(allocSize); // record the capacity so resize() can skip redundant reallocations
      }
      else
      {
        m_allocatedElements = convert_index((size*sizeof(Scalar))/sizeof(ListEl));
        m_buffer = new Scalar[size];
        m_allocatedSize = convert_index(size); // record the capacity so resize() can skip redundant reallocations
      }
      m_size = convert_index(size);
      m_start = 0;
      m_end = m_size;
    }
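    // Capacity math: for small vectors, allocSize is the number of Scalars needed
    // to hold `size` ListEl entries (rounded up), so one allocation can back either
    // a dense array of `size` Scalars or a fully dense linked list; for large
    // vectors only `size` Scalars are allocated and m_allocatedElements is
    // correspondingly smaller.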

    void reallocateSparse()
    {
      Index copyElements = m_allocatedElements;
      m_allocatedElements = (std::min)(StorageIndex(m_allocatedElements*1.5),m_size);
      Index allocSize = m_allocatedElements * sizeof(ListEl);
      allocSize = (allocSize + sizeof(Scalar) - 1)/sizeof(Scalar);
      Scalar* newBuffer = new Scalar[allocSize];
      std::memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl));
      delete[] m_buffer;
      m_buffer = newBuffer;
      m_allocatedSize = convert_index(allocSize); // keep the recorded Scalar capacity in sync
    }
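    // Growth policy: reallocateSparse() grows the capacity by 1.5x, capped at
    // m_size (a vector of size n never holds more than n distinct entries); the
    // memcpy is valid because ListEl is a trivially copyable aggregate.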

  protected:
    // element type of the linked list
    struct ListEl
    {
      StorageIndex next;
      StorageIndex index;
      Scalar value;
    };
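    // The list nodes live contiguously at the front of m_buffer; `next` is an
    // index into this node array (-1 terminates the list), not a pointer, so
    // buffer reallocation does not invalidate links.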

    // used to store data in both modes
    Scalar* m_buffer;
    Scalar m_zero;
    StorageIndex m_size;
    StorageIndex m_start;
    StorageIndex m_end;
    StorageIndex m_allocatedSize;
    StorageIndex m_allocatedElements;
    StorageIndex m_mode;

    // linked list mode
    StorageIndex m_llStart;
    StorageIndex m_llCurrent;
    StorageIndex m_llSize;
};
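
// A minimal lifecycle sketch (illustrative only; `n` is hypothetical, not part
// of this header):
//
//   AmbiVector<double,int> vec(n);
//   vec.init(0.5);          // estimated density above 10% selects dense mode
//   vec.setBounds(0, n);    // work on the whole vector
//   vec.setZero();
//   vec.coeffRef(3) = 1.5;  // random write access in dense mode
//
// Reading back is done with the nested Iterator class defined at the end of
// this file.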

/** \returns the number of non zeros in the current sub vector */
template<typename Scalar_,typename StorageIndex_>
Index AmbiVector<Scalar_,StorageIndex_>::nonZeros() const
{
  if (m_mode==IsSparse)
    return m_llSize;
  else
    return m_end - m_start;
}

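// Mode-selection heuristic: above roughly 10% expected nonzeros, plain dense
// storage beats the linked list.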
template<typename Scalar_,typename StorageIndex_>
void AmbiVector<Scalar_,StorageIndex_>::init(double estimatedDensity)
{
  if (estimatedDensity>0.1)
    init(IsDense);
  else
    init(IsSparse);
}

template<typename Scalar_,typename StorageIndex_>
void AmbiVector<Scalar_,StorageIndex_>::init(int mode)
{
  m_mode = mode;
  // This is only necessary in sparse mode, but we set these unconditionally to avoid some maybe-uninitialized warnings
  // if (m_mode==IsSparse)
  {
    m_llSize = 0;
    m_llStart = -1;
  }
}
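// IsDense and IsSparse are Eigen-internal storage-kind constants (see Constants.h).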

/** Must be called whenever we might perform a write access
  * with an index smaller than the previous one.
  *
  * Don't worry, this function is extremely cheap.
  */
template<typename Scalar_,typename StorageIndex_>
void AmbiVector<Scalar_,StorageIndex_>::restart()
{
  m_llCurrent = m_llStart;
}

template<typename Scalar_,typename StorageIndex_>
void AmbiVector<Scalar_,StorageIndex_>::setZero()
{
  if (m_mode==IsDense)
  {
    for (Index i=m_start; i<m_end; ++i)
      m_buffer[i] = Scalar(0);
  }
  else
  {
    eigen_assert(m_mode==IsSparse);
    m_llSize = 0;
    m_llStart = -1;
  }
}
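// In sparse mode, setZero() is O(1): it only resets the list bookkeeping and
// leaves stale node data in the buffer to be overwritten by later insertions.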

template<typename Scalar_,typename StorageIndex_>
Scalar_& AmbiVector<Scalar_,StorageIndex_>::coeffRef(Index i)
{
  if (m_mode==IsDense)
    return m_buffer[i];
  else
  {
    ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
    // TODO factorize the following code to reduce code generation
    eigen_assert(m_mode==IsSparse);
    if (m_llSize==0)
    {
      // this is the first element
      m_llStart = 0;
      m_llCurrent = 0;
      ++m_llSize;
      llElements[0].value = Scalar(0);
      llElements[0].index = convert_index(i);
      llElements[0].next = -1;
      return llElements[0].value;
    }
    else if (i<llElements[m_llStart].index)
    {
      // this is going to be the new first element of the list;
      // grow the buffer if needed, mirroring the guard in the in-list insertion path below
      if (m_llSize>=m_allocatedElements)
      {
        reallocateSparse();
        llElements = reinterpret_cast<ListEl*>(m_buffer);
      }
      ListEl& el = llElements[m_llSize];
      el.value = Scalar(0);
      el.index = convert_index(i);
      el.next = m_llStart;
      m_llStart = m_llSize;
      ++m_llSize;
      m_llCurrent = m_llStart;
      return el.value;
    }
    else
    {
      StorageIndex nextel = llElements[m_llCurrent].next;
      eigen_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index");
      while (nextel >= 0 && llElements[nextel].index<=i)
      {
        m_llCurrent = nextel;
        nextel = llElements[nextel].next;
      }

      if (llElements[m_llCurrent].index==i)
      {
        // the coefficient already exists and we found it!
        return llElements[m_llCurrent].value;
      }
      else
      {
        if (m_llSize>=m_allocatedElements)
        {
          reallocateSparse();
          llElements = reinterpret_cast<ListEl*>(m_buffer);
        }
        eigen_internal_assert(m_llSize<m_allocatedElements && "internal error: overflow in sparse mode");
        // let's insert a new coefficient
        ListEl& el = llElements[m_llSize];
        el.value = Scalar(0);
        el.index = convert_index(i);
        el.next = llElements[m_llCurrent].next;
        llElements[m_llCurrent].next = m_llSize;
        ++m_llSize;
        return el.value;
      }
    }
  }
}
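// A write-pattern sketch for sparse mode (illustrative; `vec` is a hypothetical
// AmbiVector of sufficient size):
//
//   vec.init(0.05);          // low estimated density selects sparse mode
//   vec.setZero();
//   vec.restart();
//   vec.coeffRef(2) = 1.0;   // indices must be non-decreasing between
//   vec.coeffRef(7) = 2.0;   // two calls to restart()...
//   vec.restart();           // ...so restart() before revisiting index 4
//   vec.coeffRef(4) = 3.0;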

template<typename Scalar_,typename StorageIndex_>
Scalar_& AmbiVector<Scalar_,StorageIndex_>::coeff(Index i)
{
  if (m_mode==IsDense)
    return m_buffer[i];
  else
  {
    ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
    eigen_assert(m_mode==IsSparse);
    if ((m_llSize==0) || (i<llElements[m_llStart].index))
    {
      return m_zero;
    }
    else
    {
      Index elid = m_llStart;
      while (elid >= 0 && llElements[elid].index<i)
        elid = llElements[elid].next;

      if (elid >= 0 && llElements[elid].index==i)
        return llElements[elid].value;
      else
        return m_zero;
    }
  }
}
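// Note: coeff() walks the list from the head on every call (O(nnz) per lookup)
// and returns a reference to the shared m_zero placeholder for absent entries.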

template<typename Scalar_,typename StorageIndex_>
class AmbiVector<Scalar_,StorageIndex_>::Iterator
{
  public:
    typedef Scalar_ Scalar;
    typedef typename NumTraits<Scalar>::Real RealScalar;

    /** Default constructor
      * \param vec the vector on which we iterate
      * \param epsilon the minimal value of a coefficient to be considered as non zero.
      *                In practice, all coefficients having a magnitude smaller than \a epsilon
      *                are skipped.
      */
    explicit Iterator(const AmbiVector& vec, const RealScalar& epsilon = 0)
      : m_vector(vec)
    {
      using std::abs;
      m_epsilon = epsilon;
      m_isDense = m_vector.m_mode==IsDense;
      if (m_isDense)
      {
        m_currentEl = 0;   // this is to avoid a compilation warning
        m_cachedValue = 0; // this is to avoid a compilation warning
        m_cachedIndex = m_vector.m_start-1;
        ++(*this);
      }
      else
      {
        ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
        m_currentEl = m_vector.m_llStart;
        while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon)
          m_currentEl = llElements[m_currentEl].next;
        if (m_currentEl<0)
        {
          m_cachedValue = 0; // this is to avoid a compilation warning
          m_cachedIndex = -1;
        }
        else
        {
          m_cachedIndex = llElements[m_currentEl].index;
          m_cachedValue = llElements[m_currentEl].value;
        }
      }
    }

    StorageIndex index() const { return m_cachedIndex; }
    Scalar value() const { return m_cachedValue; }

    operator bool() const { return m_cachedIndex>=0; }

    Iterator& operator++()
    {
      using std::abs;
      if (m_isDense)
      {
        do {
          ++m_cachedIndex;
        } while (m_cachedIndex<m_vector.m_end && abs(m_vector.m_buffer[m_cachedIndex])<=m_epsilon);
        if (m_cachedIndex<m_vector.m_end)
          m_cachedValue = m_vector.m_buffer[m_cachedIndex];
        else
          m_cachedIndex=-1;
      }
      else
      {
        ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
        do {
          m_currentEl = llElements[m_currentEl].next;
        } while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon);
        if (m_currentEl<0)
        {
          m_cachedIndex = -1;
        }
        else
        {
          m_cachedIndex = llElements[m_currentEl].index;
          m_cachedValue = llElements[m_currentEl].value;
        }
      }
      return *this;
    }

  protected:
    const AmbiVector& m_vector; // the target vector
    StorageIndex m_currentEl;   // the current element in sparse/linked-list mode
    RealScalar m_epsilon;       // epsilon used to prune zero coefficients
    StorageIndex m_cachedIndex; // current coordinate
    Scalar m_cachedValue;       // current value
    bool m_isDense;             // mode of the vector
};
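// Iteration sketch (illustrative; `vec` is a hypothetical AmbiVector): visit the
// coefficients whose magnitude exceeds 1e-12, in increasing index order.
//
//   for (AmbiVector<double,int>::Iterator it(vec, 1e-12); it; ++it)
//     std::cout << it.index() << " -> " << it.value() << '\n';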

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_AMBIVECTOR_H