#ifndef EIGEN_SPARSE_AMD_H
#define EIGEN_SPARSE_AMD_H

namespace Eigen {
namespace internal {

// Index "flipping": amd_flip(i) = -i-2 maps a valid index to a strictly
// negative value and is its own inverse, so flipped entries can be recovered.
template<typename T> inline T amd_flip(const T& i) { return -i-2; }

// An entry j is considered marked once its workspace value has been flipped.
template<typename T0, typename T1> inline bool amd_marked(const T0* w, const T1& j) { return w[j]<0; }
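// Illustrative sketch (not part of the original header): how the flip encoding
// round-trips. Any value produced by amd_flip is negative, which is exactly
// what amd_marked tests.
//
//   int v    = 5;
//   int dead = amd_flip(v);    // -7: negative, i.e. "marked" under the w[j] < 0 test
//   int back = amd_flip(dead); // 5 again, since amd_flip is an involution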
// Reset the workspace w so that every live entry satisfies w[k] < mark.
template<typename StorageIndex>
static StorageIndex cs_wclear (StorageIndex mark, StorageIndex lemax, StorageIndex *w, StorageIndex n)
{
  StorageIndex k;
  if(mark < 2 || (mark + lemax < 0))     // mark is too small or about to overflow
  {
    for(k = 0; k < n; k++)
      if(w[k] != 0)
        w[k] = 1;
    mark = 2;
  }
  return mark;                           // at this point, w[0..n-1] < mark holds
}
// Depth-first search and postordering of a tree rooted at node j.
// head[p] is the youngest unordered child of p and next[i] its next sibling;
// the postorder is appended to post starting at position k, and the new k is returned.
template<typename StorageIndex>
StorageIndex cs_tdfs(StorageIndex j, StorageIndex k, StorageIndex *head, const StorageIndex *next, StorageIndex *post, StorageIndex *stack)
{
  StorageIndex i, p, top = 0;
  if(!head || !next || !post || !stack) return (-1);   // check inputs
  stack[0] = j;                   // place j on the stack
  while(top >= 0)                 // while the stack is not empty
  {
    p = stack[top];               // p = top of stack
    i = head[p];                  // i = youngest child of p
    if(i == -1)
    {
      top--;                      // p has no unordered children left
      post[k++] = p;              // node p is the kth postordered node
    }
    else
    {
      head[p] = next[i];          // remove i from the children of p
      stack[++top] = i;           // start a dfs on child node i
    }
  }
  return k;
}
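// A small usage sketch (assumed example, not from the header): postorder a
// two-node tree in which node 1 is the root and node 0 is its only child.
//
//   int head[2]  = { -1, 0 };    // head[1] = 0: node 0 is the child of node 1
//   int next[2]  = { -1, -1 };   // no siblings
//   int post[2], stack[2];
//   int k = cs_tdfs<int>(1, 0, head, next, post, stack);
//   // k == 2 and post == {0, 1}: children are listed before their parent.
//   // Note that the search consumes the child lists, so head is modified.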
// Approximate minimum degree (AMD) ordering of the selfadjoint pattern C.
// C is expected in compressed column format with both triangular parts and the
// diagonal stored; its contents are used as workspace and destroyed. On exit,
// perm holds the fill-reducing permutation.
template<typename Scalar, typename StorageIndex>
void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,StorageIndex>& C, PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm)
{
  using std::sqrt;

  StorageIndex d, dk, dext, lemax = 0, e, elenk, eln, i, j, k, k1,
               k2, k3, jlast, ln, dense, nzmax, mindeg = 0, nvi, nvj, nvk, mark, wnvi,
               ok, nel = 0, p, p1, p2, p3, p4, pj, pk, pk1, pk2, pn, q, t, h;

  StorageIndex n = StorageIndex(C.cols());
  dense = std::max<StorageIndex>(16, StorageIndex(10 * sqrt(double(n))));  // dense-row threshold
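  // Illustrative arithmetic (not from the header): for n == 2500 this gives
  // dense = max(16, 10*sqrt(2500)) = 500, so any node whose degree exceeds 500
  // bypasses the degree lists and is handled as a dense node below.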
  StorageIndex cnz = StorageIndex(C.nonZeros());
  perm.resize(n+1);
  t = cnz + cnz/5 + 2*n;                    // add elbow room to C
  C.resizeNonZeros(t);

  // Workspace: eight integer arrays of n+1 entries each, plus perm reused as "last".
  ei_declare_aligned_stack_constructed_variable(StorageIndex, W, 8*(n+1), 0);
  StorageIndex* len    = W;
  StorageIndex* nv     = W +   (n+1);
  StorageIndex* next   = W + 2*(n+1);
  StorageIndex* head   = W + 3*(n+1);
  StorageIndex* elen   = W + 4*(n+1);
  StorageIndex* degree = W + 5*(n+1);
  StorageIndex* w      = W + 6*(n+1);
  StorageIndex* hhead  = W + 7*(n+1);
  StorageIndex* last   = perm.indices().data();   // use perm as workspace for last
  // --- Initialize the quotient graph ---
  StorageIndex* Cp = C.outerIndexPtr();
  StorageIndex* Ci = C.innerIndexPtr();
  for(k = 0; k < n; k++)
    len[k] = Cp[k+1] - Cp[k];
  len[n] = 0;
  nzmax = t;

  for(i = 0; i <= n; i++)
  {
    head[i]   = -1;                       // degree list i is empty
    last[i]   = -1;
    next[i]   = -1;
    hhead[i]  = -1;                       // hash list i is empty
    nv[i]     = 1;                        // node i is just one node
    w[i]      = 1;                        // node i is alive
    elen[i]   = 0;                        // Ek of node i is empty
    degree[i] = len[i];                   // degree of node i
  }
  mark = internal::cs_wclear<StorageIndex>(0, 0, w, n);   // clear w

  // --- Initialize the degree lists ---
  for(i = 0; i < n; i++)
  {
    bool has_diag = false;
    for(p = Cp[i]; p < Cp[i+1]; ++p)
      if(Ci[p] == i)
      {
        has_diag = true;
        break;
      }

    d = degree[i];
    if(d == 1 && has_diag)                // node i is empty: eliminate it right away
    {
      // ... (mark i as a dead element and as a root of the assembly tree)
    }
    else if(d > dense || !has_diag)       // node i is dense or has no structural diagonal
    {
      // ... (absorb i into the placeholder dense element n)
    }
    else
    {
      // ... (insert i into degree list d)
    }
  }
  // --- Main elimination loop: select pivots until all n columns are ordered ---
  while(nel < n)
  {
    // --- Select a node k of minimum approximate degree ---
    for(k = -1; mindeg < n && (k = head[mindeg]) == -1; mindeg++) {}
    if(next[k] != -1) last[next[k]] = -1;
    head[mindeg] = next[k];                 // remove k from its degree list
    elenk = elen[k];                        // elenk = |Ek|
    nvk = nv[k];                            // number of nodes k represents
    nel += nvk;                             // nv[k] nodes of A are eliminated

    // --- Garbage collection: compact Ci when the elbow room runs out ---
    if(elenk > 0 && cnz + mindeg >= nzmax)
    {
      for(j = 0; j < n; j++)
      {
        // ... (stash the first entry of every live object j in Cp[j] and tag
        //      that position in Ci with amd_flip(j) so it can be found below)
      }
      for(q = 0, p = 0; p < cnz; )          // scan all of memory
      {
        if((j = amd_flip(Ci[p++])) >= 0)    // found the tagged head of object j
        {
          Ci[q] = Cp[j];                    // restore the first entry of object j
          Cp[j] = q++;                      // new start of object j
          for(k3 = 0; k3 < len[j]-1; k3++) Ci[q++] = Ci[p++];
        }
      }
      cnz = q;                              // Ci[cnz..nzmax-1] is free again
    }

    // --- Construct the new element Lk from k and the elements of Ek ---
    dk = 0;
    nv[k] = -nvk;                           // flag k as being in Lk
    p = Cp[k];
    pk1 = (elenk == 0) ? p : cnz;           // build Lk in place if elen[k] == 0
    pk2 = pk1;
    for(k1 = 1; k1 <= elenk + 1; k1++)
    {
      // ... (set e, pj and ln to describe the node list being merged: the list
      //      of element e = Ci[p++], or the nodes of k itself on the last pass)
      for(k2 = 1; k2 <= ln; k2++)
      {
        i = Ci[pj++];
        if((nvi = nv[i]) <= 0) continue;    // node i is dead, or already in Lk
        dk += nvi;                          // the degree of Lk grows by |i|
        nv[i] = -nvi;                       // negate nv[i] to mark i as being in Lk
        Ci[pk2++] = i;                      // place i in Lk
        if(next[i] != -1) last[next[i]] = last[i];
        if(last[i] != -1)                   // unlink i from its degree list
          next[last[i]] = next[i];
        else
          head[degree[i]] = next[i];
      }
      // ... (when e != k, absorb element e into k and mark e dead)
    }
    if(elenk != 0) cnz = pk2;               // Ci[cnz..nzmax-1] is free again
    // ... (record the new element k: its start Cp[k] = pk1, length and degree)
    // --- Find the set differences |Le \ Lk| for all elements e adjacent to Lk ---
    mark = internal::cs_wclear<StorageIndex>(mark, lemax, w, n);    // clear w if necessary
    for(pk = pk1; pk < pk2; pk++)           // scan 1
    {
      i = Ci[pk];
      if((eln = elen[i]) <= 0) continue;    // skip nodes with an empty element list
      nvi = -nv[i];                         // nv[i] was negated above
      wnvi = mark - nvi;
      for(p = Cp[i]; p <= Cp[i] + eln - 1; p++)   // scan Ei
      {
        e = Ci[p];
        if(w[e] >= mark)
          w[e] -= nvi;                      // decrement |Le \ Lk|
        else if(w[e] != 0)                  // ensure e is a live element
          w[e] = degree[e] + wnvi;          // first time e is seen in this scan
      }
    }
    // --- Degree update (scan 2): approximate external degrees of the nodes in Lk ---
    for(pk = pk1; pk < pk2; pk++)
    {
      i = Ci[pk];                           // consider node i in Lk
      p1 = Cp[i];
      p2 = p1 + elen[i] - 1;
      pn = p1;
      for(h = 0, d = 0, p = p1; p <= p2; p++)   // scan Ei, the element list of i
      {
        e = Ci[p];
        if(w[e] != 0)                       // e is an unabsorbed element
        {
          dext = w[e] - mark;               // dext = |Le \ Lk|
          if(dext > 0)
          {
            d += dext;                      // sum up the set differences
            Ci[pn++] = e;                   // keep e in Ei
            h += e;                         // accumulate the hash of node i
          }
          else
          {
            Cp[e] = amd_flip(k);            // aggressive absorption: e -> k
            w[e] = 0;                       // e is now a dead element
          }
        }
      }
      elen[i] = pn - p1 + 1;                // elen[i] = |Ei|
      p3 = pn;
      p4 = p1 + len[i];
      for(p = p2 + 1; p < p4; p++)          // prune the plain-node edges of i
      {
        j = Ci[p];
        if((nvj = nv[j]) <= 0) continue;    // node j is dead or already in Lk
        d += nvj;                           // degree(i) += |j|
        Ci[pn++] = j;                       // keep j in the node list of i
        h += j;                             // accumulate the hash of node i
      }
      if(d == 0)                            // mass elimination: i can go out with k
      {
        // ... (absorb i into k and mark node i dead)
      }
      else
      {
        degree[i] = std::min<StorageIndex>(degree[i], d);   // update the degree of i
        // ... (move k to the front of Ei)
        len[i] = pn - p1 + 1;               // new length of the adjacency list of i
        // ... (file i under hash bucket h % n and remember the bucket in last[i])
      }
    }                                       // scan 2 is done
    degree[k] = dk;                         // finalize |Lk|
    lemax = std::max<StorageIndex>(lemax, dk);
    mark = internal::cs_wclear<StorageIndex>(mark+lemax, lemax, w, n);   // clear w for the next pivot
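    // Note on the marking scheme (explanatory, not from the header): the scans
    // above store values relative to the current mark (e.g. w[e] - mark = |Le \ Lk|),
    // so advancing mark by lemax invalidates all of them at once without touching w.
    // cs_wclear only rewrites w (every nonzero entry reset to 1, mark restarted at 2)
    // when the advanced mark is too small or at risk of integer overflow.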
    // --- Supernode detection: merge nodes of Lk with identical patterns ---
    for(pk = pk1; pk < pk2; pk++)
    {
      i = Ci[pk];
      if(nv[i] >= 0) continue;              // skip if i is dead
      h = last[i];                          // scan the hash bucket that i was filed under
      i = hhead[h];
      hhead[h] = -1;                        // the hash bucket will be empty afterwards
      for(; i != -1 && next[i] != -1; i = next[i], mark++)
      {
        ln = len[i];
        eln = elen[i];
        for(p = Cp[i]+1; p <= Cp[i] + ln-1; p++) w[Ci[p]] = mark;   // mark the pattern of i
        jlast = i;
        for(j = next[i]; j != -1; )         // compare i with every j in the bucket
        {
          ok = (len[j] == ln) && (elen[j] == eln);
          for(p = Cp[j] + 1; ok && p <= Cp[j] + ln - 1; p++)
          {
            if(w[Ci[p]] != mark) ok = 0;    // i and j differ
          }
          if(ok)                            // i and j are indistinguishable
          {
            Cp[j] = amd_flip(i);            // absorb j into i
            nv[i] += nv[j];
            nv[j] = 0;
            elen[j] = -1;                   // node j is dead
            j = next[j];                    // delete j from the hash bucket
            next[jlast] = j;
          }
          else
          {
            jlast = j;                      // i and j are different
            j = next[j];
          }
        }
      }
    }
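    // How the detection works (explanatory, not from the header): during the
    // degree update each node i accumulated h, the sum of its adjacent node and
    // element indices, and was filed under hash bucket h % n. Two nodes can only
    // be indistinguishable if they land in the same bucket, so the expensive
    // pattern comparison above (the w[Ci[p]] == mark test) is restricted to one
    // bucket at a time; indistinguishable nodes are merged into a single
    // supervariable, shrinking the graph for the remaining pivots.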
    // --- Finalize the new element: restore nv and refile the surviving nodes ---
    for(p = pk1, pk = pk1; pk < pk2; pk++)
    {
      i = Ci[pk];
      if((nvi = -nv[i]) <= 0) continue;     // skip if i is dead
      nv[i] = nvi;                          // restore nv[i]
      d = degree[i] + dk - nvi;             // compute the external degree of i
      d = std::min<StorageIndex>(d, n - nel - nvi);
      if(head[d] != -1) last[head[d]] = i;
      next[i] = head[d];                    // put node i back in degree list d
      last[i] = -1;
      head[d] = i;
      mindeg = std::min<StorageIndex>(mindeg, d);   // find the new minimum degree
      degree[i] = d;
      Ci[p++] = i;                          // place i in Lk
    }
    nv[k] = nvk;                            // number of nodes absorbed into k
    if((len[k] = p-pk1) == 0)               // the adjacency list of element k is empty
    {
      Cp[k] = -1;                           // k is a root of the assembly tree
      w[k] = 0;                             // k is now a dead element
    }
    if(elenk != 0) cnz = p;                 // free the unused space in Lk
  }                                         // while (nel < n)
  // --- Postordering of the assembly tree ---
  for(i = 0; i < n; i++) Cp[i] = amd_flip(Cp[i]);   // fix the assembly tree (undo the flips)
  for(j = 0; j <= n; j++) head[j] = -1;
  for(j = n; j >= 0; j--)                   // place unordered nodes in the lists of their parents
  {
    if(nv[j] > 0) continue;                 // skip if j is an element
    next[j] = head[Cp[j]];                  // place j in the list of its parent
    head[Cp[j]] = j;
  }
  for(e = n; e >= 0; e--)                   // place elements in the lists of their parents
  {
    if(nv[e] <= 0) continue;                // skip unless e is an element
    if(Cp[e] != -1)
    {
      next[e] = head[Cp[e]];                // place e in the list of its parent
      head[Cp[e]] = e;
    }
  }
  for(k = 0, i = 0; i <= n; i++)            // postorder the assembly tree
  {
    if(Cp[i] == -1)
      k = internal::cs_tdfs<StorageIndex>(i, k, head, next, perm.indices().data(), w);
  }

  perm.indices().conservativeResize(n);     // keep only the n real columns
}

} // namespace internal
} // namespace Eigen

#endif // EIGEN_SPARSE_AMD_H
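For context, a brief usage sketch. It is not part of Amd.h itself; it assumes the public AMDOrdering functor from Eigen's OrderingMethods module, which symmetrizes the pattern of a matrix and then calls minimum_degree_ordering, and the SimplicialLDLT solver, for which this ordering is the default.

#include <Eigen/SparseCore>
#include <Eigen/OrderingMethods>
#include <Eigen/SparseCholesky>

void amd_example(const Eigen::SparseMatrix<double>& A)   // A assumed selfadjoint
{
  // Explicitly compute the fill-reducing permutation of A's pattern.
  Eigen::AMDOrdering<int> ordering;
  Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> perm;
  ordering(A, perm);                        // perm now holds the AMD ordering

  // Or let a sparse Cholesky solver apply it implicitly.
  Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>, Eigen::Lower,
                        Eigen::AMDOrdering<int> > ldlt(A);
}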