/* ========================================================================= */
/* === AMD_1 =============================================================== */
/* ========================================================================= */

/* ------------------------------------------------------------------------- */
/* AMD, Copyright (c) Timothy A. Davis,                                      */
/* Patrick R. Amestoy, and Iain S. Duff.  See ../README.txt for License.     */
/* email: DrTimothyAldenDavis@gmail.com                                      */
/* ------------------------------------------------------------------------- */

/* AMD_1: Construct A+A' for a sparse matrix A and perform the AMD ordering.
 *
 * The n-by-n sparse matrix A can be unsymmetric.  It is stored in MATLAB-style
 * compressed-column form, with sorted row indices in each column, and no
 * duplicate entries.  Diagonal entries may be present, but they are ignored.
 * Row indices of column j of A are stored in Ai [Ap [j] ... Ap [j+1]-1].
 * Ap [0] must be zero, and nz = Ap [n] is the number of entries in A.  The
 * size of the matrix, n, must be greater than zero (this routine asserts
 * n > 0; an empty matrix is handled by the caller, not here).
 *
 * This routine must be preceded by a call to AMD_aat, which computes the
 * number of entries in each row/column in A+A', excluding the diagonal.
 * Len [j], on input, is the number of entries in row/column j of A+A'.  This
 * routine constructs the matrix A+A' and then calls AMD_2.  No error checking
 * is performed (this was done in AMD_valid).
25 */ 26 27 module amd_1; 28 29 nothrow @nogc extern(C): 30 31 import glob_opts; // for c_float 32 import amd_internal; 33 import amd; 34 import amd_valid; 35 import amd_2; 36 37 void AMD_1 38 ( 39 Int n, /* n > 0 */ 40 const Int * Ap, /* input of size n+1, not modified */ 41 const Int * Ai, /* input of size nz = Ap [n], not modified */ 42 Int * P, /* size n output permutation */ 43 Int * Pinv, /* size n output inverse permutation */ 44 Int * Len, /* size n input, undefined on output */ 45 Int slen, /* slen >= sum (Len [0..n-1]) + 7n, 46 * ideally slen = 1.2 * sum (Len) + 8n */ 47 Int * S, /* size slen workspace */ 48 c_float * Control, /* input array of size AMD_CONTROL */ 49 c_float * Info /* output array of size AMD_INFO */ 50 ) 51 { 52 Int i; 53 Int j; 54 Int k; 55 Int p; 56 Int pfree; 57 Int iwlen; 58 Int pj; 59 Int p1; 60 Int p2; 61 Int pj2; 62 Int *Iw; 63 Int *Pe; 64 Int *Nv; 65 Int *Head; 66 Int *Elen; 67 Int *Degree; 68 Int *s; 69 Int *W; 70 Int *Sp; 71 Int *Tp; 72 73 /* --------------------------------------------------------------------- */ 74 /* construct the matrix for AMD_2 */ 75 /* --------------------------------------------------------------------- */ 76 77 ASSERT (n > 0) ; 78 79 iwlen = slen - 6*n ; 80 s = S ; 81 Pe = s ; s += n ; 82 Nv = s ; s += n ; 83 Head = s ; s += n ; 84 Elen = s ; s += n ; 85 Degree = s ; s += n ; 86 W = s ; s += n ; 87 Iw = s ; s += iwlen ; 88 89 ASSERT (AMD_valid (n, n, Ap, Ai) == AMD_OK) ; 90 91 /* construct the pointers for A+A' */ 92 Sp = Nv ; /* use Nv and W as workspace for Sp and Tp [ */ 93 Tp = W ; 94 pfree = 0 ; 95 for (j = 0 ; j < n ; j++) 96 { 97 Pe [j] = pfree ; 98 Sp [j] = pfree ; 99 pfree += Len [j] ; 100 } 101 102 /* Note that this restriction on iwlen is slightly more restrictive than 103 * what is strictly required in AMD_2. AMD_2 can operate with no elbow 104 * room at all, but it will be very slow. For better performance, at 105 * least size-n elbow room is enforced. 
*/ 106 ASSERT (iwlen >= pfree + n) ; 107 108 version(NDEBUG){} 109 else { 110 for (p = 0 ; p < iwlen ; p++) Iw [p] = EMPTY ; 111 } // !NDEBUG 112 113 for (k = 0 ; k < n ; k++) 114 { 115 AMD_DEBUG1 ("Construct row/column k= "~ID~" of A+A'\n", k); 116 p1 = Ap [k] ; 117 p2 = Ap [k+1] ; 118 119 /* construct A+A' */ 120 for (p = p1 ; p < p2 ; ) 121 { 122 /* scan the upper triangular part of A */ 123 j = Ai [p] ; 124 ASSERT (j >= 0 && j < n) ; 125 if (j < k) 126 { 127 /* entry A (j,k) in the strictly upper triangular part */ 128 ASSERT (Sp [j] < (j == n-1 ? pfree : Pe [j+1])) ; 129 ASSERT (Sp [k] < (k == n-1 ? pfree : Pe [k+1])) ; 130 Iw [Sp [j]++] = k ; 131 Iw [Sp [k]++] = j ; 132 p++ ; 133 } 134 else if (j == k) 135 { 136 /* skip the diagonal */ 137 p++ ; 138 break ; 139 } 140 else /* j > k */ 141 { 142 /* first entry below the diagonal */ 143 break ; 144 } 145 /* scan lower triangular part of A, in column j until reaching 146 * row k. Start where last scan left off. */ 147 ASSERT (Ap [j] <= Tp [j] && Tp [j] <= Ap [j+1]) ; 148 pj2 = Ap [j+1] ; 149 for (pj = Tp [j] ; pj < pj2 ; ) 150 { 151 i = Ai [pj] ; 152 ASSERT (i >= 0 && i < n) ; 153 if (i < k) 154 { 155 /* A (i,j) is only in the lower part, not in upper */ 156 ASSERT (Sp [i] < (i == n-1 ? pfree : Pe [i+1])) ; 157 ASSERT (Sp [j] < (j == n-1 ? pfree : Pe [j+1])) ; 158 Iw [Sp [i]++] = j ; 159 Iw [Sp [j]++] = i ; 160 pj++ ; 161 } 162 else if (i == k) 163 { 164 /* entry A (k,j) in lower part and A (j,k) in upper */ 165 pj++ ; 166 break ; 167 } 168 else /* i > k */ 169 { 170 /* consider this entry later, when k advances to i */ 171 break ; 172 } 173 } 174 Tp [j] = pj ; 175 } 176 Tp [k] = p ; 177 } 178 179 /* clean up, for remaining mismatched entries */ 180 for (j = 0 ; j < n ; j++) 181 { 182 for (pj = Tp [j] ; pj < Ap [j+1] ; pj++) 183 { 184 i = Ai [pj] ; 185 ASSERT (i >= 0 && i < n) ; 186 /* A (i,j) is only in the lower part, not in upper */ 187 ASSERT (Sp [i] < (i == n-1 ? 
pfree : Pe [i+1])) ; 188 ASSERT (Sp [j] < (j == n-1 ? pfree : Pe [j+1])) ; 189 Iw [Sp [i]++] = j ; 190 Iw [Sp [j]++] = i ; 191 } 192 } 193 194 version(NDEBUG){} 195 else { 196 for (j = 0 ; j < n-1 ; j++) ASSERT (Sp [j] == Pe [j+1]) ; 197 ASSERT (Sp [n-1] == pfree) ; 198 } // !NDEBUG 199 200 /* Tp and Sp no longer needed ] */ 201 202 /* --------------------------------------------------------------------- */ 203 /* order the matrix */ 204 /* --------------------------------------------------------------------- */ 205 206 AMD_2 (n, Pe, Iw, Len, iwlen, pfree, 207 Nv, Pinv, P, Head, Elen, Degree, W, Control, Info) ; 208 }