/*
 * Copyright 2011-2017 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __UTIL_MATH_MATRIX_H__
#define __UTIL_MATH_MATRIX_H__

CCL_NAMESPACE_BEGIN
#define MAT(A, size, row, col) A[(row)*(size)+(col)]

/* Variants that use a constant stride on GPUs. */
#ifdef __KERNEL_GPU__
#  define MATS(A, n, r, c, s) A[((r)*(n)+(c))*(s)]
/* Element access when only the lower-triangular elements are stored. */
#  define MATHS(A, r, c, s) A[((r)*((r)+1)/2+(c))*(s)]
#  define VECS(V, i, s) V[(i)*(s)]
#else
#  define MATS(A, n, r, c, s) MAT(A, n, r, c)
#  define MATHS(A, r, c, s) A[(r)*((r)+1)/2+(c)]
#  define VECS(V, i, s) V[i]
#endif
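
/* Worked example (illustration only): for n = 3, the packed lower-triangular layout
 * used by MATHS stores the six unique elements of a symmetric matrix as
 *
 *   [ a00         ]
 *   [ a10 a11     ]   ->   memory: {a00, a10, a11, a20, a21, a22}
 *   [ a20 a21 a22 ]
 *
 * so that, e.g., MATHS(A, 2, 1, 1) resolves to A[(2*(2+1)/2 + 1)*1] = A[4] = a21. */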
/* Zeroing helpers. */

ccl_device_inline void math_vector_zero(float *v, int n)
{
  for(int i = 0; i < n; i++)
    v[i] = 0.0f;
}

ccl_device_inline void math_matrix_zero(float *A, int n)
{
  for(int row = 0; row < n; row++)
    for(int col = 0; col <= row; col++)
      MAT(A, n, row, col) = 0.0f;
}
/* Elementary vector operations. */

ccl_device_inline void math_vector_add(float *a, float ccl_restrict_ptr b, int n)
{
  for(int i = 0; i < n; i++)
    a[i] += b[i];
}

ccl_device_inline void math_vector_mul(float *a, float ccl_restrict_ptr b, int n)
{
  for(int i = 0; i < n; i++)
    a[i] *= b[i];
}

ccl_device_inline void math_vector_mul_strided(ccl_global float *a, float ccl_restrict_ptr b, int astride, int n)
{
  for(int i = 0; i < n; i++)
    a[i*astride] *= b[i];
}

ccl_device_inline void math_vector_scale(float *a, float b, int n)
{
  for(int i = 0; i < n; i++)
    a[i] *= b;
}

ccl_device_inline void math_vector_max(float *a, float ccl_restrict_ptr b, int n)
{
  for(int i = 0; i < n; i++)
    a[i] = max(a[i], b[i]);
}

ccl_device_inline void math_vec3_add(float3 *v, int n, float *x, float3 w)
{
  for(int i = 0; i < n; i++)
    v[i] += w*x[i];
}

ccl_device_inline void math_vec3_add_strided(ccl_global float3 *v, int n, float *x, float3 w, int stride)
{
  for(int i = 0; i < n; i++)
    v[i*stride] += w*x[i];
}
/* Elementary matrix operations.
 * Note: TriMatrix refers to a square matrix that is symmetric, and therefore its
 * upper-triangular part isn't stored. */

ccl_device_inline void math_trimatrix_add_diagonal(ccl_global float *A, int n, float val, int stride)
{
  for(int row = 0; row < n; row++)
    MATHS(A, row, row, stride) += val;
}

/* Add the Gramian matrix of v to A.
 * The Gramian matrix of v is v^T*v, so element (i,j) is v[i]*v[j]. */
ccl_device_inline void math_matrix_add_gramian(float *A,
                                               int n,
                                               float ccl_restrict_ptr v,
                                               float weight)
{
  for(int row = 0; row < n; row++)
    for(int col = 0; col <= row; col++)
      MAT(A, n, row, col) += v[row]*v[col]*weight;
}
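
/* Usage sketch (names hypothetical): accumulating the weighted Gramian X^T*W*X of a
 * design matrix one row at a time, which is how these helpers are typically driven:
 *
 *   float A[4*4], x[4];
 *   math_matrix_zero(A, 4);
 *   for(int p = 0; p < num_pixels; p++) {
 *     fill_design_row(x, p);                    // hypothetical per-pixel feature row
 *     math_matrix_add_gramian(A, 4, x, pixel_weight(p));
 *   }
 */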
/* Add the Gramian matrix of v to A, for a packed lower-triangular A.
 * The Gramian matrix of v is v^T*v, so element (i,j) is v[i]*v[j]. */
ccl_device_inline void math_trimatrix_add_gramian_strided(ccl_global float *A,
                                                          int n,
                                                          float ccl_restrict_ptr v,
                                                          float weight,
                                                          int stride)
{
  for(int row = 0; row < n; row++)
    for(int col = 0; col <= row; col++)
      MATHS(A, row, col, stride) += v[row]*v[col]*weight;
}
/* Transpose matrix A in place. */
ccl_device_inline void math_matrix_transpose(ccl_global float *A, int n, int stride)
{
  for(int i = 0; i < n; i++) {
    for(int j = 0; j < i; j++) {
      float temp = MATS(A, n, i, j, stride);
      MATS(A, n, i, j, stride) = MATS(A, n, j, i, stride);
      MATS(A, n, j, i, stride) = temp;
    }
  }
}
/* Solvers for matrix problems. */

/* In-place Cholesky-Banachiewicz decomposition of the square, positive-definite matrix A
 * into a lower-triangular matrix L so that A = L*L^T. A is overwritten by L.
 * Only the lower-triangular part of A is ever accessed. */
ccl_device void math_trimatrix_cholesky(ccl_global float *A, int n, int stride)
{
  for(int row = 0; row < n; row++) {
    for(int col = 0; col <= row; col++) {
      float sum_col = MATHS(A, row, col, stride);
      for(int k = 0; k < col; k++) {
        sum_col -= MATHS(A, row, k, stride) * MATHS(A, col, k, stride);
      }
      if(row == col) {
        /* Diagonal element: clamp to zero to guard against slightly negative
         * values caused by rounding in near-singular systems. */
        sum_col = sqrtf(max(sum_col, 0.0f));
      }
      else {
        sum_col /= MATHS(A, col, col, stride);
      }
      MATHS(A, row, col, stride) = sum_col;
    }
  }
}
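
/* Worked example: decomposing the symmetric positive-definite matrix
 *
 *   A = [ 4 2 ]   yields   L = [ 2 0 ]   with L*L^T = A,
 *       [ 2 5 ]                [ 1 2 ]
 *
 * so the packed lower triangle {4, 2, 5} is overwritten in place by {2, 1, 2}. */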
/* Solve A*S = y for S given A and y, where A is symmetric positive-semidefinite.
 * Both inputs are destroyed in the process.
 *
 * We can apply Cholesky decomposition to find a lower-triangular L so that L*Lt = A.
 * With that we get (L*Lt)*S = L*(Lt*S) = L*b = y, defining b as Lt*S.
 * Since L is lower-triangular, finding b by forward substitution is easy since y is known.
 * The remaining problem, Lt*S = b, can then be solved by backward substitution.
 *
 * This is useful for solving the normal equation S = inv(Xt*W*X)*Xt*W*y, since Xt*W*X is
 * symmetric positive-semidefinite by construction, so we can just use this function with
 * A = Xt*W*X and y = Xt*W*y. */
ccl_device_inline void math_trimatrix_vec3_solve(ccl_global float *A, ccl_global float3 *y, int n, int stride)
{
  /* Since the first entry of the design row is always 1, the upper-left element of Xt*W*X is a
   * good heuristic for the (weighted) number of pixels considered, so the amount of
   * regularization added to the diagonal is scaled based on it. */
  math_trimatrix_add_diagonal(A, n, 3e-7f*A[0], stride); /* Improve numerical stability. */
  math_trimatrix_cholesky(A, n, stride); /* Replace A with L so that L*Lt = A. */

  /* Use forward substitution to solve L*b = y, replacing y by b. */
  for(int row = 0; row < n; row++) {
    float3 sum = VECS(y, row, stride);
    for(int col = 0; col < row; col++)
      sum -= MATHS(A, row, col, stride) * VECS(y, col, stride);
    VECS(y, row, stride) = sum / MATHS(A, row, row, stride);
  }

  /* Use backward substitution to solve Lt*S = b, replacing b by S. */
  for(int row = n-1; row >= 0; row--) {
    float3 sum = VECS(y, row, stride);
    for(int col = row+1; col < n; col++)
      sum -= MATHS(A, col, row, stride) * VECS(y, col, stride);
    VECS(y, row, stride) = sum / MATHS(A, row, row, stride);
  }
}
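
/* Usage sketch (sizes hypothetical): solving a weighted least-squares fit with a
 * float3 right-hand side. Both buffers are overwritten; y holds the solution afterwards.
 *
 *   float A[(4*(4+1))/2];  // packed lower triangle of Xt*W*X
 *   float3 y[4];           // Xt*W*y
 *   // ... accumulate A via math_trimatrix_add_gramian_strided() and y via
 *   //     math_vec3_add_strided() ...
 *   math_trimatrix_vec3_solve(A, y, 4, 1);
 *   // y now contains the solution S of A*S = y.
 */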
/* Perform the Jacobi eigenvalue method on matrix A.
 * A is assumed to be a symmetric matrix, therefore only the lower-triangular part is ever accessed.
 * The algorithm overwrites the contents of A.
 *
 * After returning, A will be overwritten with D, which is (almost) diagonal,
 * and V will contain the eigenvectors of the original A in its rows (!),
 * so that A = V^T*D*V. Therefore, the diagonal elements of D are the (sorted) eigenvalues of A. */
ccl_device void math_matrix_jacobi_eigendecomposition(float *A, ccl_global float *V, int n, int v_stride)
{
  const float singular_epsilon = 1e-9f;

  /* Start with V as the identity matrix; the rotations below accumulate into it. */
  for(int row = 0; row < n; row++)
    for(int col = 0; col < n; col++)
      MATS(V, n, row, col, v_stride) = (col == row) ? 1.0f : 0.0f;

  for(int sweep = 0; sweep < 8; sweep++) {
    float off_diagonal = 0.0f;
    for(int row = 1; row < n; row++)
      for(int col = 0; col < row; col++)
        off_diagonal += fabsf(MAT(A, n, row, col));
    if(off_diagonal < 1e-7f) {
      /* The matrix has nearly reached diagonal form.
       * Since the eigenvalues are only used to determine truncation, their exact values
       * aren't required - a relative error of a few ULPs won't matter at all. */
      break;
    }

    /* Set the threshold for the small-element rotation skip in the early sweeps:
     * Skip all elements that are less than a tenth of the average off-diagonal element. */
    float threshold = 0.2f*off_diagonal / (n*n);

    for(int row = 1; row < n; row++) {
      for(int col = 0; col < row; col++) {
        /* Perform a Jacobi rotation on this element that reduces it to zero. */
        float element = MAT(A, n, row, col);
        float abs_element = fabsf(element);

        /* If we're in a later sweep and the element already is very small,
         * just set it to zero and skip the rotation. */
        if(sweep > 3 && abs_element <= singular_epsilon*fabsf(MAT(A, n, row, row)) && abs_element <= singular_epsilon*fabsf(MAT(A, n, col, col))) {
          MAT(A, n, row, col) = 0.0f;
          continue;
        }

        if(element == 0.0f) {
          continue;
        }

        /* If we're in one of the first sweeps and the element is smaller than the threshold, skip it. */
        if(sweep < 3 && (abs_element < threshold)) {
          continue;
        }

        /* Determine the rotation: It is characterized by its angle phi - or, in the actual
         * implementation, by sin(phi) and cos(phi). To find those, we first compute their
         * ratio - that might be unstable if the angle approaches 90°, so there's a fallback
         * for that case. Then, we compute sin(phi) and cos(phi) themselves. */
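        /* In detail (standard Jacobi relations): with theta = cot(2*phi)
         * = (A[row][row] - A[col][col]) / (2*A[row][col]), tan(phi) is the
         * smaller-magnitude root of t^2 + 2*t*theta - 1 = 0, i.e.
         * t = sgn(theta) / (|theta| + sqrt(1 + theta^2)), from which
         * c = cos(phi) = 1/sqrt(1 + t^2) and s = sin(phi) = t*c follow. */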
        float singular_diff = MAT(A, n, row, row) - MAT(A, n, col, col);
        float ratio;
        if(abs_element > singular_epsilon*fabsf(singular_diff)) {
          float cot_2phi = 0.5f*singular_diff / element;
          ratio = 1.0f / (fabsf(cot_2phi) + sqrtf(1.0f + cot_2phi*cot_2phi));
          if(cot_2phi < 0.0f) ratio = -ratio; /* Copy sign. */
        }
        else {
          ratio = element / singular_diff;
        }

        float c = 1.0f / sqrtf(1.0f + ratio*ratio);
        float s = ratio*c;
        /* To improve numerical stability by avoiding cancellation, the update equations are
         * reformulated to use sin(phi) and tan(phi/2) instead of sin(phi) and cos(phi). */
        float tan_phi_2 = s / (1.0f + c);

        /* Update the singular values on the diagonal. */
        float singular_delta = ratio*element;
        MAT(A, n, row, row) += singular_delta;
        MAT(A, n, col, col) -= singular_delta;

        /* Set the element itself to zero. */
        MAT(A, n, row, col) = 0.0f;

        /* Perform the actual rotations on the matrices. */
#define ROT(M, r1, c1, r2, c2, stride) \
        { \
          float M1 = MATS(M, n, r1, c1, stride); \
          float M2 = MATS(M, n, r2, c2, stride); \
          MATS(M, n, r1, c1, stride) -= s*(M2 + tan_phi_2*M1); \
          MATS(M, n, r2, c2, stride) += s*(M1 - tan_phi_2*M2); \
        }

        /* Split into three parts to ensure correct accesses since we only store the
         * lower-triangular part of A. */
        for(int i = 0;     i < col; i++) ROT(A, col, i, row, i, 1);
        for(int i = col+1; i < row; i++) ROT(A, i, col, row, i, 1);
        for(int i = row+1; i < n;   i++) ROT(A, i, col, i, row, 1);

        for(int i = 0; i < n; i++) ROT(V, col, i, row, i, v_stride);
#undef ROT
      }
    }
  }
  /* Sort eigenvalues and the associated eigenvectors in descending order. */
  for(int i = 0; i < n - 1; i++) {
    /* Find the largest remaining eigenvalue. */
    float v = MAT(A, n, i, i);
    int k = i;
    for(int j = i; j < n; j++) {
      if(MAT(A, n, j, j) >= v) {
        v = MAT(A, n, j, j);
        k = j;
      }
    }
    if(k != i) {
      /* Swap eigenvalues. */
      MAT(A, n, k, k) = MAT(A, n, i, i);
      MAT(A, n, i, i) = v;
      /* Swap eigenvectors. */
      for(int j = 0; j < n; j++) {
        float temp = MATS(V, n, i, j, v_stride);
        MATS(V, n, i, j, v_stride) = MATS(V, n, k, j, v_stride);
        MATS(V, n, k, j, v_stride) = temp;
      }
    }
  }
}
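
/* Usage sketch (size hypothetical): eigendecomposition of a symmetric 3x3 matrix.
 * After the call, MAT(A, 3, i, i) is the i-th largest eigenvalue and row i of V is
 * the corresponding eigenvector.
 *
 *   float A[3*3];  // only the lower triangle needs to be filled in
 *   float V[3*3];
 *   math_matrix_jacobi_eigendecomposition(A, V, 3, 1);
 */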
#ifdef __KERNEL_SSE3__

/* SSE3 variants of the basic operations above that process four values at once. */

ccl_device_inline void math_vector_zero_sse(__m128 *A, int n)
{
  for(int i = 0; i < n; i++)
    A[i] = _mm_setzero_ps();
}

ccl_device_inline void math_matrix_zero_sse(__m128 *A, int n)
{
  for(int row = 0; row < n; row++)
    for(int col = 0; col <= row; col++)
      MAT(A, n, row, col) = _mm_setzero_ps();
}

/* Add the Gramian matrix of v to A.
 * The Gramian matrix of v is v^T*v, so element (i,j) is v[i]*v[j]. */
ccl_device_inline void math_matrix_add_gramian_sse(__m128 *A, int n, __m128 ccl_restrict_ptr v, __m128 weight)
{
  for(int row = 0; row < n; row++)
    for(int col = 0; col <= row; col++)
      MAT(A, n, row, col) = _mm_add_ps(MAT(A, n, row, col), _mm_mul_ps(_mm_mul_ps(v[row], v[col]), weight));
}

ccl_device_inline void math_vector_add_sse(__m128 *V, int n, __m128 ccl_restrict_ptr a)
{
  for(int i = 0; i < n; i++)
    V[i] = _mm_add_ps(V[i], a[i]);
}

ccl_device_inline void math_vector_mul_sse(__m128 *V, int n, __m128 ccl_restrict_ptr a)
{
  for(int i = 0; i < n; i++)
    V[i] = _mm_mul_ps(V[i], a[i]);
}

ccl_device_inline void math_vector_max_sse(__m128 *a, __m128 ccl_restrict_ptr b, int n)
{
  for(int i = 0; i < n; i++)
    a[i] = _mm_max_ps(a[i], b[i]);
}

/* Reduce the SSE accumulator matrix B to the scalar matrix A by summing the four
 * lanes of every element. */
ccl_device_inline void math_matrix_hsum(float *A, int n, __m128 ccl_restrict_ptr B)
{
  for(int row = 0; row < n; row++)
    for(int col = 0; col <= row; col++)
      MAT(A, n, row, col) = _mm_hsum_ss(MAT(B, n, row, col));
}
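
/* Usage sketch (names hypothetical): the SSE accumulators are typically zeroed,
 * filled four samples at a time, and then collapsed to scalars:
 *
 *   __m128 B[4*4];
 *   float A[4*4];
 *   math_matrix_zero_sse(B, 4);
 *   // ... accumulate with math_matrix_add_gramian_sse() ...
 *   math_matrix_hsum(A, 4, B);  // A now holds the lane-summed scalar Gramian
 */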
#endif  /* __KERNEL_SSE3__ */

CCL_NAMESPACE_END

#endif  /* __UTIL_MATH_MATRIX_H__ */