4 * ***** BEGIN GPL LICENSE BLOCK *****
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
20 * The Original Code is Copyright (C) 2001-2002 by NaN Holding BV.
21 * All rights reserved.
23 * The Original Code is: some of this file.
25 * ***** END GPL LICENSE BLOCK *****
28 /** \file blender/blenlib/intern/math_matrix.c
36 /********************************* Init **************************************/
/* Fill the 3x3 matrix m with zeros. */
void zero_m3(float m[3][3])
{
	memset(m, 0, sizeof(float[3][3]));
}
/* Fill the 4x4 matrix m with zeros. */
void zero_m4(float m[4][4])
{
	memset(m, 0, sizeof(float[4][4]));
}
/* Set m to the 3x3 identity matrix.
 * (fix: use float literals instead of double constants to avoid
 * implicit double->float conversion) */
void unit_m3(float m[][3])
{
	m[0][0]= m[1][1]= m[2][2]= 1.0f;
	m[0][1]= m[0][2]= 0.0f;
	m[1][0]= m[1][2]= 0.0f;
	m[2][0]= m[2][1]= 0.0f;
}
/* Set m to the 4x4 identity matrix.
 * (fix: use float literals instead of double constants to avoid
 * implicit double->float conversion) */
void unit_m4(float m[][4])
{
	m[0][0]= m[1][1]= m[2][2]= m[3][3]= 1.0f;
	m[0][1]= m[0][2]= m[0][3]= 0.0f;
	m[1][0]= m[1][2]= m[1][3]= 0.0f;
	m[2][0]= m[2][1]= m[2][3]= 0.0f;
	m[3][0]= m[3][1]= m[3][2]= 0.0f;
}
/* Copy 3x3 matrix m2 into m1 (destination comes first). */
void copy_m3_m3(float m1[][3], float m2[][3])
{
	memcpy(m1, m2, sizeof(float[3][3]));
}
/* Copy 4x4 matrix m2 into m1 (destination comes first). */
void copy_m4_m4(float m1[][4], float m2[][4])
{
	memcpy(m1, m2, sizeof(float[4][4]));
}
/* Copy the upper-left 3x3 part of the 4x4 matrix m2 into m1.
 * NOTE(review): the function body is not visible in this chunk (source
 * appears elided); signature only — confirm against upstream. */
76 void copy_m3_m4(float m1[][3], float m2[][4])
/* Copy a 3x3 matrix into the rotation part of the 4x4 matrix m1.
 * "no clear" means the 4th row/column of m1 are presumably left
 * untouched — body not visible in this chunk; confirm upstream. */
91 void copy_m4_m3(float m1[][4], float m2[][3]) /* no clear */
105 /* Reevan's Bugfix */
/* Exchange the contents of two 3x3 matrices, element by element. */
void swap_m3m3(float m1[][3], float m2[][3])
{
	float t;
	int i, j;

	for(i = 0; i < 3; i++) {
		for (j = 0; j < 3; j++) {
			t = m1[i][j];
			m1[i][j] = m2[i][j];
			m2[i][j] = t;
		}
	}
}
/* Exchange the contents of two 4x4 matrices, element by element. */
void swap_m4m4(float m1[][4], float m2[][4])
{
	float t;
	int i, j;

	for(i = 0; i < 4; i++) {
		for (j = 0; j < 4; j++) {
			t = m1[i][j];
			m1[i][j] = m2[i][j];
			m2[i][j] = t;
		}
	}
}
145 /******************************** Arithmetic *********************************/
/* m1 = m2_ . m3_ (row-vector convention: m1[j][k] = sum_i m2[j][i]*m3[i][k]).
 * m1 may alias m2_ and/or m3_; the inputs are copied first. */
void mul_m4_m4m4(float m1[][4], float m2_[][4], float m3_[][4])
{
	float m2[4][4], m3[4][4];
	int j, k;

	/* copy so it works when m1 is the same pointer as m2 or m3 */
	memcpy(m2, m2_, sizeof(m2));
	memcpy(m3, m3_, sizeof(m3));

	for (j = 0; j < 4; j++) {
		for (k = 0; k < 4; k++) {
			m1[j][k] = m2[j][0]*m3[0][k] + m2[j][1]*m3[1][k] + m2[j][2]*m3[2][k] + m2[j][3]*m3[3][k];
		}
	}
}
/* m1[i][j] = m2_[i][k] * m3_[k][j] — note the FLIPPED argument order:
 * the second parameter is named m3_ and the third m2_.
 * m1 may alias either input; both are copied first. */
void mul_m3_m3m3(float m1[][3], float m3_[][3], float m2_[][3])
{
	float m2[3][3], m3[3][3];
	int i, j;

	/* copy so it works when m1 is the same pointer as m2 or m3 */
	memcpy(m2, m2_, sizeof(m2));
	memcpy(m3, m3_, sizeof(m3));

	for (i = 0; i < 3; i++) {
		for (j = 0; j < 3; j++) {
			m1[i][j] = m2[i][0]*m3[0][j] + m2[i][1]*m3[1][j] + m2[i][2]*m3[2][j];
		}
	}
}
/* Write into the upper-left 3x3 of m1: m1[i][j] = m2[i][k] * m3[k][j]
 * (m2 is the 3x3 third parameter, m3 the 4x4 second parameter).
 * The 4th row/column of m1 are not touched. */
void mul_m4_m4m3(float (*m1)[4], float (*m3)[4], float (*m2)[3])
{
	int i, j;

	for (i = 0; i < 3; i++) {
		for (j = 0; j < 3; j++) {
			m1[i][j] = m2[i][0]*m3[0][j] + m2[i][1]*m3[1][j] + m2[i][2]*m3[2][j];
		}
	}
}
/* m1 = m2 * m3, using only the upper-left 3x3 part of the 4x4 matrix m3
 * (the 4th row/column of m3 are ignored). */
void mul_m3_m3m4(float m1[][3], float m2[][3], float m3[][4])
{
	int i, j;

	/* m1[i][j] = m2[i][k] * m3[k][j] */
	for (i = 0; i < 3; i++) {
		for (j = 0; j < 3; j++) {
			m1[i][j] = m2[i][0]*m3[0][j] + m2[i][1]*m3[1][j] + m2[i][2]*m3[2][j];
		}
	}
}
/* Write into the upper-left 3x3 of m1: m1[i][j] = m2[i][k] * m3[k][j]
 * (m2 is the 4x4 third parameter, m3 the 3x3 second parameter).
 * The 4th row/column of m1 are not touched. */
void mul_m4_m3m4(float (*m1)[4], float (*m3)[3], float (*m2)[4])
{
	int i, j;

	for (i = 0; i < 3; i++) {
		for (j = 0; j < 3; j++) {
			m1[i][j] = m2[i][0]*m3[0][j] + m2[i][1]*m3[1][j] + m2[i][2]*m3[2][j];
		}
	}
}
/* Multiply a series of up to eight 3x3 matrices into answ, stopping at the
 * first NULL argument after m2. Note mul_m3_m3m3's flipped argument order.
 * NOTE(review): this chunk is elided — the NULL checks wrapping each
 * intermediate product and the local 'temp' declaration are missing from
 * view; only the visible lines are annotated. */
243 void mul_serie_m3(float answ[][3],
244 float m1[][3], float m2[][3], float m3[][3],
245 float m4[][3], float m5[][3], float m6[][3],
246 float m7[][3], float m8[][3])
250 if(m1==NULL || m2==NULL) return;
252 mul_m3_m3m3(answ, m2, m1);
254 mul_m3_m3m3(temp, m3, answ);
256 mul_m3_m3m3(answ, m4, temp);
258 mul_m3_m3m3(temp, m5, answ);
260 mul_m3_m3m3(answ, m6, temp);
262 mul_m3_m3m3(temp, m7, answ);
264 mul_m3_m3m3(answ, m8, temp);
/* the elided else-branches copy the last intermediate back into answ */
266 else copy_m3_m3(answ, temp);
269 else copy_m3_m3(answ, temp);
272 else copy_m3_m3(answ, temp);
/* Multiply a series of up to eight 4x4 matrices into answ, stopping at the
 * first NULL argument after m2 (4x4 counterpart of mul_serie_m3).
 * NOTE(review): this chunk is elided — the NULL checks, the 'temp'
 * declaration and the m8 parameter line are missing from view. */
276 void mul_serie_m4(float answ[][4], float m1[][4],
277 float m2[][4], float m3[][4], float m4[][4],
278 float m5[][4], float m6[][4], float m7[][4],
283 if(m1==NULL || m2==NULL) return;
285 mul_m4_m4m4(answ, m2, m1);
287 mul_m4_m4m4(temp, m3, answ);
289 mul_m4_m4m4(answ, m4, temp);
291 mul_m4_m4m4(temp, m5, answ);
293 mul_m4_m4m4(answ, m6, temp);
295 mul_m4_m4m4(temp, m7, answ);
297 mul_m4_m4m4(answ, m8, temp);
/* the elided else-branches copy the last intermediate back into answ */
299 else copy_m4_m4(answ, temp);
302 else copy_m4_m4(answ, temp);
305 else copy_m4_m4(answ, temp);
/* Transform the 3D point vec in place by mat (row-vector convention),
 * including the translation stored in mat[3]. */
void mul_m4_v3(float mat[][4], float *vec)
{
	const float x = vec[0]; /* cached: vec[0]/vec[1] are overwritten below */
	const float y = vec[1];
	int i;

	for (i = 0; i < 3; i++) {
		vec[i] = x*mat[0][i] + y*mat[1][i] + mat[2][i]*vec[2] + mat[3][i];
	}
}
/* in = mat * vec for a 3D point (row-vector convention, translation
 * included). Safe when in aliases vec. */
void mul_v3_m4v3(float *in, float mat[][4], float *vec)
{
	const float x = vec[0];
	const float y = vec[1];
	int i;

	for (i = 0; i < 3; i++) {
		in[i] = x*mat[0][i] + y*mat[1][i] + mat[2][i]*vec[2] + mat[3][i];
	}
}
/* Same as mul_m4_v3() but does not apply the translation component
 * (rotation/scale only — treats vec as a direction). */
void mul_mat3_m4_v3(float mat[][4], float *vec)
{
	const float x = vec[0];
	const float y = vec[1];
	int i;

	for (i = 0; i < 3; i++) {
		vec[i] = x*mat[0][i] + y*mat[1][i] + mat[2][i]*vec[2];
	}
}
/* Transform vec by mat and apply the perspective division by the
 * resulting homogeneous coordinate w.
 * NOTE(review): the body after computing w is elided in this chunk —
 * presumably it transforms vec and divides each component by w;
 * confirm against upstream. */
343 void mul_project_m4_v3(float mat[][4], float vec[3])
345 const float w= vec[0]*mat[0][3] + vec[1]*mat[1][3] + vec[2]*mat[2][3] + mat[3][3];
/* r = mat * v for a homogeneous 4D vector (row-vector convention).
 * Safe when r aliases v. */
void mul_v4_m4v4(float r[4], float mat[4][4], float v[4])
{
	const float x = v[0];
	const float y = v[1];
	const float z = v[2];
	int i;

	for (i = 0; i < 4; i++) {
		r[i] = x*mat[0][i] + y*mat[1][i] + z*mat[2][i] + mat[3][i]*v[3];
	}
}
/* Transform the homogeneous 4D vector r in place by mat
 * (relies on mul_v4_m4v4 being alias-safe). */
void mul_m4_v4(float mat[4][4], float r[4])
{
	mul_v4_m4v4(r, mat, r);
}
/* r = M * a for a 3D vector (row-vector convention).
 * r should not alias a: a[0]/a[1] are read again after r[0] is written,
 * matching the original element-by-element order. */
void mul_v3_m3v3(float r[3], float M[3][3], float a[3])
{
	int i;

	for (i = 0; i < 3; i++) {
		r[i] = M[0][i]*a[0] + M[1][i]*a[1] + M[2][i]*a[2];
	}
}
/* Transform the 3D vector r in place by M (uses a temporary so the
 * aliased read/write is safe).
 * NOTE(review): the temporary's declaration and the copy back into r are
 * elided in this chunk — presumably copy_v3_v3(r, tmp); confirm upstream. */
379 void mul_m3_v3(float M[3][3], float r[3])
383 mul_v3_m3v3(tmp, M, r);
/* Transform vec in place by the TRANSPOSE of mat, i.e. multiply by the
 * matrix rows instead of columns. */
void mul_transposed_m3_v3(float mat[][3], float *vec)
{
	const float x = vec[0];
	const float y = vec[1];
	int i;

	for (i = 0; i < 3; i++) {
		vec[i] = x*mat[i][0] + y*mat[i][1] + mat[i][2]*vec[2];
	}
}
/* Presumably scales every element of the 3x3 matrix m by f — the body is
 * not visible in this chunk; confirm against upstream. */
398 void mul_m3_fl(float m[3][3], float f)
/* Presumably scales every element of the 4x4 matrix m by f — the body is
 * not visible in this chunk; confirm against upstream. */
407 void mul_m4_fl(float m[4][4], float f)
/* Presumably scales only the 3x3 rotation/scale part of the 4x4 matrix m
 * by f — the body is not visible in this chunk; confirm upstream. */
416 void mul_mat3_m4_fl(float m[4][4], float f)
/* Transform a double-precision 3D vector in place by the float matrix mat
 * (row-vector convention). */
void mul_m3_v3_double(float mat[][3], double *vec)
{
	const double x = vec[0];
	const double y = vec[1];
	int i;

	for (i = 0; i < 3; i++) {
		vec[i] = x*mat[0][i] + y*mat[1][i] + mat[2][i]*vec[2];
	}
}
/* m1 = m2 + m3, element-wise, for 3x3 matrices. */
void add_m3_m3m3(float m1[][3], float m2[][3], float m3[][3])
{
	int i, j;

	for (i = 0; i < 3; i++) {
		for (j = 0; j < 3; j++) {
			m1[i][j] = m2[i][j] + m3[i][j];
		}
	}
}
/* m1 = m2 + m3, element-wise, for 4x4 matrices. */
void add_m4_m4m4(float m1[][4], float m2[][4], float m3[][4])
{
	int i, j;

	for (i = 0; i < 4; i++) {
		for (j = 0; j < 4; j++) {
			m1[i][j] = m2[i][j] + m3[i][j];
		}
	}
}
/* Invert m in place. Returns 0 when the matrix is singular, non-zero
 * otherwise (the result is still written either way). */
int invert_m3(float m[3][3])
{
	float result[3][3];
	int ok;

	ok = invert_m3_m3(result, m);
	copy_m3_m3(m, result);
	return ok;
}
/* m1 = inverse of m2 via adjoint/determinant. Returns 0 when m2 is
 * singular (m1 then holds the unscaled adjoint), non-zero otherwise.
 * NOTE(review): the divide-by-determinant tail was elided in the source
 * seen here and is reconstructed per the standard adjoint method. */
int invert_m3_m3(float m1[3][3], float m2[3][3])
{
	float det;
	int a, b, success;

	/* calc adjoint */
	adjoint_m3_m3(m1, m2);

	/* then determinant old matrix! */
	det= m2[0][0]* (m2[1][1]*m2[2][2] - m2[1][2]*m2[2][1])
	    -m2[1][0]* (m2[0][1]*m2[2][2] - m2[0][2]*m2[2][1])
	    +m2[2][0]* (m2[0][1]*m2[1][2] - m2[0][2]*m2[1][1]);

	success = (det != 0.0f);

	if (det == 0.0f) det = 1.0f; /* avoid dividing by zero below */
	det = 1.0f/det;
	for (a = 0; a < 3; a++) {
		for (b = 0; b < 3; b++) {
			m1[a][b] *= det;
		}
	}
	return success;
}
/* Invert m in place. Returns 0 on failure (no usable pivot), non-zero
 * on success. */
int invert_m4(float m[4][4])
{
	float result[4][4];
	int ok;

	ok = invert_m4_m4(result, m);
	copy_m4_m4(m, result);
	return ok;
}
504 * computes the inverse of mat and puts it in inverse. Returns
505 * TRUE on success (i.e. can always find a pivot) and FALSE on failure.
506 * Uses Gaussian Elimination with partial (maximal column) pivoting.
/* NOTE(review): this chunk is elided — the identity initialization of
 * 'inverse', the 'maxj' bookkeeping, the swap condition and several
 * loop braces are missing from view; only visible lines annotated.
 * Also note the fabs/fabsf mix on float operands (double promotion) —
 * harmless but inconsistent. */
511 int invert_m4_m4(float inverse[4][4], float mat[4][4])
519 /* Set inverse to identity */
526 /* Copy original matrix so we don't mess it up */
527 for(i = 0; i < 4; i++)
528 for(j = 0; j <4; j++)
529 tempmat[i][j] = mat[i][j];
531 for(i = 0; i < 4; i++) {
532 /* Look for row with max pivot */
533 max = fabs(tempmat[i][i]);
535 for(j = i + 1; j < 4; j++) {
536 if(fabsf(tempmat[j][i]) > max) {
537 max = fabs(tempmat[j][i]);
541 /* Swap rows if necessary */
543 for(k = 0; k < 4; k++) {
544 SWAP(float, tempmat[i][k], tempmat[maxj][k]);
545 SWAP(float, inverse[i][k], inverse[maxj][k]);
549 temp = tempmat[i][i];
551 return 0; /* No non-zero pivot */
/* normalize the pivot row ... */
552 for(k = 0; k < 4; k++) {
553 tempmat[i][k] = (float)(tempmat[i][k]/temp);
554 inverse[i][k] = (float)(inverse[i][k]/temp);
/* ... then eliminate the pivot column from the other rows */
556 for(j = 0; j < 4; j++) {
558 temp = tempmat[j][i];
559 for(k = 0; k < 4; k++) {
560 tempmat[j][k] -= (float)(tempmat[i][k]*temp);
561 inverse[j][k] -= (float)(inverse[i][k]*temp);
569 /****************************** Linear Algebra *******************************/
/* Transpose the 3x3 matrix mat in place. */
void transpose_m3(float mat[][3])
{
	float t;
	int i, j;

	for (i = 0; i < 3; i++) {
		for (j = i + 1; j < 3; j++) {
			t = mat[i][j];
			mat[i][j] = mat[j][i];
			mat[j][i] = t;
		}
	}
}
/* Transpose the 4x4 matrix mat in place. */
void transpose_m4(float mat[][4])
{
	float t;
	int i, j;

	for (i = 0; i < 4; i++) {
		for (j = i + 1; j < 4; j++) {
			t = mat[i][j];
			mat[i][j] = mat[j][i];
			mat[j][i] = t;
		}
	}
}
/* Make the 3x3 basis in mat orthogonal, keeping the direction of the row
 * selected by 'axis' and restoring the original row lengths afterwards.
 * NOTE(review): this chunk is elided — the switch(axis) structure, the
 * fallback 'vec' setup and several else/closing braces are missing from
 * view; comments below annotate only the visible lines. */
612 void orthogonalize_m3(float mat[][3], int axis)
615 mat3_to_size(size, mat);
616 normalize_v3(mat[axis]);
/* axis == 0: rebuild rows 1 and 2 from row 0 */
620 if (dot_v3v3(mat[0], mat[1]) < 1) {
621 cross_v3_v3v3(mat[2], mat[0], mat[1]);
622 normalize_v3(mat[2]);
623 cross_v3_v3v3(mat[1], mat[2], mat[0]);
624 } else if (dot_v3v3(mat[0], mat[2]) < 1) {
625 cross_v3_v3v3(mat[1], mat[2], mat[0]);
626 normalize_v3(mat[1]);
627 cross_v3_v3v3(mat[2], mat[0], mat[1]);
/* fallback when both rows are degenerate w.r.t. row 0 */
635 cross_v3_v3v3(mat[2], mat[0], vec);
636 normalize_v3(mat[2]);
637 cross_v3_v3v3(mat[1], mat[2], mat[0]);
/* axis == 1: rebuild rows 0 and 2 from row 1 */
640 if (dot_v3v3(mat[1], mat[0]) < 1) {
641 cross_v3_v3v3(mat[2], mat[0], mat[1]);
642 normalize_v3(mat[2]);
643 cross_v3_v3v3(mat[0], mat[1], mat[2]);
644 } else if (dot_v3v3(mat[0], mat[2]) < 1) {
645 cross_v3_v3v3(mat[0], mat[1], mat[2]);
646 normalize_v3(mat[0]);
647 cross_v3_v3v3(mat[2], mat[0], mat[1]);
655 cross_v3_v3v3(mat[0], mat[1], vec);
656 normalize_v3(mat[0]);
657 cross_v3_v3v3(mat[2], mat[0], mat[1]);
/* axis == 2: rebuild rows 0 and 1 from row 2 */
660 if (dot_v3v3(mat[2], mat[0]) < 1) {
661 cross_v3_v3v3(mat[1], mat[2], mat[0]);
662 normalize_v3(mat[1]);
663 cross_v3_v3v3(mat[0], mat[1], mat[2]);
664 } else if (dot_v3v3(mat[2], mat[1]) < 1) {
665 cross_v3_v3v3(mat[0], mat[1], mat[2]);
666 normalize_v3(mat[0]);
667 cross_v3_v3v3(mat[1], mat[2], mat[0]);
675 cross_v3_v3v3(mat[0], vec, mat[2]);
676 normalize_v3(mat[0]);
677 cross_v3_v3v3(mat[1], mat[2], mat[0]);
/* restore per-row scale captured before orthogonalization */
680 mul_v3_fl(mat[0], size[0]);
681 mul_v3_fl(mat[1], size[1]);
682 mul_v3_fl(mat[2], size[2]);
/* 4x4 counterpart of orthogonalize_m3: orthogonalize the 3x3 basis of mat
 * keeping the row selected by 'axis', then restore the row lengths.
 * NOTE(review): this chunk is elided — the switch(axis) structure, 'vec'
 * setup and several braces are missing from view. */
685 void orthogonalize_m4(float mat[][4], int axis)
688 mat4_to_size(size, mat);
689 normalize_v3(mat[axis]);
/* axis == 0 */
693 if (dot_v3v3(mat[0], mat[1]) < 1) {
694 cross_v3_v3v3(mat[2], mat[0], mat[1]);
695 normalize_v3(mat[2]);
696 cross_v3_v3v3(mat[1], mat[2], mat[0]);
697 } else if (dot_v3v3(mat[0], mat[2]) < 1) {
698 cross_v3_v3v3(mat[1], mat[2], mat[0]);
699 normalize_v3(mat[1]);
700 cross_v3_v3v3(mat[2], mat[0], mat[1]);
708 cross_v3_v3v3(mat[2], mat[0], vec);
709 normalize_v3(mat[2]);
710 cross_v3_v3v3(mat[1], mat[2], mat[0]);
/* axis == 1 */
713 normalize_v3(mat[0]);
714 if (dot_v3v3(mat[1], mat[0]) < 1) {
715 cross_v3_v3v3(mat[2], mat[0], mat[1]);
716 normalize_v3(mat[2]);
717 cross_v3_v3v3(mat[0], mat[1], mat[2]);
718 } else if (dot_v3v3(mat[0], mat[2]) < 1) {
719 cross_v3_v3v3(mat[0], mat[1], mat[2]);
720 normalize_v3(mat[0]);
721 cross_v3_v3v3(mat[2], mat[0], mat[1]);
729 cross_v3_v3v3(mat[0], mat[1], vec);
730 normalize_v3(mat[0]);
731 cross_v3_v3v3(mat[2], mat[0], mat[1]);
/* axis == 2 */
734 if (dot_v3v3(mat[2], mat[0]) < 1) {
735 cross_v3_v3v3(mat[1], mat[2], mat[0]);
736 normalize_v3(mat[1]);
737 cross_v3_v3v3(mat[0], mat[1], mat[2]);
738 } else if (dot_v3v3(mat[2], mat[1]) < 1) {
739 cross_v3_v3v3(mat[0], mat[1], mat[2]);
740 normalize_v3(mat[0]);
741 cross_v3_v3v3(mat[1], mat[2], mat[0]);
749 cross_v3_v3v3(mat[0], vec, mat[2]);
750 normalize_v3(mat[0]);
751 cross_v3_v3v3(mat[1], mat[2], mat[0]);
/* restore per-row scale captured before orthogonalization */
754 mul_v3_fl(mat[0], size[0]);
755 mul_v3_fl(mat[1], size[1]);
756 mul_v3_fl(mat[2], size[2]);
/* Return non-zero when the rows of mat are pairwise orthogonal within
 * 1.5 * FLT_EPSILON (row lengths are not checked).
 * NOTE(review): the return statements were elided in the source seen
 * here and are reconstructed from the obvious pattern. */
int is_orthogonal_m3(float mat[][3])
{
	if (fabsf(dot_v3v3(mat[0], mat[1])) > 1.5f * FLT_EPSILON)
		return 0;

	if (fabsf(dot_v3v3(mat[1], mat[2])) > 1.5f * FLT_EPSILON)
		return 0;

	if (fabsf(dot_v3v3(mat[0], mat[2])) > 1.5f * FLT_EPSILON)
		return 0;

	return 1;
}
/* Return non-zero when the first three rows of mat are pairwise
 * orthogonal within 1.5 * FLT_EPSILON (translation row ignored).
 * NOTE(review): return statements reconstructed; elided in the source
 * seen here. */
int is_orthogonal_m4(float mat[][4])
{
	if (fabsf(dot_v3v3(mat[0], mat[1])) > 1.5f * FLT_EPSILON)
		return 0;

	if (fabsf(dot_v3v3(mat[1], mat[2])) > 1.5f * FLT_EPSILON)
		return 0;

	if (fabsf(dot_v3v3(mat[0], mat[2])) > 1.5f * FLT_EPSILON)
		return 0;

	return 1;
}
/* Normalize each row of the 3x3 matrix mat in place. */
void normalize_m3(float mat[][3])
{
	int i;

	for (i = 0; i < 3; i++) {
		normalize_v3(mat[i]);
	}
}
/* rmat = mat with each row normalized (mat is left untouched). */
void normalize_m3_m3(float rmat[][3], float mat[][3])
{
	int i;

	for (i = 0; i < 3; i++) {
		normalize_v3_v3(rmat[i], mat[i]);
	}
}
/* Normalize the first three rows of mat in place, scaling each row's
 * 4th component by the same factor so homogeneous data stays consistent.
 * Rows of zero length leave their 4th component untouched. */
void normalize_m4(float mat[][4])
{
	float len;
	int i;

	for (i = 0; i < 3; i++) {
		len = normalize_v3(mat[i]);
		if (len != 0.0f) {
			mat[i][3] /= len;
		}
	}
}
/* rmat = mat with the first three rows normalized; each row's 4th
 * component is divided by the same length. Zero-length rows leave the
 * 4th component of rmat unwritten, as in the original. */
void normalize_m4_m4(float rmat[][4], float mat[][4])
{
	float len;
	int i;

	for (i = 0; i < 3; i++) {
		len = normalize_v3_v3(rmat[i], mat[i]);
		if (len != 0.0f) {
			rmat[i][3] = mat[i][3] / len;
		}
	}
}
/* Store the adjoint (transpose of the cofactor matrix) of m in m1.
 * m1 must not alias m — m is read after m1 is written. */
void adjoint_m3_m3(float m1[][3], float m[][3])
{
	m1[0][0] =  m[1][1]*m[2][2] - m[1][2]*m[2][1];
	m1[0][1] = -m[0][1]*m[2][2] + m[0][2]*m[2][1];
	m1[0][2] =  m[0][1]*m[1][2] - m[0][2]*m[1][1];

	m1[1][0] = -m[1][0]*m[2][2] + m[1][2]*m[2][0];
	m1[1][1] =  m[0][0]*m[2][2] - m[0][2]*m[2][0];
	m1[1][2] = -m[0][0]*m[1][2] + m[0][2]*m[1][0];

	m1[2][0] =  m[1][0]*m[2][1] - m[1][1]*m[2][0];
	m1[2][1] = -m[0][0]*m[2][1] + m[0][1]*m[2][0];
	m1[2][2] =  m[0][0]*m[1][1] - m[0][1]*m[1][0];
}
841 void adjoint_m4_m4(float out[][4], float in[][4]) /* out = ADJ(in) */
843 float a1, a2, a3, a4, b1, b2, b3, b4;
844 float c1, c2, c3, c4, d1, d2, d3, d4;
/* NOTE(review): the assignments loading a1..d4 from 'in' (original lines
 * ~846-865) are elided in this chunk — presumably row-wise copies of the
 * 4x4 input; confirm against upstream. The cofactor expansion below
 * builds the adjoint column by column (already transposed). */
867 out[0][0] = determinant_m3(b2, b3, b4, c2, c3, c4, d2, d3, d4);
868 out[1][0] = - determinant_m3(a2, a3, a4, c2, c3, c4, d2, d3, d4);
869 out[2][0] = determinant_m3(a2, a3, a4, b2, b3, b4, d2, d3, d4);
870 out[3][0] = - determinant_m3(a2, a3, a4, b2, b3, b4, c2, c3, c4);
872 out[0][1] = - determinant_m3(b1, b3, b4, c1, c3, c4, d1, d3, d4);
873 out[1][1] = determinant_m3(a1, a3, a4, c1, c3, c4, d1, d3, d4);
874 out[2][1] = - determinant_m3(a1, a3, a4, b1, b3, b4, d1, d3, d4);
875 out[3][1] = determinant_m3(a1, a3, a4, b1, b3, b4, c1, c3, c4);
877 out[0][2] = determinant_m3(b1, b2, b4, c1, c2, c4, d1, d2, d4);
878 out[1][2] = - determinant_m3(a1, a2, a4, c1, c2, c4, d1, d2, d4);
879 out[2][2] = determinant_m3(a1, a2, a4, b1, b2, b4, d1, d2, d4);
880 out[3][2] = - determinant_m3(a1, a2, a4, b1, b2, b4, c1, c2, c4);
882 out[0][3] = - determinant_m3(b1, b2, b3, c1, c2, c3, d1, d2, d3);
883 out[1][3] = determinant_m3(a1, a2, a3, c1, c2, c3, d1, d2, d3);
884 out[2][3] = - determinant_m3(a1, a2, a3, b1, b2, b3, d1, d2, d3);
885 out[3][3] = determinant_m3(a1, a2, a3, b1, b2, b3, c1, c2, c3);
/* Determinant of the 2x2 matrix  | a b |
 *                                | c d |
 * NOTE(review): the body was elided in the source seen here; the
 * standard a*d - b*c is reconstructed (it matches how callers use this
 * as a 2x2 minor). */
float determinant_m2(float a, float b, float c, float d)
{
	return a*d - b*c;
}
/* Determinant of the 3x3 matrix  | a1 a2 a3 |
 *                                | b1 b2 b3 |
 *                                | c1 c2 c3 |
 * Cofactor expansion along the first column; the 2x2 minors are stored
 * in float temporaries so rounding matches a determinant_m2() call. */
float determinant_m3(float a1, float a2, float a3,
                     float b1, float b2, float b3,
                     float c1, float c2, float c3)
{
	const float minor_a = b2*c3 - b3*c2;
	const float minor_b = a2*c3 - a3*c2;
	const float minor_c = a2*b3 - a3*b2;

	return a1*minor_a - b1*minor_b + c1*minor_c;
}
/* Determinant of a 4x4 matrix by cofactor expansion along the first
 * column, delegating the 3x3 minors to determinant_m3().
 * NOTE(review): the assignments loading a1..d4 from m (original lines
 * ~912-930) are elided in this chunk; confirm against upstream. */
907 float determinant_m4(float m[][4])
910 float a1,a2,a3,a4,b1,b2,b3,b4,c1,c2,c3,c4,d1,d2,d3,d4;
932 ans = a1 * determinant_m3(b2, b3, b4, c2, c3, c4, d2, d3, d4)
933 - b1 * determinant_m3(a2, a3, a4, c2, c3, c4, d2, d3, d4)
934 + c1 * determinant_m3(a2, a3, a4, b2, b3, b4, d2, d3, d4)
935 - d1 * determinant_m3(a2, a3, a4, b2, b3, b4, c2, c3, c4);
940 /****************************** Transformations ******************************/
/* Presumably builds a 3x3 scale matrix with 'size' on the diagonal —
 * the body is not visible in this chunk; confirm against upstream. */
942 void size_to_mat3(float mat[][3], const float size[3])
/* Build a 4x4 scale matrix from 'size' via a temporary 3x3 matrix.
 * NOTE(review): the declaration of 'tmat' and presumably a unit_m4()
 * call are elided in this chunk; confirm against upstream. */
955 void size_to_mat4(float mat[][4], const float size[3])
959 size_to_mat3(tmat,size);
961 copy_m4_m3(mat, tmat);
/* Extract the per-axis scale of a 3x3 matrix as the length of each row. */
void mat3_to_size(float *size, float mat[][3])
{
	int i;

	for (i = 0; i < 3; i++) {
		size[i] = len_v3(mat[i]);
	}
}
/* Extract the per-axis scale of a 4x4 matrix as the length of each of
 * its first three rows (translation ignored). */
void mat4_to_size(float *size, float mat[][4])
{
	int i;

	for (i = 0; i < 3; i++) {
		size[i] = len_v3(mat[i]);
	}
}
/* Get the average scale of a matrix. Only use on scaling data that has
 * no notion of a scale axis — e.g. bone-envelope-radius and curve
 * radius. */
float mat3_to_scale(float mat[][3])
{
	/* unit-length diagonal vector (1/sqrt(3) per component) */
	float diag[3] = {0.577350269189626f, 0.577350269189626f, 0.577350269189626f};

	mul_m3_v3(mat, diag);
	return len_v3(diag);
}
/* Average scale of a 4x4 matrix: delegate to mat3_to_scale() on the
 * 3x3 rotation/scale part. */
float mat4_to_scale(float mat[][4])
{
	float m3[3][3];

	copy_m3_m4(m3, mat);
	return mat3_to_scale(m3);
}
/* Split a 3x3 matrix into a pure rotation part (rot) and per-axis scale
 * (size). Negative-determinant matrices are handled by negating the
 * normalized basis so rotation conversion doesn't break.
 * NOTE(review): some closing braces and intermediate lines are elided
 * in this chunk; comments annotate only the visible lines.
 * WARNING: the tail writes into mat3, i.e. the INPUT is modified —
 * verify callers expect that. */
997 void mat3_to_rot_size(float rot[3][3], float size[3], float mat3[3][3])
999 float mat3_n[3][3]; /* mat3 -> normalized, 3x3 */
1000 float imat3_n[3][3]; /* mat3 -> normalized & inverted, 3x3 */
1002 /* rotation & scale are linked, we need to create the mat's
1003 * for these together since they are related. */
1005 /* so scale doesnt interfear with rotation [#24291] */
1006 /* note: this is a workaround for negative matrix not working for rotation conversion, FIXME */
1007 normalize_m3_m3(mat3_n, mat3);
1008 if(is_negative_m3(mat3)) {
1009 negate_v3(mat3_n[0]);
1010 negate_v3(mat3_n[1]);
1011 negate_v3(mat3_n[2]);
1015 /* keep rot as a 3x3 matrix, the caller can convert into a quat or euler */
1016 copy_m3_m3(rot, mat3_n);
1019 /* note: mat4_to_size(ob->size, mat) fails for negative scale */
1020 invert_m3_m3(imat3_n, mat3_n);
1021 mul_m3_m3m3(mat3, imat3_n, mat3);
/* after removing rotation, the scale is on the diagonal */
1023 size[0]= mat3[0][0];
1024 size[1]= mat3[1][1];
1025 size[2]= mat3[2][2];
/* Split a 4x4 world matrix into translation (loc), rotation (rot) and
 * per-axis scale (size). */
void mat4_to_loc_rot_size(float loc[3], float rot[3][3], float size[3], float wmat[][4])
{
	float m3[3][3]; /* rotation + scale part of wmat */

	/* split rotation from scale on the 3x3 part */
	copy_m3_m4(m3, wmat);
	mat3_to_rot_size(rot, size, m3);

	/* translation lives in the fourth row */
	copy_v3_v3(loc, wmat[3]);
}
/* Make m a uniform 3x3 scale matrix: 'scale' on the diagonal, zeros
 * elsewhere. */
void scale_m3_fl(float m[][3], float scale)
{
	int i, j;

	for (i = 0; i < 3; i++) {
		for (j = 0; j < 3; j++) {
			m[i][j] = (i == j) ? scale : 0.0f;
		}
	}
}
/* Make m a uniform-scale 4x4 matrix: 'scale' on the 3x3 diagonal, a
 * homogeneous 1 at m[3][3], zeros elsewhere.
 * NOTE(review): the m[3][3] = 1 line was elided in the source seen here
 * and is reconstructed per the standard pattern. */
void scale_m4_fl(float m[][4], float scale)
{
	int i, j;

	for (i = 0; i < 4; i++) {
		for (j = 0; j < 4; j++) {
			m[i][j] = 0.0f;
		}
	}
	m[0][0] = m[1][1] = m[2][2] = scale;
	m[3][3] = 1.0f;
}
/* Apply a local-space translation (Tx, Ty, Tz) to mat: the offset is
 * rotated/scaled by the 3x3 part and added to the translation row. */
void translate_m4(float mat[][4], float Tx, float Ty, float Tz)
{
	int i;

	for (i = 0; i < 3; i++) {
		mat[3][i] += Tx*mat[0][i] + Ty*mat[1][i] + Tz*mat[2][i];
	}
}
/* Post-rotate mat in place by 'angle' radians around the axis named by
 * the character 'X'/'Y'/'Z'.
 * NOTE(review): this chunk is elided — the switch(axis)/case labels and
 * several declarations are missing from view; the three visible loop
 * pairs correspond to the X, Y and Z branches respectively. */
1064 void rotate_m4(float mat[][4], const char axis, const float angle)
1067 float temp[4]= {0.0f, 0.0f, 0.0f, 0.0f};
1070 assert(axis >= 'X' && axis <= 'Z');
1072 cosine = (float)cos(angle);
1073 sine = (float)sin(angle);
/* 'X' branch: mix rows 1 and 2 */
1076 for(col=0 ; col<4 ; col++)
1077 temp[col] = cosine*mat[1][col] + sine*mat[2][col];
1078 for(col=0 ; col<4 ; col++) {
1079 mat[2][col] = - sine*mat[1][col] + cosine*mat[2][col];
1080 mat[1][col] = temp[col];
/* 'Y' branch: mix rows 0 and 2 */
1085 for(col=0 ; col<4 ; col++)
1086 temp[col] = cosine*mat[0][col] - sine*mat[2][col];
1087 for(col=0 ; col<4 ; col++) {
1088 mat[2][col] = sine*mat[0][col] + cosine*mat[2][col];
1089 mat[0][col] = temp[col];
/* 'Z' branch: mix rows 0 and 1 */
1094 for(col=0 ; col<4 ; col++)
1095 temp[col] = cosine*mat[0][col] + sine*mat[1][col];
1096 for(col=0 ; col<4 ; col++) {
1097 mat[1][col] = - sine*mat[0][col] + cosine*mat[1][col];
1098 mat[0][col] = temp[col];
/* Blend two 3x3 matrices: decompose both into rotation (as quaternion)
 * and scale, interpolate each by srcweight (0 = dst, 1 = src), then
 * recompose out = rot * scale. */
1104 void blend_m3_m3m3(float out[][3], float dst[][3], float src[][3], const float srcweight)
1106 float srot[3][3], drot[3][3];
1107 float squat[4], dquat[4], fquat[4];
1108 float ssize[3], dsize[3], fsize[3];
1109 float rmat[3][3], smat[3][3];
1111 mat3_to_rot_size(drot, dsize, dst);
1112 mat3_to_rot_size(srot, ssize, src);
1114 mat3_to_quat(dquat, drot);
1115 mat3_to_quat(squat, srot);
/* interpolate rotation and scale separately */
1118 interp_qt_qtqt(fquat, dquat, squat, srcweight);
1119 interp_v3_v3v3(fsize, dsize, ssize, srcweight);
1121 /* compose new matrix */
1122 quat_to_mat3(rmat,fquat);
1123 size_to_mat3(smat,fsize);
1124 mul_m3_m3m3(out, rmat, smat);
/* Blend two 4x4 matrices: decompose into location, rotation (quaternion)
 * and scale, interpolate each by srcweight (0 = dst, 1 = src), then
 * recompose with loc_quat_size_to_mat4(). */
1127 void blend_m4_m4m4(float out[][4], float dst[][4], float src[][4], const float srcweight)
1129 float sloc[3], dloc[3], floc[3];
1130 float srot[3][3], drot[3][3];
1131 float squat[4], dquat[4], fquat[4];
1132 float ssize[3], dsize[3], fsize[3];
1134 mat4_to_loc_rot_size(dloc, drot, dsize, dst);
1135 mat4_to_loc_rot_size(sloc, srot, ssize, src);
1137 mat3_to_quat(dquat, drot);
1138 mat3_to_quat(squat, srot);
/* interpolate location, rotation and scale separately */
1141 interp_v3_v3v3(floc, dloc, sloc, srcweight);
1142 interp_qt_qtqt(fquat, dquat, squat, srcweight);
1143 interp_v3_v3v3(fsize, dsize, ssize, srcweight);
1145 /* compose new matrix */
1146 loc_quat_size_to_mat4(out, floc, fquat, fsize);
/* Return non-zero when mat is mirrored/left-handed, i.e. the scalar
 * triple product of its rows (the determinant's sign) is negative. */
int is_negative_m3(float mat[][3])
{
	float cr[3];

	cross_v3_v3v3(cr, mat[0], mat[1]);
	return (dot_v3v3(cr, mat[2]) < 0.0f);
}
/* 4x4 variant of is_negative_m3: checks only the 3x3 rotation/scale
 * rows; translation plays no part in handedness. */
int is_negative_m4(float mat[][4])
{
	float cr[3];

	cross_v3_v3v3(cr, mat[0], mat[1]);
	return (dot_v3v3(cr, mat[2]) < 0.0f);
}
1164 /* make a 4x4 matrix out of 3 transform components */
1165 /* matrices are made in the order: scale * rot * loc */
1166 // TODO: need to have a version that allows for rotation order...
/* NOTE(review): this chunk is elided — presumably a unit_m4(mat) call
 * and the copy of loc into mat[3] are missing from view. */
1167 void loc_eul_size_to_mat4(float mat[4][4], const float loc[3], const float eul[3], const float size[3])
1169 float rmat[3][3], smat[3][3], tmat[3][3];
1171 /* initialise new matrix */
1174 /* make rotation + scaling part */
1175 eul_to_mat3(rmat,eul);
1176 size_to_mat3(smat,size);
1177 mul_m3_m3m3(tmat, rmat, smat);
1179 /* copy rot/scale part to output matrix*/
1180 copy_m4_m3(mat, tmat);
1182 /* copy location to matrix */
1188 /* make a 4x4 matrix out of 3 transform components */
1189 /* matrices are made in the order: scale * rot * loc */
/* Same as loc_eul_size_to_mat4() but with an explicit euler rotation
 * order (rotOrder).
 * NOTE(review): elided like its sibling — the matrix init and the copy
 * of loc into mat[3] are missing from view. */
1190 void loc_eulO_size_to_mat4(float mat[4][4], const float loc[3], const float eul[3], const float size[3], const short rotOrder)
1192 float rmat[3][3], smat[3][3], tmat[3][3];
1194 /* initialise new matrix */
1197 /* make rotation + scaling part */
1198 eulO_to_mat3(rmat,eul, rotOrder);
1199 size_to_mat3(smat,size);
1200 mul_m3_m3m3(tmat, rmat, smat);
1202 /* copy rot/scale part to output matrix*/
1203 copy_m4_m3(mat, tmat);
1205 /* copy location to matrix */
1212 /* make a 4x4 matrix out of 3 transform components */
1213 /* matrices are made in the order: scale * rot * loc */
/* Quaternion-rotation variant of loc_eul_size_to_mat4().
 * NOTE(review): elided like its siblings — the matrix init and the copy
 * of loc into mat[3] are missing from view. */
1214 void loc_quat_size_to_mat4(float mat[4][4], const float loc[3], const float quat[4], const float size[3])
1216 float rmat[3][3], smat[3][3], tmat[3][3];
1218 /* initialise new matrix */
1221 /* make rotation + scaling part */
1222 quat_to_mat3(rmat,quat);
1223 size_to_mat3(smat,size);
1224 mul_m3_m3m3(tmat, rmat, smat);
1226 /* copy rot/scale part to output matrix*/
1227 copy_m4_m3(mat, tmat);
1229 /* copy location to matrix */
/* Build a 4x4 matrix from location, an axis-angle rotation and scale,
 * by converting the axis-angle to a quaternion first. */
void loc_axisangle_size_to_mat4(float mat[4][4], const float loc[3], const float axis[3], const float angle, const float size[3])
{
	float quat[4];

	axis_angle_to_quat(quat, axis, angle);
	loc_quat_size_to_mat4(mat, loc, quat, size);
}
1242 /*********************************** Other ***********************************/
/* Debug-print a 3x3 matrix, column-major (each printed row shows one
 * matrix column), preceded by the label 'str'.
 * NOTE(review): the trailing blank-line print was elided in the source
 * seen here and is reconstructed. */
void print_m3(const char *str, float m[][3])
{
	int i;

	printf("%s\n", str);
	for (i = 0; i < 3; i++) {
		printf("%f %f %f\n", m[0][i], m[1][i], m[2][i]);
	}
	printf("\n");
}
/* Debug-print a 4x4 matrix, column-major (each printed row shows one
 * matrix column), preceded by the label 'str'.
 * NOTE(review): the trailing blank-line print was elided in the source
 * seen here and is reconstructed. */
void print_m4(const char *str, float m[][4])
{
	int i;

	printf("%s\n", str);
	for (i = 0; i < 4; i++) {
		printf("%f %f %f %f\n", m[0][i], m[1][i], m[2][i], m[3][i]);
	}
	printf("\n");
}
1263 /*********************************** SVD ************************************
1264 * from TNT matrix library
1266 * Compute the Singular Value Decomposition of an arbitrary matrix A
1267 * That is compute the 3 matrices U,W,V with U column orthogonal (m,n)
1268 * ,W a diagonal matrix and V an orthogonal square matrix s.t.
1269 * A = U.W.Vt. From this decomposition it is trivial to compute the
1270 * (pseudo-inverse) of A as Ainv = V.Winv.transpose(U).
/* Singular value decomposition of the 4x4 matrix A_ (Golub-Kahan /
 * JAMA-TNT style): bidiagonalize with Householder reflections, then
 * iterate QR steps until the superdiagonal is negligible. Outputs
 * U (left vectors), s (singular values) and V (right vectors).
 * NOTE(review): this chunk is heavily elided — declarations (A, e, m,
 * n, nu, t, g, sn, kase, ks...), many loop bodies, closing braces and
 * whole branches are missing from view; comments below annotate only
 * what is visible. */
1273 void svd_m4(float U[4][4], float s[4], float V[4][4], float A_[4][4])
1276 float work1[4], work2[4];
1282 float *work = work1;
1286 int i=0, j=0, k=0, p, pp, iter;
1288 // Reduce A to bidiagonal form, storing the diagonal elements
1289 // in s and the super-diagonal elements in e.
1291 int nct = minf(m-1,n);
1292 int nrt = maxf(0,minf(n-2,m));
1298 for (k = 0; k < maxf(nct,nrt); k++) {
1301 // Compute the transformation for the k-th column and
1302 // place the k-th diagonal in s[k].
1303 // Compute 2-norm of k-th column without under/overflow.
1305 for (i = k; i < m; i++) {
1306 s[k] = hypotf(s[k],A[i][k]);
1310 if (A[k][k] < 0.0f) {
1314 for (i = k; i < m; i++) {
1321 for (j = k+1; j < n; j++) {
1322 if ((k < nct) && (s[k] != 0.0f)) {
1324 // Apply the transformation.
1327 for (i = k; i < m; i++) {
1328 t += A[i][k]*A[i][j];
1331 for (i = k; i < m; i++) {
1332 A[i][j] += t*A[i][k];
1336 // Place the k-th row of A into e for the
1337 // subsequent calculation of the row transformation.
1343 // Place the transformation in U for subsequent back
1346 for (i = k; i < m; i++)
1351 // Compute the k-th row transformation and place the
1352 // k-th super-diagonal in e[k].
1353 // Compute 2-norm without under/overflow.
1355 for (i = k+1; i < n; i++) {
1356 e[k] = hypotf(e[k],e[i]);
1360 if (e[k+1] < 0.0f) {
1364 for (i = k+1; i < n; i++) {
/* NOTE(review): bitwise & below (not &&) — harmless on these 0/1
 * operands but worth confirming it's intentional */
1370 if ((k+1 < m) & (e[k] != 0.0f)) {
1373 // Apply the transformation.
1375 for (i = k+1; i < m; i++) {
1378 for (j = k+1; j < n; j++) {
1379 for (i = k+1; i < m; i++) {
1380 work[i] += e[j]*A[i][j];
1383 invek1 = 1.0f/e[k+1];
1384 for (j = k+1; j < n; j++) {
1385 float t = -e[j]*invek1;
1386 for (i = k+1; i < m; i++) {
1387 A[i][j] += t*work[i];
1392 // Place the transformation in V for subsequent
1393 // back multiplication.
1395 for (i = k+1; i < n; i++)
1400 // Set up the final bidiagonal matrix or order p.
1404 s[nct] = A[nct][nct];
1410 e[nrt] = A[nrt][p-1];
1414 // If required, generate U.
1416 for (j = nct; j < nu; j++) {
1417 for (i = 0; i < m; i++) {
1422 for (k = nct-1; k >= 0; k--) {
1424 for (j = k+1; j < nu; j++) {
1426 for (i = k; i < m; i++) {
1427 t += U[i][k]*U[i][j];
1430 for (i = k; i < m; i++) {
1431 U[i][j] += t*U[i][k];
1434 for (i = k; i < m; i++ ) {
1437 U[k][k] = 1.0f + U[k][k];
1438 for (i = 0; i < k-1; i++) {
1442 for (i = 0; i < m; i++) {
1449 // If required, generate V.
1451 for (k = n-1; k >= 0; k--) {
1452 if ((k < nrt) & (e[k] != 0.0f)) {
1453 for (j = k+1; j < nu; j++) {
1455 for (i = k+1; i < n; i++) {
1456 t += V[i][k]*V[i][j];
1459 for (i = k+1; i < n; i++) {
1460 V[i][j] += t*V[i][k];
1464 for (i = 0; i < n; i++) {
1470 // Main iteration loop for the singular values.
1474 eps = powf(2.0f,-52.0f);
1478 // Test for maximum iterations to avoid infinite loop
1483 // This section of the program inspects for
1484 // negligible elements in the s and e arrays. On
1485 // completion the variables kase and k are set as follows.
1487 // kase = 1 if s(p) and e[k-1] are negligible and k<p
1488 // kase = 2 if s(k) is negligible and k<p
1489 // kase = 3 if e[k-1] is negligible, k<p, and
1490 // s(k), ..., s(p) are not negligible (qr step).
1491 // kase = 4 if e(p-1) is negligible (convergence).
1493 for (k = p-2; k >= -1; k--) {
1497 if (fabsf(e[k]) <= eps*(fabsf(s[k]) + fabsf(s[k+1]))) {
1506 for (ks = p-1; ks >= k; ks--) {
1511 t = (ks != p ? fabsf(e[ks]) : 0.f) +
1512 (ks != k+1 ? fabsf(e[ks-1]) : 0.0f);
1513 if (fabsf(s[ks]) <= eps*t) {
1520 } else if (ks == p-1) {
1529 // Perform the task indicated by kase.
1533 // Deflate negligible s(p).
1538 for (j = p-2; j >= k; j--) {
1539 float t = hypotf(s[j],f);
1540 float invt = 1.0f/t;
1541 float cs = s[j]*invt;
1549 for (i = 0; i < n; i++) {
1550 t = cs*V[i][j] + sn*V[i][p-1];
1551 V[i][p-1] = -sn*V[i][j] + cs*V[i][p-1];
1558 // Split at negligible s(k).
1563 for (j = k; j < p; j++) {
1564 float t = hypotf(s[j],f);
1565 float invt = 1.0f/t;
1566 float cs = s[j]*invt;
1572 for (i = 0; i < m; i++) {
1573 t = cs*U[i][j] + sn*U[i][k-1];
1574 U[i][k-1] = -sn*U[i][j] + cs*U[i][k-1];
1581 // Perform one qr step.
1585 // Calculate the shift.
1587 float scale = maxf(maxf(maxf(maxf(
1588 fabsf(s[p-1]),fabsf(s[p-2])),fabsf(e[p-2])),
1589 fabsf(s[k])),fabsf(e[k]));
1590 float invscale = 1.0f/scale;
1591 float sp = s[p-1]*invscale;
1592 float spm1 = s[p-2]*invscale;
1593 float epm1 = e[p-2]*invscale;
1594 float sk = s[k]*invscale;
1595 float ek = e[k]*invscale;
1596 float b = ((spm1 + sp)*(spm1 - sp) + epm1*epm1)*0.5f;
1597 float c = (sp*epm1)*(sp*epm1);
1600 if ((b != 0.0f) || (c != 0.0f)) {
1601 shift = sqrtf(b*b + c);
1605 shift = c/(b + shift);
1607 f = (sk + sp)*(sk - sp) + shift;
/* Givens rotations chasing the bulge down the bidiagonal */
1612 for (j = k; j < p-1; j++) {
1613 float t = hypotf(f,g);
1614 /* division by zero checks added to avoid NaN (brecht) */
1615 float cs = (t == 0.0f)? 0.0f: f/t;
1616 float sn = (t == 0.0f)? 0.0f: g/t;
1620 f = cs*s[j] + sn*e[j];
1621 e[j] = cs*e[j] - sn*s[j];
1625 for (i = 0; i < n; i++) {
1626 t = cs*V[i][j] + sn*V[i][j+1];
1627 V[i][j+1] = -sn*V[i][j] + cs*V[i][j+1];
1632 /* division by zero checks added to avoid NaN (brecht) */
1633 cs = (t == 0.0f)? 0.0f: f/t;
1634 sn = (t == 0.0f)? 0.0f: g/t;
1636 f = cs*e[j] + sn*s[j+1];
1637 s[j+1] = -sn*e[j] + cs*s[j+1];
1641 for (i = 0; i < m; i++) {
1642 t = cs*U[i][j] + sn*U[i][j+1];
1643 U[i][j+1] = -sn*U[i][j] + cs*U[i][j+1];
1657 // Make the singular values positive.
/* NOTE(review): the guard around this line (presumably
 * if (s[k] <= 0.0f)) is elided from view — without it this would
 * zero positive values; confirm against upstream. */
1660 s[k] = (s[k] < 0.0f ? -s[k] : 0.0f);
1662 for (i = 0; i <= pp; i++)
1666 // Order the singular values.
1670 if (s[k] >= s[k+1]) {
/* swap the corresponding columns of V and U along with s */
1677 for (i = 0; i < n; i++) {
1678 t = V[i][k+1]; V[i][k+1] = V[i][k]; V[i][k] = t;
1682 for (i = 0; i < m; i++) {
1683 t = U[i][k+1]; U[i][k+1] = U[i][k]; U[i][k] = t;
/* Moore-Penrose pseudo-inverse of A via truncated SVD:
 * Ainv = V . diag(1/W) . transpose(U), zeroing reciprocals of singular
 * values below epsilon for numerical stability.
 * NOTE(review): the svd_m4() call, the Wm zeroing and the U-transpose
 * step are elided in this chunk; confirm against upstream. */
1696 void pseudoinverse_m4_m4(float Ainv[4][4], float A[4][4], float epsilon)
/* compute Moore-Penrose pseudo-inverse of matrix, singular values
   below epsilon are ignored for stability (truncated SVD) */
1700 float V[4][4], W[4], Wm[4][4], U[4][4];
1710 Wm[i][i]= (W[i] < epsilon)? 0.0f: 1.0f/W[i];
1714 mul_serie_m4(Ainv, U, Wm, V, NULL, NULL, NULL, NULL, NULL);