1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version. 
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * The Original Code is Copyright (C) 2006 Blender Foundation.
19  * All rights reserved.
20  *
21  * The Original Code is: all of this file.
22  *
23  * Contributor(s): none yet.
24  *
25  * ***** END GPL LICENSE BLOCK *****
26  */
27
28 /** \file blender/nodes/composite/node_composite_util.c
29  *  \ingroup nodes
30  */
31
32
33 #include "node_composite_util.h"
34
35 #include <limits.h>
36
37 CompBuf *alloc_compbuf(int sizex, int sizey, int type, int alloc)
38 {
39         CompBuf *cbuf= MEM_callocN(sizeof(CompBuf), "compbuf");
40         
41         cbuf->x= sizex;
42         cbuf->y= sizey;
43         cbuf->xrad= sizex/2;
44         cbuf->yrad= sizey/2;
45         
46         cbuf->type= type;
47         if (alloc) {
48                 if (cbuf->type==CB_RGBA)
49                         cbuf->rect= MEM_mapallocN(4*sizeof(float)*sizex*sizey, "compbuf RGBA rect");
50                 else if (cbuf->type==CB_VEC3)
51                         cbuf->rect= MEM_mapallocN(3*sizeof(float)*sizex*sizey, "compbuf Vector3 rect");
52                 else if (cbuf->type==CB_VEC2)
53                         cbuf->rect= MEM_mapallocN(2*sizeof(float)*sizex*sizey, "compbuf Vector2 rect");
54                 else
55                         cbuf->rect= MEM_mapallocN(sizeof(float)*sizex*sizey, "compbuf Fac rect");
56                 cbuf->malloc= 1;
57         }
58         cbuf->disprect.xmin = 0;
59         cbuf->disprect.ymin = 0;
60         cbuf->disprect.xmax = sizex;
61         cbuf->disprect.ymax = sizey;
62         
63         return cbuf;
64 }
65
66 CompBuf *dupalloc_compbuf(CompBuf *cbuf)
67 {
68         CompBuf *dupbuf= alloc_compbuf(cbuf->x, cbuf->y, cbuf->type, 1);
69         if (dupbuf) {
70                 memcpy(dupbuf->rect, cbuf->rect, cbuf->type*sizeof(float)*cbuf->x*cbuf->y);
71         
72                 dupbuf->xof= cbuf->xof;
73                 dupbuf->yof= cbuf->yof;
74         }       
75         return dupbuf;
76 }
77
78 /* instead of reference counting, we create a list */
79 CompBuf *pass_on_compbuf(CompBuf *cbuf)
80 {
81         CompBuf *dupbuf= (cbuf)? alloc_compbuf(cbuf->x, cbuf->y, cbuf->type, 0): NULL;
82         CompBuf *lastbuf;
83         
84         if (dupbuf) {
85                 dupbuf->rect= cbuf->rect;
86                 dupbuf->xof= cbuf->xof;
87                 dupbuf->yof= cbuf->yof;
88                 dupbuf->malloc= 0;
89                 
90                 /* get last buffer in list, and append dupbuf */
91                 for (lastbuf= cbuf; lastbuf; lastbuf= lastbuf->next)
92                         if (lastbuf->next==NULL)
93                                 break;
94                 lastbuf->next= dupbuf;
95                 dupbuf->prev= lastbuf;
96         }       
97         return dupbuf;
98 }
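/* Note on the sharing scheme above: pass_on_compbuf() links a new CompBuf header into the
 * owner's doubly linked list while pointing at the same float rect (malloc == 0).
 * free_compbuf() below unlinks one header and, if it owned the data, hands the 'malloc'
 * flag over to a remaining list member, so the rect is only freed by its last user. */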
99
100
101 void free_compbuf(CompBuf *cbuf)
102 {
103         /* check referencing, then remove from list and set malloc tag */
104         if (cbuf->prev || cbuf->next) {
105                 if (cbuf->prev)
106                         cbuf->prev->next= cbuf->next;
107                 if (cbuf->next)
108                         cbuf->next->prev= cbuf->prev;
109                 if (cbuf->malloc) {
110                         if (cbuf->prev)
111                                 cbuf->prev->malloc= 1;
112                         else
113                                 cbuf->next->malloc= 1;
114                         cbuf->malloc= 0;
115                 }
116         }
117         
118         if (cbuf->malloc && cbuf->rect)
119                 MEM_freeN(cbuf->rect);
120
121         MEM_freeN(cbuf);
122 }
123
124 void print_compbuf(char *str, CompBuf *cbuf)
125 {
126         printf("Compbuf %s %d %d %p\n", str, cbuf->x, cbuf->y, (void *)cbuf->rect);
127         
128 }
129
130 void compbuf_set_node(CompBuf *cbuf, bNode *node)
131 {
132         if (cbuf) cbuf->node = node;
133 }
134
135
136 CompBuf *get_cropped_compbuf(rcti *drect, float *rectf, int rectx, int recty, int type)
137 {
138         CompBuf *cbuf;
139         rcti disprect= *drect;
140         float *outfp;
141         int dx, y;
142         
143         if (disprect.xmax>rectx) disprect.xmax = rectx;
144         if (disprect.ymax>recty) disprect.ymax = recty;
145         if (disprect.xmin>= disprect.xmax) return NULL;
146         if (disprect.ymin>= disprect.ymax) return NULL;
147         
148         cbuf= alloc_compbuf(disprect.xmax-disprect.xmin, disprect.ymax-disprect.ymin, type, 1);
149         outfp= cbuf->rect;
150         rectf += type*(disprect.ymin*rectx + disprect.xmin);
151         dx= type*cbuf->x;
152         for (y=cbuf->y; y>0; y--, outfp+=dx, rectf+=type*rectx)
153                 memcpy(outfp, rectf, sizeof(float)*dx);
154         
155         return cbuf;
156 }
157
158 CompBuf *scalefast_compbuf(CompBuf *inbuf, int newx, int newy)
159 {
160         CompBuf *outbuf; 
161         float *rectf, *newrectf, *rf;
162         int x, y, c, pixsize= inbuf->type;
163         int ofsx, ofsy, stepx, stepy;
164         
165         if (inbuf->x==newx && inbuf->y==newy)
166                 return dupalloc_compbuf(inbuf);
167         
168         outbuf= alloc_compbuf(newx, newy, inbuf->type, 1);
169         newrectf= outbuf->rect;
170         
171         stepx = (65536.0 * (inbuf->x - 1.0) / (newx - 1.0)) + 0.5;
172         stepy = (65536.0 * (inbuf->y - 1.0) / (newy - 1.0)) + 0.5;
173         ofsy = 32768;
174         
175         for (y = newy; y > 0 ; y--) {
176                 rectf = inbuf->rect;
177                 rectf += pixsize * (ofsy >> 16) * inbuf->x;
178
179                 ofsy += stepy;
180                 ofsx = 32768;
181                 
182                 for (x = newx ; x>0 ; x--) {
183                         
184                         rf= rectf + pixsize*(ofsx >> 16);
185                         for (c=0; c<pixsize; c++)
186                                 newrectf[c] = rf[c];
187                         
188                         newrectf+= pixsize;
189                         
190                         ofsx += stepx;
191                 }
192         }
193         
194         return outbuf;
195 }
196
197 void typecheck_compbuf_color(float *out, float *in, int outtype, int intype)
198 {
199         if (intype == outtype) {
200                 memcpy(out, in, sizeof(float)*outtype);
201         }
202         else if (outtype==CB_VAL) {
203                 if (intype==CB_VEC2) {
204                         *out= 0.5f*(in[0]+in[1]);
205                 }
206                 else if (intype==CB_VEC3) {
207                         *out= 0.333333f*(in[0]+in[1]+in[2]);
208                 }
209                 else if (intype==CB_RGBA) {
210                         *out= in[0]*0.35f + in[1]*0.45f + in[2]*0.2f;
211                 }
212         }
213         else if (outtype==CB_VEC2) {
214                 if (intype==CB_VAL) {
215                         out[0]= in[0];
216                         out[1]= in[0];
217                 }
218                 else if (intype==CB_VEC3) {
219                         out[0]= in[0];
220                         out[1]= in[1];
221                 }
222                 else if (intype==CB_RGBA) {
223                         out[0]= in[0];
224                         out[1]= in[1];
225                 }
226         }
227         else if (outtype==CB_VEC3) {
228                 if (intype==CB_VAL) {
229                         out[0]= in[0];
230                         out[1]= in[0];
231                         out[2]= in[0];
232                 }
233                 else if (intype==CB_VEC2) {
234                         out[0]= in[0];
235                         out[1]= in[1];
236                         out[2]= 0.0f;
237                 }
238                 else if (intype==CB_RGBA) {
239                         out[0]= in[0];
240                         out[1]= in[1];
241                         out[2]= in[2];
242                 }
243         }
244         else if (outtype==CB_RGBA) {
245                 if (intype==CB_VAL) {
246                         out[0]= in[0];
247                         out[1]= in[0];
248                         out[2]= in[0];
249                         out[3]= 1.0f;
250                 }
251                 else if (intype==CB_VEC2) {
252                         out[0]= in[0];
253                         out[1]= in[1];
254                         out[2]= 0.0f;
255                         out[3]= 1.0f;
256                 }
257                 else if (intype==CB_VEC3) {
258                         out[0]= in[0];
259                         out[1]= in[1];
260                         out[2]= in[2];
261                         out[3]= 1.0f;
262                 }
263         }
264 }
265
266 CompBuf *typecheck_compbuf(CompBuf *inbuf, int type)
267 {
268         if (inbuf && inbuf->type!=type) {
269                 CompBuf *outbuf;
270                 float *inrf, *outrf;
271                 int x;
272
273                 outbuf= alloc_compbuf(inbuf->x, inbuf->y, type, 1); 
274
275                 /* warning note: xof and yof are applied in pixelprocessor, but should be copied otherwise? */
276                 outbuf->xof= inbuf->xof;
277                 outbuf->yof= inbuf->yof;
278
279                 if (inbuf->rect_procedural) {
280                         outbuf->rect_procedural= inbuf->rect_procedural;
281                         copy_v3_v3(outbuf->procedural_size, inbuf->procedural_size);
282                         copy_v3_v3(outbuf->procedural_offset, inbuf->procedural_offset);
283                         outbuf->procedural_type= inbuf->procedural_type;
284                         outbuf->node= inbuf->node;
285                         return outbuf;
286                 }
287
288                 inrf= inbuf->rect;
289                 outrf= outbuf->rect;
290                 x= inbuf->x*inbuf->y;
291                 
292                 if (type==CB_VAL) {
293                         if (inbuf->type==CB_VEC2) {
294                                 for (; x>0; x--, outrf+= 1, inrf+= 2)
295                                         *outrf= 0.5f*(inrf[0]+inrf[1]);
296                         }
297                         else if (inbuf->type==CB_VEC3) {
298                                 for (; x>0; x--, outrf+= 1, inrf+= 3)
299                                         *outrf= 0.333333f*(inrf[0]+inrf[1]+inrf[2]);
300                         }
301                         else if (inbuf->type==CB_RGBA) {
302                                 for (; x>0; x--, outrf+= 1, inrf+= 4)
303                                         *outrf= inrf[0]*0.35f + inrf[1]*0.45f + inrf[2]*0.2f;
304                         }
305                 }
306                 else if (type==CB_VEC2) {
307                         if (inbuf->type==CB_VAL) {
308                                 for (; x>0; x--, outrf+= 2, inrf+= 1) {
309                                         outrf[0]= inrf[0];
310                                         outrf[1]= inrf[0];
311                                 }
312                         }
313                         else if (inbuf->type==CB_VEC3) {
314                                 for (; x>0; x--, outrf+= 2, inrf+= 3) {
315                                         outrf[0]= inrf[0];
316                                         outrf[1]= inrf[1];
317                                 }
318                         }
319                         else if (inbuf->type==CB_RGBA) {
320                                 for (; x>0; x--, outrf+= 2, inrf+= 4) {
321                                         outrf[0]= inrf[0];
322                                         outrf[1]= inrf[1];
323                                 }
324                         }
325                 }
326                 else if (type==CB_VEC3) {
327                         if (inbuf->type==CB_VAL) {
328                                 for (; x>0; x--, outrf+= 3, inrf+= 1) {
329                                         outrf[0]= inrf[0];
330                                         outrf[1]= inrf[0];
331                                         outrf[2]= inrf[0];
332                                 }
333                         }
334                         else if (inbuf->type==CB_VEC2) {
335                                 for (; x>0; x--, outrf+= 3, inrf+= 2) {
336                                         outrf[0]= inrf[0];
337                                         outrf[1]= inrf[1];
338                                         outrf[2]= 0.0f;
339                                 }
340                         }
341                         else if (inbuf->type==CB_RGBA) {
342                                 for (; x>0; x--, outrf+= 3, inrf+= 4) {
343                                         outrf[0]= inrf[0];
344                                         outrf[1]= inrf[1];
345                                         outrf[2]= inrf[2];
346                                 }
347                         }
348                 }
349                 else if (type==CB_RGBA) {
350                         if (inbuf->type==CB_VAL) {
351                                 for (; x>0; x--, outrf+= 4, inrf+= 1) {
352                                         outrf[0]= inrf[0];
353                                         outrf[1]= inrf[0];
354                                         outrf[2]= inrf[0];
355                                         outrf[3]= 1.0f;
356                                 }
357                         }
358                         else if (inbuf->type==CB_VEC2) {
359                                 for (; x>0; x--, outrf+= 4, inrf+= 2) {
360                                         outrf[0]= inrf[0];
361                                         outrf[1]= inrf[1];
362                                         outrf[2]= 0.0f;
363                                         outrf[3]= 1.0f;
364                                 }
365                         }
366                         else if (inbuf->type==CB_VEC3) {
367                                 for (; x>0; x--, outrf+= 4, inrf+= 3) {
368                                         outrf[0]= inrf[0];
369                                         outrf[1]= inrf[1];
370                                         outrf[2]= inrf[2];
371                                         outrf[3]= 1.0f;
372                                 }
373                         }
374                 }
375                 
376                 return outbuf;
377         }
378         return inbuf;
379 }
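/* Note: when the types already match, typecheck_compbuf() returns the input buffer itself,
 * so callers compare the result against the original and only free it when a converted
 * copy was actually made (see e.g. generate_preview() below). */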
380
381 float *compbuf_get_pixel(CompBuf *cbuf, float *defcol, float *use, int x, int y, int xrad, int yrad)
382 {
383         if (cbuf) {
384                 if (cbuf->rect_procedural) {
385                         cbuf->rect_procedural(cbuf, use, (float)x/(float)xrad, (float)y/(float)yrad);
386                         return use;
387                 }
388                 else {
389                         static float col[4]= {0.0f, 0.0f, 0.0f, 0.0f};
390                         
391                         /* map coords */
392                         x-= cbuf->xof;
393                         y-= cbuf->yof;
394                         
395                         if (y<-cbuf->yrad || y>= -cbuf->yrad+cbuf->y) return col;
396                         if (x<-cbuf->xrad || x>= -cbuf->xrad+cbuf->x) return col;
397                         
398                         return cbuf->rect + cbuf->type*( (cbuf->yrad+y)*cbuf->x + (cbuf->xrad+x) );
399                 }
400         }
401         else return defcol;
402 }
403
404 /* **************************************************** */
405
406 static CompBuf *composit_check_compbuf(CompBuf *cbuf, int type, CompBuf *outbuf)
407 {
408         /* check type */
409         CompBuf *dbuf= typecheck_compbuf(cbuf, type);
410
411         /* if same as output and translated, duplicate so pixels don't interfere */
412         if (dbuf == outbuf && !dbuf->rect_procedural && (dbuf->xof || dbuf->yof))
413                 dbuf= dupalloc_compbuf(dbuf);
414         
415         return dbuf;
416 }
417
418 /* Pixel-to-Pixel operation, 1 Image in, 1 out */
419 void composit1_pixel_processor(bNode *node, CompBuf *out, CompBuf *src_buf, float *src_col,
420                                                                           void (*func)(bNode *, float *, float *), 
421                                                                           int src_type)
422 {
423         CompBuf *src_use;
424         float *outfp=out->rect, *srcfp;
425         float color[4]; /* local color if compbuf is procedural */
426         int xrad, yrad, x, y;
427         
428         src_use= composit_check_compbuf(src_buf, src_type, out);
429         
430         xrad= out->xrad;
431         yrad= out->yrad;
432         
433         for (y= -yrad; y<-yrad+out->y; y++) {
434                 for (x= -xrad; x<-xrad+out->x; x++, outfp+=out->type) {
435                         srcfp= compbuf_get_pixel(src_use, src_col, color, x, y, xrad, yrad);
436                         func(node, outfp, srcfp);
437                 }
438         }
439         
440         if (src_use!=src_buf)
441                 free_compbuf(src_use);
442 }
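/* Illustrative call pattern from a node's exec callback (hypothetical node, sketch only;
 * real nodes pass their own per-pixel function and socket data):
 *
 *   CompBuf *cbuf = in[0]->data;
 *   CompBuf *stackbuf = alloc_compbuf(cbuf->x, cbuf->y, CB_RGBA, 1);
 *   composit1_pixel_processor(node, stackbuf, cbuf, in[0]->vec, do_copy_rgba, CB_RGBA);
 *   out[0]->data = stackbuf;
 */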
443
444 /* Pixel-to-Pixel operation, 2 Images in, 1 out */
445 void composit2_pixel_processor(bNode *node, CompBuf *out, CompBuf *src_buf, float *src_col,
446                                                                           CompBuf *fac_buf, float *fac, void (*func)(bNode *, float *, float *, float *), 
447                                                                           int src_type, int fac_type)
448 {
449         CompBuf *src_use, *fac_use;
450         float *outfp=out->rect, *srcfp, *facfp;
451         float color[4]; /* local color if compbuf is procedural */
452         int xrad, yrad, x, y;
453         
454         src_use= composit_check_compbuf(src_buf, src_type, out);
455         fac_use= composit_check_compbuf(fac_buf, fac_type, out);
456
457         xrad= out->xrad;
458         yrad= out->yrad;
459         
460         for (y= -yrad; y<-yrad+out->y; y++) {
461                 for (x= -xrad; x<-xrad+out->x; x++, outfp+=out->type) {
462                         srcfp= compbuf_get_pixel(src_use, src_col, color, x, y, xrad, yrad);
463                         facfp= compbuf_get_pixel(fac_use, fac, color, x, y, xrad, yrad);
464                         
465                         func(node, outfp, srcfp, facfp);
466                 }
467         }
468         if (src_use!=src_buf)
469                 free_compbuf(src_use);
470         if (fac_use!=fac_buf)
471                 free_compbuf(fac_use);
472 }
473
474 /* Pixel-to-Pixel operation, 3 Images in, 1 out */
475 void composit3_pixel_processor(bNode *node, CompBuf *out, CompBuf *src1_buf, float *src1_col, CompBuf *src2_buf, float *src2_col, 
476                                                                           CompBuf *fac_buf, float *fac, void (*func)(bNode *, float *, float *, float *, float *), 
477                                                                           int src1_type, int src2_type, int fac_type)
478 {
479         CompBuf *src1_use, *src2_use, *fac_use;
480         float *outfp=out->rect, *src1fp, *src2fp, *facfp;
481         float color[4]; /* local color if compbuf is procedural */
482         int xrad, yrad, x, y;
483         
484         src1_use= composit_check_compbuf(src1_buf, src1_type, out);
485         src2_use= composit_check_compbuf(src2_buf, src2_type, out);
486         fac_use= composit_check_compbuf(fac_buf, fac_type, out);
487         
488         xrad= out->xrad;
489         yrad= out->yrad;
490         
491         for (y= -yrad; y<-yrad+out->y; y++) {
492                 for (x= -xrad; x<-xrad+out->x; x++, outfp+=out->type) {
493                         src1fp= compbuf_get_pixel(src1_use, src1_col, color, x, y, xrad, yrad);
494                         src2fp= compbuf_get_pixel(src2_use, src2_col, color, x, y, xrad, yrad);
495                         facfp= compbuf_get_pixel(fac_use, fac, color, x, y, xrad, yrad);
496                         
497                         func(node, outfp, src1fp, src2fp, facfp);
498                 }
499         }
500         
501         if (src1_use!=src1_buf)
502                 free_compbuf(src1_use);
503         if (src2_use!=src2_buf)
504                 free_compbuf(src2_use);
505         if (fac_use!=fac_buf)
506                 free_compbuf(fac_use);
507 }
508
509 /* Pixel-to-Pixel operation, 4 Images in, 1 out */
510 void composit4_pixel_processor(bNode *node, CompBuf *out, CompBuf *src1_buf, float *src1_col, CompBuf *fac1_buf, float *fac1, 
511                                                                           CompBuf *src2_buf, float *src2_col, CompBuf *fac2_buf, float *fac2, 
512                                                                           void (*func)(bNode *, float *, float *, float *, float *, float *), 
513                                                                           int src1_type, int fac1_type, int src2_type, int fac2_type)
514 {
515         CompBuf *src1_use, *src2_use, *fac1_use, *fac2_use;
516         float *outfp=out->rect, *src1fp, *src2fp, *fac1fp, *fac2fp;
517         float color[4]; /* local color if compbuf is procedural */
518         int xrad, yrad, x, y;
519         
520         src1_use= composit_check_compbuf(src1_buf, src1_type, out);
521         src2_use= composit_check_compbuf(src2_buf, src2_type, out);
522         fac1_use= composit_check_compbuf(fac1_buf, fac1_type, out);
523         fac2_use= composit_check_compbuf(fac2_buf, fac2_type, out);
524         
525         xrad= out->xrad;
526         yrad= out->yrad;
527         
528         for (y= -yrad; y<-yrad+out->y; y++) {
529                 for (x= -xrad; x<-xrad+out->x; x++, outfp+=out->type) {
530                         src1fp= compbuf_get_pixel(src1_use, src1_col, color, x, y, xrad, yrad);
531                         src2fp= compbuf_get_pixel(src2_use, src2_col, color, x, y, xrad, yrad);
532                         fac1fp= compbuf_get_pixel(fac1_use, fac1, color, x, y, xrad, yrad);
533                         fac2fp= compbuf_get_pixel(fac2_use, fac2, color, x, y, xrad, yrad);
534                         
535                         func(node, outfp, src1fp, fac1fp, src2fp, fac2fp);
536                 }
537         }
538         
539         if (src1_use!=src1_buf)
540                 free_compbuf(src1_use);
541         if (src2_use!=src2_buf)
542                 free_compbuf(src2_use);
543         if (fac1_use!=fac1_buf)
544                 free_compbuf(fac1_use);
545         if (fac2_use!=fac2_buf)
546                 free_compbuf(fac2_use);
547 }
548
549
550 CompBuf *valbuf_from_rgbabuf(CompBuf *cbuf, int channel)
551 {
552         CompBuf *valbuf= alloc_compbuf(cbuf->x, cbuf->y, CB_VAL, 1);
553         float *valf, *rectf;
554         int tot;
555         
556         /* warning note: xof and yof are applied in pixelprocessor, but should be copied otherwise? */
557         valbuf->xof= cbuf->xof;
558         valbuf->yof= cbuf->yof;
559         
560         valf= valbuf->rect;
561
562         /* defaults to returning alpha channel */
563         if ((channel < CHAN_R) || (channel > CHAN_A)) channel = CHAN_A;
564
565         rectf= cbuf->rect + channel;
566         
567         for (tot= cbuf->x*cbuf->y; tot>0; tot--, valf++, rectf+=4)
568                 *valf= *rectf;
569         
570         return valbuf;
571 }
572
573 void valbuf_to_rgbabuf(CompBuf *valbuf, CompBuf *cbuf, int channel)
574 {
575         float *valf, *rectf;
576         int tot;
577
578         valf= valbuf->rect;
579
580         /* defaults to returning alpha channel */
581         if ((channel < CHAN_R) || (channel > CHAN_A)) channel = CHAN_A;
582
583         rectf = cbuf->rect + channel;
584
585         for (tot= cbuf->x*cbuf->y; tot>0; tot--, valf++, rectf+=4)
586                 *rectf = *valf;
587 }
588
589 static CompBuf *generate_procedural_preview(CompBuf *cbuf, int newx, int newy)
590 {
591         CompBuf *outbuf;
592         float *outfp;
593         int xrad, yrad, x, y;
594         
595         outbuf= alloc_compbuf(newx, newy, CB_RGBA, 1);
596
597         outfp= outbuf->rect;
598         xrad= outbuf->xrad;
599         yrad= outbuf->yrad;
600         
601         for (y= -yrad; y<-yrad+outbuf->y; y++)
602                 for (x= -xrad; x<-xrad+outbuf->x; x++, outfp+=outbuf->type)
603                         cbuf->rect_procedural(cbuf, outfp, (float)x/(float)xrad, (float)y/(float)yrad);
604
605         return outbuf;
606 }
607
608 void generate_preview(void *data, bNode *node, CompBuf *stackbuf)
609 {
610         RenderData *rd= data;
611         bNodePreview *preview= node->preview;
612         int xsize, ysize;
613         int profile_from= (rd->color_mgt_flag & R_COLOR_MANAGEMENT)? IB_PROFILE_LINEAR_RGB: IB_PROFILE_SRGB;
614         int predivide= (rd->color_mgt_flag & R_COLOR_MANAGEMENT_PREDIVIDE);
615         int dither= 0;
616         unsigned char *rect;
617         
618         if (preview && stackbuf) {
619                 CompBuf *cbuf, *stackbuf_use;
620                 
621                 if (stackbuf->rect==NULL && stackbuf->rect_procedural==NULL) return;
622                 
623                 stackbuf_use= typecheck_compbuf(stackbuf, CB_RGBA);
624
625                 if (stackbuf->x > stackbuf->y) {
626                         xsize= 140;
627                         ysize= (140*stackbuf->y)/stackbuf->x;
628                 }
629                 else {
630                         ysize= 140;
631                         xsize= (140*stackbuf->x)/stackbuf->y;
632                 }
633                 
634                 if (stackbuf_use->rect_procedural)
635                         cbuf= generate_procedural_preview(stackbuf_use, xsize, ysize);
636                 else
637                         cbuf= scalefast_compbuf(stackbuf_use, xsize, ysize);
638
639                 /* convert to byte for preview */
640                 rect= MEM_callocN(sizeof(unsigned char)*4*xsize*ysize, "bNodePreview.rect");
641
642                 IMB_buffer_byte_from_float(rect, cbuf->rect,
643                         4, dither, IB_PROFILE_SRGB, profile_from, predivide, 
644                         xsize, ysize, xsize, xsize);
645                 
646                 free_compbuf(cbuf);
647                 if (stackbuf_use!=stackbuf)
648                         free_compbuf(stackbuf_use);
649
650                 BLI_lock_thread(LOCK_PREVIEW);
651
652                 if (preview->rect)
653                         MEM_freeN(preview->rect);
654                 preview->xsize= xsize;
655                 preview->ysize= ysize;
656                 preview->rect= rect;
657
658                 BLI_unlock_thread(LOCK_PREVIEW);
659         }
660 }
661
662 void do_rgba_to_yuva(bNode *UNUSED(node), float *out, float *in)
663 {
664         rgb_to_yuv(in[0], in[1], in[2], &out[0], &out[1], &out[2]);
665         out[3]=in[3];
666 }
667
668 void do_rgba_to_hsva(bNode *UNUSED(node), float *out, float *in)
669 {
670         rgb_to_hsv(in[0], in[1], in[2], &out[0], &out[1], &out[2]);
671         out[3]=in[3];
672 }
673
674 void do_rgba_to_ycca(bNode *UNUSED(node), float *out, float *in)
675 {
676         rgb_to_ycc(in[0], in[1], in[2], &out[0], &out[1], &out[2], BLI_YCC_ITU_BT601);
677         out[3]=in[3];
678 }
679
680 void do_yuva_to_rgba(bNode *UNUSED(node), float *out, float *in)
681 {
682         yuv_to_rgb(in[0], in[1], in[2], &out[0], &out[1], &out[2]);
683         out[3]=in[3];
684 }
685
686 void do_hsva_to_rgba(bNode *UNUSED(node), float *out, float *in)
687 {
688         hsv_to_rgb(in[0], in[1], in[2], &out[0], &out[1], &out[2]);
689         out[3]=in[3];
690 }
691
692 void do_ycca_to_rgba(bNode *UNUSED(node), float *out, float *in)
693 {
694         ycc_to_rgb(in[0], in[1], in[2], &out[0], &out[1], &out[2], BLI_YCC_ITU_BT601);
695         out[3]=in[3];
696 }
697
698 void do_copy_rgba(bNode *UNUSED(node), float *out, float *in)
699 {
700         copy_v4_v4(out, in);
701 }
702
703 void do_copy_rgb(bNode *UNUSED(node), float *out, float *in)
704 {
705         copy_v3_v3(out, in);
706         out[3]= 1.0f;
707 }
708
709 void do_copy_value(bNode *UNUSED(node), float *out, float *in)
710 {
711         out[0]= in[0];
712 }
713
714 void do_copy_a_rgba(bNode *UNUSED(node), float *out, float *in, float *fac)
715 {
716         copy_v3_v3(out, in);
717         out[3]= *fac;
718 }
719
720 /* only accepts RGBA buffers */
721 void gamma_correct_compbuf(CompBuf *img, int inversed)
722 {
723         float *drect;
724         int x;
725
726         if (img->type!=CB_RGBA) return;
727
728         drect= img->rect;
729         if (inversed) {
730                 for (x=img->x*img->y; x>0; x--, drect+=4) {
731                         if (drect[0]>0.0f) drect[0]= sqrt(drect[0]); else drect[0]= 0.0f;
732                         if (drect[1]>0.0f) drect[1]= sqrt(drect[1]); else drect[1]= 0.0f;
733                         if (drect[2]>0.0f) drect[2]= sqrt(drect[2]); else drect[2]= 0.0f;
734                 }
735         }
736         else {
737                 for (x=img->x*img->y; x>0; x--, drect+=4) {
738                         if (drect[0]>0.0f) drect[0]*= drect[0]; else drect[0]= 0.0f;
739                         if (drect[1]>0.0f) drect[1]*= drect[1]; else drect[1]= 0.0f;
740                         if (drect[2]>0.0f) drect[2]*= drect[2]; else drect[2]= 0.0f;
741                 }
742         }
743 }
744
745 void premul_compbuf(CompBuf *img, int inversed)
746 {
747         float *drect;
748         int x;
749
750         if (img->type!=CB_RGBA) return;
751
752         drect= img->rect;
753         if (inversed) {
754                 for (x=img->x*img->y; x>0; x--, drect+=4) {
755                         if (fabsf(drect[3]) < 1e-5f) {
756                                 drect[0]= 0.0f;
757                                 drect[1]= 0.0f;
758                                 drect[2]= 0.0f;
759                         }
760                         else {
761                                 drect[0] /= drect[3];
762                                 drect[1] /= drect[3];
763                                 drect[2] /= drect[3];
764                         }
765                 }
766         }
767         else {
768                 for (x=img->x*img->y; x>0; x--, drect+=4) {
769                         drect[0] *= drect[3];
770                         drect[1] *= drect[3];
771                         drect[2] *= drect[3];
772                 }
773         }
774 }
775
776
777
778 /*
779  *  2D Fast Hartley Transform, used for convolution
780  */
781
782 typedef float fREAL;
783
784 // returns next highest power of 2 of x, as well as its log2 in *L2
785 static unsigned int nextPow2(unsigned int x, unsigned int* L2)
786 {
787         unsigned int pw, x_notpow2 = x & (x-1);
788         *L2 = 0;
789         while (x>>=1) ++(*L2);
790         pw = 1 << (*L2);
791         if (x_notpow2) { (*L2)++;  pw<<=1; }
792         return pw;
793 }
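/* Worked example (illustrative): nextPow2(5, &L2) returns 8 with *L2 == 3;
 * for an exact power of two, nextPow2(8, &L2) also returns 8 with *L2 == 3. */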
794
795 //------------------------------------------------------------------------------
796
797 // from FXT library by Joerg Arndt, faster in-order bit reversal
798 // use: r = revbin_upd(r, h) where h = N>>1
799 static unsigned int revbin_upd(unsigned int r, unsigned int h)
800 {
801         while (!((r^=h)&h)) h >>= 1;
802         return r;
803 }
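/* Worked example (illustrative): with h = len>>1 = 4 (so len = 8), starting from r = 0 the
 * successive calls r = revbin_upd(r, 4) yield 4, 2, 6, 1, ... i.e. the bit-reversal
 * permutation of the indices 1, 2, 3, 4, ... as used in the shuffle loop of FHT() below. */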
804 //------------------------------------------------------------------------------
805 static void FHT(fREAL* data, unsigned int M, unsigned int inverse)
806 {
807         double tt, fc, dc, fs, ds, a = M_PI;
808         fREAL t1, t2;
809         int n2, bd, bl, istep, k, len = 1 << M, n = 1;
810
811         int i, j = 0;
812         unsigned int Nh = len >> 1;
813         for (i=1;i<(len-1);++i) {
814                 j = revbin_upd(j, Nh);
815                 if (j>i) {
816                         t1 = data[i];
817                         data[i] = data[j];
818                         data[j] = t1;
819                 }
820         }
821
822         do {
823                 fREAL* data_n = &data[n];
824
825                 istep = n << 1;
826                 for (k=0; k<len; k+=istep) {
827                         t1 = data_n[k];
828                         data_n[k] = data[k] - t1;
829                         data[k] += t1;
830                 }
831
832                 n2 = n >> 1;
833                 if (n>2) {
834                         fc = dc = cos(a);
835                         fs = ds = sqrt(1.0 - fc*fc); //sin(a);
836                         bd = n-2;
837                         for (bl=1; bl<n2; bl++) {
838                                 fREAL* data_nbd = &data_n[bd];
839                                 fREAL* data_bd = &data[bd];
840                                 for (k=bl; k<len; k+=istep) {
841                                         t1 = fc*data_n[k] + fs*data_nbd[k];
842                                         t2 = fs*data_n[k] - fc*data_nbd[k];
843                                         data_n[k] = data[k] - t1;
844                                         data_nbd[k] = data_bd[k] - t2;
845                                         data[k] += t1;
846                                         data_bd[k] += t2;
847                                 }
848                                 tt = fc*dc - fs*ds;
849                                 fs = fs*dc + fc*ds;
850                                 fc = tt;
851                                 bd -= 2;
852                         }
853                 }
854
855                 if (n>1) {
856                         for (k=n2; k<len; k+=istep) {
857                                 t1 = data_n[k];
858                                 data_n[k] = data[k] - t1;
859                                 data[k] += t1;
860                         }
861                 }
862
863                 n = istep;
864                 a *= 0.5;
865         } while (n<len);
866
867         if (inverse) {
868                 fREAL sc = (fREAL)1 / (fREAL)len;
869                 for (k=0; k<len; ++k)
870                         data[k] *= sc;
871         }
872 }
873 //------------------------------------------------------------------------------
874 /* 2D Fast Hartley Transform, Mx/My -> log2 of width/height,
875         nzp -> the row where zero pad data starts,
876         inverse -> see above */
877 static void FHT2D(fREAL *data, unsigned int Mx, unsigned int My,
878                 unsigned int nzp, unsigned int inverse)
879 {
880         unsigned int i, j, Nx, Ny, maxy;
881         fREAL t;
882
883         Nx = 1 << Mx;
884         Ny = 1 << My;
885
886         // rows (forward transform skips 0 pad data)
887         maxy = inverse ? Ny : nzp;
888         for (j=0; j<maxy; ++j)
889                 FHT(&data[Nx*j], Mx, inverse);
890
891         // transpose data
892         if (Nx==Ny) {  // square
893                 for (j=0; j<Ny; ++j)
894                         for (i=j+1; i<Nx; ++i) {
895                                 unsigned int op = i + (j << Mx), np = j + (i << My);
896                                 t=data[op], data[op]=data[np], data[np]=t;
897                         }
898         }
899         else {  // rectangular
900                 unsigned int k, Nym = Ny-1, stm = 1 << (Mx + My);
901                 for (i=0; stm>0; i++) {
902                         #define PRED(k) (((k & Nym) << Mx) + (k >> My))
903                         for (j=PRED(i); j>i; j=PRED(j));
904                         if (j < i) continue;
905                         for (k=i, j=PRED(i); j!=i; k=j, j=PRED(j), stm--) {
906                                 t=data[j], data[j]=data[k], data[k]=t;
907                         }
908                         #undef PRED
909                         stm--;
910                 }
911         }
912         // swap Mx/My & Nx/Ny
913         i = Nx, Nx = Ny, Ny = i;
914         i = Mx, Mx = My, My = i;
915
916         // now columns == transposed rows
917         for (j=0; j<Ny; ++j)
918                 FHT(&data[Nx*j], Mx, inverse);
919
920         // finalize
921         for (j=0; j<=(Ny >> 1); j++) {
922                 unsigned int jm = (Ny - j) & (Ny-1);
923                 unsigned int ji = j << Mx;
924                 unsigned int jmi = jm << Mx;
925                 for (i=0; i<=(Nx >> 1); i++) {
926                         unsigned int im = (Nx - i) & (Nx-1);
927                         fREAL A = data[ji + i];
928                         fREAL B = data[jmi + i];
929                         fREAL C = data[ji + im];
930                         fREAL D = data[jmi + im];
931                         fREAL E = (fREAL)0.5*((A + D) - (B + C));
932                         data[ji + i] = A - E;
933                         data[jmi + i] = B + E;
934                         data[ji + im] = C + E;
935                         data[jmi + im] = D - E;
936                 }
937         }
938
939 }
940
941 //------------------------------------------------------------------------------
942
943 /* 2D convolution calc, d1 *= d2, M/N - > log2 of width/height */
944 static void fht_convolve(fREAL* d1, fREAL* d2, unsigned int M, unsigned int N)
945 {
946         fREAL a, b;
947         unsigned int i, j, k, L, mj, mL;
948         unsigned int m = 1 << M, n = 1 << N;
949         unsigned int m2 = 1 << (M-1), n2 = 1 << (N-1);
950         unsigned int mn2 = m << (N-1);
951
952         d1[0] *= d2[0];
953         d1[mn2] *= d2[mn2];
954         d1[m2] *= d2[m2];
955         d1[m2 + mn2] *= d2[m2 + mn2];
956         for (i=1; i<m2; i++) {
957                 k = m - i;
958                 a = d1[i]*d2[i] - d1[k]*d2[k];
959                 b = d1[k]*d2[i] + d1[i]*d2[k];
960                 d1[i] = (b + a)*(fREAL)0.5;
961                 d1[k] = (b - a)*(fREAL)0.5;
962                 a = d1[i + mn2]*d2[i + mn2] - d1[k + mn2]*d2[k + mn2];
963                 b = d1[k + mn2]*d2[i + mn2] + d1[i + mn2]*d2[k + mn2];
964                 d1[i + mn2] = (b + a)*(fREAL)0.5;
965                 d1[k + mn2] = (b - a)*(fREAL)0.5;
966         }
967         for (j=1; j<n2; j++) {
968                 L = n - j;
969                 mj = j << M;
970                 mL = L << M;
971                 a = d1[mj]*d2[mj] - d1[mL]*d2[mL];
972                 b = d1[mL]*d2[mj] + d1[mj]*d2[mL];
973                 d1[mj] = (b + a)*(fREAL)0.5;
974                 d1[mL] = (b - a)*(fREAL)0.5;
975                 a = d1[m2 + mj]*d2[m2 + mj] - d1[m2 + mL]*d2[m2 + mL];
976                 b = d1[m2 + mL]*d2[m2 + mj] + d1[m2 + mj]*d2[m2 + mL];
977                 d1[m2 + mj] = (b + a)*(fREAL)0.5;
978                 d1[m2 + mL] = (b - a)*(fREAL)0.5;
979         }
980         for (i=1; i<m2; i++) {
981                 k = m - i;
982                 for (j=1; j<n2; j++) {
983                         L = n - j;
984                         mj = j << M;
985                         mL = L << M;
986                         a = d1[i + mj]*d2[i + mj] - d1[k + mL]*d2[k + mL];
987                         b = d1[k + mL]*d2[i + mj] + d1[i + mj]*d2[k + mL];
988                         d1[i + mj] = (b + a)*(fREAL)0.5;
989                         d1[k + mL] = (b - a)*(fREAL)0.5;
990                         a = d1[i + mL]*d2[i + mL] - d1[k + mj]*d2[k + mj];
991                         b = d1[k + mj]*d2[i + mL] + d1[i + mL]*d2[k + mj];
992                         d1[i + mL] = (b + a)*(fREAL)0.5;
993                         d1[k + mj] = (b - a)*(fREAL)0.5;
994                 }
995         }
996 }
997
998 //------------------------------------------------------------------------------
999
1000 void convolve(CompBuf* dst, CompBuf* in1, CompBuf* in2)
1001 {
1002         fREAL *data1, *data2, *fp;
1003         unsigned int w2, h2, hw, hh, log2_w, log2_h;
1004         fRGB wt, *colp;
1005         int x, y, ch;
1006         int xbl, ybl, nxb, nyb, xbsz, ybsz;
1007         int in2done = FALSE;
1008
1009         CompBuf* rdst = alloc_compbuf(in1->x, in1->y, in1->type, 1);
1010
1011         // convolution result width & height
1012         w2 = 2*in2->x - 1;
1013         h2 = 2*in2->y - 1;
1014         // FFT pow2 required size & log2
1015         w2 = nextPow2(w2, &log2_w);
1016         h2 = nextPow2(h2, &log2_h);
1017
1018         // alloc space
1019         data1 = (fREAL*)MEM_callocN(3*w2*h2*sizeof(fREAL), "convolve_fast FHT data1");
1020         data2 = (fREAL*)MEM_callocN(w2*h2*sizeof(fREAL), "convolve_fast FHT data2");
1021
1022         // normalize convolutor
1023         wt[0] = wt[1] = wt[2] = 0.f;
1024         for (y=0; y<in2->y; y++) {
1025                 colp = (fRGB*)&in2->rect[y*in2->x*in2->type];
1026                 for (x=0; x<in2->x; x++)
1027                         add_v3_v3(wt, colp[x]);
1028         }
1029         if (wt[0] != 0.f) wt[0] = 1.f/wt[0];
1030         if (wt[1] != 0.f) wt[1] = 1.f/wt[1];
1031         if (wt[2] != 0.f) wt[2] = 1.f/wt[2];
1032         for (y=0; y<in2->y; y++) {
1033                 colp = (fRGB*)&in2->rect[y*in2->x*in2->type];
1034                 for (x=0; x<in2->x; x++)
1035                         mul_v3_v3(colp[x], wt);
1036         }
1037
1038         // copy image data, unpacking interleaved RGBA into separate channels
1039         // only need to calc data1 once
1040
1041         // block add-overlap
1042         hw = in2->x >> 1;
1043         hh = in2->y >> 1;
1044         xbsz = (w2 + 1) - in2->x;
1045         ybsz = (h2 + 1) - in2->y;
1046         nxb = in1->x / xbsz;
1047         if (in1->x % xbsz) nxb++;
1048         nyb = in1->y / ybsz;
1049         if (in1->y % ybsz) nyb++;
1050         for (ybl=0; ybl<nyb; ybl++) {
1051                 for (xbl=0; xbl<nxb; xbl++) {
1052
1053                         // each channel one by one
1054                         for (ch=0; ch<3; ch++) {
1055                                 fREAL* data1ch = &data1[ch*w2*h2];
1056
1057                                 // only need to calc fht data from in2 once, can re-use for every block
1058                                 if (!in2done) {
1059                                         // in2, channel ch -> data1
1060                                         for (y=0; y<in2->y; y++) {
1061                                                 fp = &data1ch[y*w2];
1062                                                 colp = (fRGB*)&in2->rect[y*in2->x*in2->type];
1063                                                 for (x=0; x<in2->x; x++)
1064                                                         fp[x] = colp[x][ch];
1065                                         }
1066                                 }
1067
1068                                 // in1, channel ch -> data2
1069                                 memset(data2, 0, w2*h2*sizeof(fREAL));
1070                                 for (y=0; y<ybsz; y++) {
1071                                         int yy = ybl*ybsz + y;
1072                                         if (yy >= in1->y) continue;
1073                                         fp = &data2[y*w2];
1074                                         colp = (fRGB*)&in1->rect[yy*in1->x*in1->type];
1075                                         for (x=0; x<xbsz; x++) {
1076                                                 int xx = xbl*xbsz + x;
1077                                                 if (xx >= in1->x) continue;
1078                                                 fp[x] = colp[xx][ch];
1079                                         }
1080                                 }
1081
1082                                 // forward FHT
1083                                 // zero pad data start is different for each == height+1
1084                                 if (!in2done) FHT2D(data1ch, log2_w, log2_h, in2->y+1, 0);
1085                                 FHT2D(data2, log2_w, log2_h, in2->y+1, 0);
1086
1087                                 // FHT2D transposed data, row/col now swapped
1088                                 // convolve & inverse FHT
1089                                 fht_convolve(data2, data1ch, log2_h, log2_w);
1090                                 FHT2D(data2, log2_h, log2_w, 0, 1);
1091                                 // data again transposed, so in order again
1092
1093                                 // overlap-add result
1094                                 for (y=0; y<(int)h2; y++) {
1095                                         const int yy = ybl*ybsz + y - hh;
1096                                         if ((yy < 0) || (yy >= in1->y)) continue;
1097                                         fp = &data2[y*w2];
1098                                         colp = (fRGB*)&rdst->rect[yy*in1->x*in1->type];
1099                                         for (x=0; x<(int)w2; x++) {
1100                                                 const int xx = xbl*xbsz + x - hw;
1101                                                 if ((xx < 0) || (xx >= in1->x)) continue;
1102                                                 colp[xx][ch] += fp[x];
1103                                         }
1104                                 }
1105
1106                         }
1107                         in2done = TRUE;
1108                 }
1109         }
1110
1111         MEM_freeN(data2);
1112         MEM_freeN(data1);
1113         memcpy(dst->rect, rdst->rect, sizeof(float)*dst->x*dst->y*dst->type);
1114         free_compbuf(rdst);
1115 }
1116
1117
1118 /*
1119  *
1120  * Utility functions qd_* should probably be integrated better with other functions here.
1121  *
1122  */
1123 // sets col to the pixel color at (x, y)
1124 void qd_getPixel(CompBuf* src, int x, int y, float* col)
1125 {
1126         if (src->rect_procedural) {
1127                 float bc[4];
1128                 src->rect_procedural(src, bc, (float)x/(float)src->xrad, (float)y/(float)src->yrad);
1129
1130                 switch (src->type) {
1131                         /* these fallthrough to get all the channels */
1132                         case CB_RGBA: col[3]=bc[3]; 
1133                         case CB_VEC3: col[2]=bc[2];
1134                         case CB_VEC2: col[1]=bc[1];
1135                         case CB_VAL: col[0]=bc[0];
1136                 }
1137         }
1138         else if ((x >= 0) && (x < src->x) && (y >= 0) && (y < src->y)) {
1139                 float* bc = &src->rect[(x + y*src->x)*src->type];
1140                 switch (src->type) {
1141                         /* these fallthrough to get all the channels */
1142                         case CB_RGBA: col[3]=bc[3]; 
1143                         case CB_VEC3: col[2]=bc[2];
1144                         case CB_VEC2: col[1]=bc[1];
1145                         case CB_VAL: col[0]=bc[0];
1146                 }
1147         }
1148         else {
1149                 switch (src->type) {
1150                         /* these fallthrough to get all the channels */
1151                         case CB_RGBA: col[3]=0.0; 
1152                         case CB_VEC3: col[2]=0.0; 
1153                         case CB_VEC2: col[1]=0.0; 
1154                         case CB_VAL: col[0]=0.0; 
1155                 }
1156         }
1157 }
1158
1159 // sets pixel (x, y) to color col
1160 void qd_setPixel(CompBuf* src, int x, int y, float* col)
1161 {
1162         if ((x >= 0) && (x < src->x) && (y >= 0) && (y < src->y)) {
1163                 float* bc = &src->rect[(x + y*src->x)*src->type];
1164                 switch (src->type) {
1165                         /* these fallthrough to get all the channels */
1166                         case CB_RGBA: bc[3]=col[3]; 
1167                         case CB_VEC3: bc[2]=col[2];
1168                         case CB_VEC2: bc[1]=col[1];
1169                         case CB_VAL: bc[0]=col[0];
1170                 }
1171         }
1172 }
1173
1174 // adds col to the pixel color at (x, y)
1175 void qd_addPixel(CompBuf* src, int x, int y, float* col)
1176 {
1177         if ((x >= 0) && (x < src->x) && (y >= 0) && (y < src->y)) {
1178                 float* bc = &src->rect[(x + y*src->x)*src->type];
1179                 bc[0] += col[0], bc[1] += col[1], bc[2] += col[2];
1180         }
1181 }
1182
1183 // multiplies pixel by factor value f
1184 void qd_multPixel(CompBuf* src, int x, int y, float f)
1185 {
1186         if ((x >= 0) && (x < src->x) && (y >= 0) && (y < src->y)) {
1187                 float* bc = &src->rect[(x + y*src->x)*src->type];
1188                 bc[0] *= f, bc[1] *= f, bc[2] *= f;
1189         }
1190 }
1191
1192 // bilinear interpolation with wraparound
1193 void qd_getPixelLerpWrap(CompBuf* src, float u, float v, float* col)
1194 {
1195         const float ufl = floor(u), vfl = floor(v);
1196         const int nx = (int)ufl % src->x, ny = (int)vfl % src->y;
1197         const int x1 = (nx < 0) ? (nx + src->x) : nx;
1198         const int y1 = (ny < 0) ? (ny + src->y) : ny;
1199         const int x2 = (x1 + 1) % src->x, y2 = (y1 + 1) % src->y;
1200         const float* c00 = &src->rect[(x1 + y1*src->x)*src->type];
1201         const float* c10 = &src->rect[(x2 + y1*src->x)*src->type];
1202         const float* c01 = &src->rect[(x1 + y2*src->x)*src->type];
1203         const float* c11 = &src->rect[(x2 + y2*src->x)*src->type];
1204         const float uf = u - ufl, vf = v - vfl;
1205         const float w00=(1.f-uf)*(1.f-vf), w10=uf*(1.f-vf), w01=(1.f-uf)*vf, w11=uf*vf;
1206         col[0] = w00*c00[0] + w10*c10[0] + w01*c01[0] + w11*c11[0];
1207         if (src->type != CB_VAL) {
1208                 col[1] = w00*c00[1] + w10*c10[1] + w01*c01[1] + w11*c11[1];
1209                 col[2] = w00*c00[2] + w10*c10[2] + w01*c01[2] + w11*c11[2];
1210                 col[3] = w00*c00[3] + w10*c10[3] + w01*c01[3] + w11*c11[3];
1211         }
1212 }
1213
1214 // as above, without wrap around
1215 void qd_getPixelLerp(CompBuf* src, float u, float v, float* col)
1216 {
1217         const float ufl = floor(u), vfl = floor(v);
1218         const int x1 = (int)ufl, y1 = (int)vfl;
1219         const int x2 = (int)ceil(u), y2 = (int)ceil(v);
1220         if ((x2 >= 0) && (y2 >= 0) && (x1 < src->x) && (y1 < src->y)) {
1221                 const float B[4] = {0, 0, 0, 0};
1222                 const int ox1 = (x1 < 0), oy1 = (y1 < 0), ox2 = (x2 >= src->x), oy2 = (y2 >= src->y);
1223                 const float* c00 = (ox1 || oy1) ? B : &src->rect[(x1 + y1*src->x)*src->type];
1224                 const float* c10 = (ox2 || oy1) ? B : &src->rect[(x2 + y1*src->x)*src->type];
1225                 const float* c01 = (ox1 || oy2) ? B : &src->rect[(x1 + y2*src->x)*src->type];
1226                 const float* c11 = (ox2 || oy2) ? B : &src->rect[(x2 + y2*src->x)*src->type];
1227                 const float uf = u - ufl, vf = v - vfl;
1228                 const float w00=(1.f-uf)*(1.f-vf), w10=uf*(1.f-vf), w01=(1.f-uf)*vf, w11=uf*vf;
1229                 col[0] = w00*c00[0] + w10*c10[0] + w01*c01[0] + w11*c11[0];
1230                 if (src->type != CB_VAL) {
1231                         col[1] = w00*c00[1] + w10*c10[1] + w01*c01[1] + w11*c11[1];
1232                         col[2] = w00*c00[2] + w10*c10[2] + w01*c01[2] + w11*c11[2];
1233                         col[3] = w00*c00[3] + w10*c10[3] + w01*c01[3] + w11*c11[3];
1234                 }
1235         }
1236         else col[0] = col[1] = col[2] = col[3] = 0.f;
1237 }
1238
1239 // as above, sampling only one channel
1240 void qd_getPixelLerpChan(CompBuf* src, float u, float v, int chan, float* out)
1241 {
1242         const float ufl = floor(u), vfl = floor(v);
1243         const int x1 = (int)ufl, y1 = (int)vfl;
1244         const int x2 = (int)ceil(u), y2 = (int)ceil(v);
1245         if (chan >= src->type) chan = 0;
1246         if ((x2 >= 0) && (y2 >= 0) && (x1 < src->x) && (y1 < src->y)) {
1247                 const float B[4] = {0, 0, 0, 0};
1248                 const int ox1 = (x1 < 0), oy1 = (y1 < 0), ox2 = (x2 >= src->x), oy2 = (y2 >= src->y);
1249                 const float* c00 = (ox1 || oy1) ? B : &src->rect[(x1 + y1*src->x)*src->type + chan];
1250                 const float* c10 = (ox2 || oy1) ? B : &src->rect[(x2 + y1*src->x)*src->type + chan];
1251                 const float* c01 = (ox1 || oy2) ? B : &src->rect[(x1 + y2*src->x)*src->type + chan];
1252                 const float* c11 = (ox2 || oy2) ? B : &src->rect[(x2 + y2*src->x)*src->type + chan];
1253                 const float uf = u - ufl, vf = v - vfl;
1254                 const float w00=(1.f-uf)*(1.f-vf), w10=uf*(1.f-vf), w01=(1.f-uf)*vf, w11=uf*vf;
1255                 out[0] = w00*c00[0] + w10*c10[0] + w01*c01[0] + w11*c11[0];
1256         }
1257         else *out = 0.f;
1258 }
1259
1260
1261 CompBuf* qd_downScaledCopy(CompBuf* src, int scale)
1262 {
1263         CompBuf* fbuf;
1264         if (scale <= 1)
1265                 fbuf = dupalloc_compbuf(src);
1266         else {
1267                 int nw = src->x/scale, nh = src->y/scale;
1268                 if ((2*(src->x % scale)) > scale) nw++;
1269                 if ((2*(src->y % scale)) > scale) nh++;
1270                 fbuf = alloc_compbuf(nw, nh, src->type, 1);
1271                 {
1272                         int x, y, xx, yy, sx, sy, mx, my;
1273                         float colsum[4] = {0.0f, 0.0f, 0.0f, 0.0f};
1274                         float fscale = 1.f/(float)(scale*scale);
1275                         for (y=0; y<nh; y++) {
1276                                 fRGB* fcolp = (fRGB*)&fbuf->rect[y*fbuf->x*fbuf->type];
1277                                 yy = y*scale;
1278                                 my = yy + scale;
1279                                 if (my > src->y) my = src->y;
1280                                 for (x=0; x<nw; x++) {
1281                                         xx = x*scale;
1282                                         mx = xx + scale;
1283                                         if (mx > src->x) mx = src->x;
1284                                         zero_v3(colsum);
1285                                         for (sy=yy; sy<my; sy++) {
1286                                                 fRGB* scolp = (fRGB*)&src->rect[sy*src->x*src->type];
1287                                                 for (sx=xx; sx<mx; sx++)
1288                                                         add_v3_v3(colsum, scolp[sx]);
1289                                         }
1290                                         mul_v3_fl(colsum, fscale);
1291                                         copy_v3_v3(fcolp[x], colsum);
1292                                 }
1293                         }
1294                 }
1295         }
1296         return fbuf;
1297 }
1298
1299 // fast gaussian blur, per channel
1300 // xy var. bits 1 & 2 can be used to blur in x or y direction separately
1301 void IIR_gauss(CompBuf* src, float sigma, int chan, int xy)
1302 {
1303         double q, q2, sc, cf[4], tsM[9], tsu[3], tsv[3];
1304         double *X, *Y, *W;
1305         const unsigned int src_width = src->x;
1306         const unsigned int src_height = src->y;
1307         unsigned int i, x, y, sz;
1308
1309         // <0.5 not valid, though can have a possibly useful sort of sharpening effect
1310         if (sigma < 0.5f) return;
1311
1312         if ((xy < 1) || (xy > 3)) xy = 3;
1313
1314         // XXX The YVV macro defined below explicitly expects sources of at least 3x3 pixels,
1315         //     so just skipping blur along the faulty direction if src's size is below that limit!
1316         if (src_width < 3) xy &= ~(int) 1;
1317         if (src_height < 3) xy &= ~(int) 2;
1318         if (xy < 1) return;
1319
1320         // see "Recursive Gabor Filtering" by Young/VanVliet
1321         // all factors here in double precision. Required, because in single precision it seems to blow up if sigma > ~200
1322         if (sigma >= 3.556f)
1323                 q = 0.9804f * (sigma - 3.556f) + 2.5091f;
1324         else     // sigma >= 0.5
1325                 q = (0.0561f * sigma + 0.5784f) * sigma - 0.2568f;
1326         q2 = q * q;
1327         sc = (1.1668 + q) * (3.203729649  + (2.21566 + q) * q);
1328         // no gabor filtering here, so no complex multiplies, just the regular coefs.
1329         // all negated here, so as not to have to recalc Triggs/Sdika matrix
1330         cf[1] = q * (5.788961737 + (6.76492 + 3.0 * q) * q) / sc;
1331         cf[2] = -q2 * (3.38246 + 3.0 * q) / sc;
1332         // 0 & 3 unchanged
1333         cf[3] = q2 * q / sc;
1334         cf[0] = 1.0 - cf[1] - cf[2] - cf[3];
1335
1336         // Triggs/Sdika border corrections,
1337         // it seems to work, not entirely sure if it is actually totally correct,
1338         // Besides J.M.Geusebroek's anigauss.c (see http://www.science.uva.nl/~mark),
1339         // found one other implementation by Christoph Lampert,
1340         // but neither seems to be quite the same, result seems to be ok so far anyway.
1341         // Extra scale factor here to not have to do it in filter,
1342         // though maybe this had something to do with the precision errors
1343         sc = cf[0] / ((1.0 + cf[1] - cf[2] + cf[3]) * (1.0 - cf[1] - cf[2] - cf[3]) * (1.0 + cf[2] + (cf[1] - cf[3]) * cf[3]));
1344         tsM[0] = sc * (-cf[3] * cf[1] + 1.0 - cf[3] * cf[3] - cf[2]);
1345         tsM[1] = sc * ((cf[3] + cf[1]) * (cf[2] + cf[3] * cf[1]));
1346         tsM[2] = sc * (cf[3] * (cf[1] + cf[3] * cf[2]));
1347         tsM[3] = sc * (cf[1] + cf[3] * cf[2]);
1348         tsM[4] = sc * (-(cf[2] - 1.0) * (cf[2] + cf[3] * cf[1]));
1349         tsM[5] = sc * (-(cf[3] * cf[1] + cf[3] * cf[3] + cf[2] - 1.0) * cf[3]);
1350         tsM[6] = sc * (cf[3] * cf[1] + cf[2] + cf[1] * cf[1] - cf[2] * cf[2]);
1351         tsM[7] = sc * (cf[1] * cf[2] + cf[3] * cf[2] * cf[2] - cf[1] * cf[3] * cf[3] - cf[3] * cf[3] * cf[3] - cf[3] * cf[2] + cf[3]);
1352         tsM[8] = sc * (cf[3] * (cf[1] + cf[3] * cf[2]));
1353
1354 #define YVV(L)                                                                          \
1355 {                                                                                       \
1356         W[0] = cf[0] * X[0] + cf[1] * X[0] + cf[2] * X[0] + cf[3] * X[0];                   \
1357         W[1] = cf[0] * X[1] + cf[1] * W[0] + cf[2] * X[0] + cf[3] * X[0];                   \
1358         W[2] = cf[0] * X[2] + cf[1] * W[1] + cf[2] * W[0] + cf[3] * X[0];                   \
1359         for (i = 3; i < L; i++) {                                                           \
1360                 W[i] = cf[0] * X[i] + cf[1] * W[i - 1] + cf[2] * W[i - 2] + cf[3] * W[i - 3];   \
1361         }                                                                                   \
1362         tsu[0] = W[L - 1] - X[L - 1];                                                       \
1363         tsu[1] = W[L - 2] - X[L - 1];                                                       \
1364         tsu[2] = W[L - 3] - X[L - 1];                                                       \
1365         tsv[0] = tsM[0] * tsu[0] + tsM[1] * tsu[1] + tsM[2] * tsu[2] + X[L - 1];            \
1366         tsv[1] = tsM[3] * tsu[0] + tsM[4] * tsu[1] + tsM[5] * tsu[2] + X[L - 1];            \
1367         tsv[2] = tsM[6] * tsu[0] + tsM[7] * tsu[1] + tsM[8] * tsu[2] + X[L - 1];            \
1368         Y[L - 1] = cf[0] * W[L - 1] + cf[1] * tsv[0] + cf[2] * tsv[1] + cf[3] * tsv[2];     \
1369         Y[L - 2] = cf[0] * W[L - 2] + cf[1] * Y[L - 1] + cf[2] * tsv[0] + cf[3] * tsv[1];   \
1370         Y[L - 3] = cf[0] * W[L - 3] + cf[1] * Y[L - 2] + cf[2] * Y[L - 1] + cf[3] * tsv[0]; \
1371         /* 'i != UINT_MAX' is really 'i >= 0', but necessary for unsigned int wrapping */   \
1372         for (i = L - 4; i != UINT_MAX; i--) {                                               \
1373                 Y[i] = cf[0] * W[i] + cf[1] * Y[i + 1] + cf[2] * Y[i + 2] + cf[3] * Y[i + 3];   \
1374         }                                                                                   \
1375 } (void)0
1376
1377         // intermediate buffers
1378         sz = MAX2(src_width, src_height);
1379         X = MEM_callocN(sz * sizeof(double), "IIR_gauss X buf");
1380         Y = MEM_callocN(sz * sizeof(double), "IIR_gauss Y buf");
1381         W = MEM_callocN(sz * sizeof(double), "IIR_gauss W buf");
1382         if (xy & 1) {       // H
1383                 for (y = 0; y < src_height; ++y) {
1384                         const int yx = y * src_width;
1385                         for (x = 0; x < src_width; ++x)
1386                                 X[x] = src->rect[(x + yx) * src->type + chan];
1387                         YVV(src_width);
1388                         for (x = 0; x < src_width; ++x)
1389                                 src->rect[(x + yx) * src->type + chan] = Y[x];
1390                 }
1391         }
1392         if (xy & 2) {       // V
1393                 for (x = 0; x < src_width; ++x) {
1394                         for (y = 0; y < src_height; ++y)
1395                                 X[y] = src->rect[(x + y * src_width) * src->type + chan];
1396                         YVV(src_height);
1397                         for (y = 0; y < src_height; ++y)
1398                                 src->rect[(x + y * src_width) * src->type + chan] = Y[y];
1399                 }
1400         }
1401
1402         MEM_freeN(X);
1403         MEM_freeN(W);
1404         MEM_freeN(Y);
1405 #undef YVV
1406 }
1407