/* ***** BEGIN GPL LICENSE BLOCK *****
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2012 Blender Foundation.
 * All rights reserved.
 *
 * Contributor(s): Blender Foundation,
 *
 * ***** END GPL LICENSE BLOCK *****
 */
/** \file blender/blenkernel/intern/mask_rasterize.c
 *  \ingroup bke
 */
#include <float.h>

#include "MEM_guardedalloc.h"

#include "DNA_vec_types.h"
#include "DNA_mask_types.h"

#include "BLI_utildefines.h"
#include "BLI_scanfill.h"
#include "BLI_memarena.h"

#include "BLI_math.h"
#include "BLI_rect.h"
#include "BLI_listbase.h"
#include "BLI_linklist.h"

#include "BKE_mask.h"

/* the matching #endif is at the end of this file */
#ifndef USE_RASKTER
#define SPLINE_RESOL_CAP 32
#define SPLINE_RESOL 32
#define BUCKET_PIXELS_PER_CELL 8

#define SF_EDGE_IS_BOUNDARY 0xff
#define SF_KEYINDEX_TEMP_ID ((unsigned int) -1)

#define TRI_TERMINATOR_ID   ((unsigned int) -1)
#define TRI_VERT            ((unsigned int) -1)
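
/* note: TRI_VERT fills the unused 4th index of a triangle in face_array,
 * and TRI_TERMINATOR_ID terminates the face index list stored for each
 * bucket (see layer_bucket_init below) */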
/* --------------------------------------------------------------------- */
/* local structs for mask rasterizing                                     */
/* --------------------------------------------------------------------- */
/**
 * A single #MaskRasterHandle contains multiple #MaskRasterLayer's,
 * each #MaskRasterLayer does its own lookup and contributes to
 * the final pixel with its own blending mode.
 */
/* internal use only */
typedef struct MaskRasterLayer {
	/* geometry */
	unsigned int   face_tot;
	unsigned int (*face_array)[4];  /* access coords tri/quad */
	float        (*face_coords)[3]; /* xy, z 0-1 (1.0 == filled) */

	/* 2d bounds (to quickly skip bucket lookup) */
	rctf bounds;

	/* buckets */
	unsigned int **buckets_face;
	/* cache divide and subtract */
	float buckets_xy_scalar[2]; /* (1.0 / (buckets_width + FLT_EPSILON)) * buckets_x */
	unsigned int buckets_x;
	unsigned int buckets_y;

	/* copied direct from #MaskLayer.--- */
	/* blending options */
	float alpha;
	char  blend;
	char  blend_flag;
} MaskRasterLayer;
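
/* vertex ranges of each open (unfilled) spline, stored so the feather quads
 * can be created once scanfill is done */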
typedef struct MaskRasterSplineInfo {
	unsigned int vertex_offset;
	unsigned int vertex_total;
	unsigned int is_cyclic;
} MaskRasterSplineInfo;
/**
 * opaque local struct for mask pixel lookup, each #MaskLayer needs one of these
 */
struct MaskRasterHandle {
	MaskRasterLayer *layers;
	unsigned int     layers_tot;

	/* 2d bounds (to quickly skip bucket lookup) */
	rctf bounds;
};
/* --------------------------------------------------------------------- */
/* alloc / free functions                                                 */
/* --------------------------------------------------------------------- */
MaskRasterHandle *BLI_maskrasterize_handle_new(void)
{
	MaskRasterHandle *mr_handle;

	mr_handle = MEM_callocN(sizeof(MaskRasterHandle), STRINGIFY(MaskRasterHandle));

	return mr_handle;
}
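
/* Typical calling sequence (sketch, using only the functions in this file):
 *
 *   MaskRasterHandle *handle = BLI_maskrasterize_handle_new();
 *   BLI_maskrasterize_handle_init(handle, mask, width, height, TRUE, TRUE, TRUE);
 *   ...
 *   float value = BLI_maskrasterize_handle_sample(handle, xy);
 *   ...
 *   BLI_maskrasterize_handle_free(handle);
 */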
void BLI_maskrasterize_handle_free(MaskRasterHandle *mr_handle)
{
	const unsigned int layers_tot = mr_handle->layers_tot;
	unsigned int i;
	MaskRasterLayer *layer = mr_handle->layers;

	for (i = 0; i < layers_tot; i++, layer++) {

		if (layer->face_array) {
			MEM_freeN(layer->face_array);
		}

		if (layer->face_coords) {
			MEM_freeN(layer->face_coords);
		}

		if (layer->buckets_face) {
			const unsigned int bucket_tot = layer->buckets_x * layer->buckets_y;
			unsigned int bucket_index;
			for (bucket_index = 0; bucket_index < bucket_tot; bucket_index++) {
				unsigned int *face_index = layer->buckets_face[bucket_index];
				if (face_index) {
					MEM_freeN(face_index);
				}
			}

			MEM_freeN(layer->buckets_face);
		}
	}

	MEM_freeN(mr_handle->layers);
	MEM_freeN(mr_handle);
}
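
/**
 * Outset the spline points to make feather coordinates: each feather point is
 * pushed out perpendicular to the spline by \a ofs, along the averaged
 * direction of its two adjacent edges. When \a do_test is set, only feather
 * points lying closer to the spline than \a ofs are adjusted.
 */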
static void maskrasterize_spline_differentiate_point_outset(float (*diff_feather_points)[2], float (*diff_points)[2],
                                                            const unsigned int tot_diff_point, const float ofs,
                                                            const short do_test)
{
	unsigned int k_prev = tot_diff_point - 2;
	unsigned int k_curr = tot_diff_point - 1;
	unsigned int k_next = 0;

	unsigned int k;

	float d_prev[2];
	float d_next[2];
	float d[2];

	const float *co_prev;
	const float *co_curr;
	const float *co_next;

	const float ofs_squared = ofs * ofs;

	co_prev = diff_points[k_prev];
	co_curr = diff_points[k_curr];
	co_next = diff_points[k_next];

	/* precalc */
	sub_v2_v2v2(d_prev, co_prev, co_curr);
	normalize_v2(d_prev);

	for (k = 0; k < tot_diff_point; k++) {
		/* co_prev = diff_points[k_prev]; */ /* precalc */
		co_curr = diff_points[k_curr];
		co_next = diff_points[k_next];

		/* sub_v2_v2v2(d_prev, co_prev, co_curr); */ /* precalc */
		sub_v2_v2v2(d_next, co_curr, co_next);

		/* normalize_v2(d_prev); */ /* precalc */
		normalize_v2(d_next);

		if ((do_test == FALSE) ||
		    (len_squared_v2v2(diff_feather_points[k], diff_points[k]) < ofs_squared))
		{
			add_v2_v2v2(d, d_prev, d_next);
			normalize_v2(d);

			diff_feather_points[k][0] = diff_points[k][0] + ( d[1] * ofs);
			diff_feather_points[k][1] = diff_points[k][1] + (-d[0] * ofs);
		}

		/* use next iter */
		copy_v2_v2(d_prev, d_next);

		/* k_prev = k_curr; */ /* precalc */
		k_curr = k_next;
		k_next++;
	}
}
/* this function is not exact, sometimes it returns false positives,
 * the main point of it is to clear out _almost_ all bucket/face non-intersections,
 * returning TRUE in corner cases is ok but missing an intersection is NOT.
 *
 * method used
 * - check if the center of the buckets bounding box is intersecting the face
 * - if not, get the max radius to a corner of the bucket and see how close we
 *   are to any of the triangle edges.
 */
static int layer_bucket_isect_test(MaskRasterLayer *layer, unsigned int face_index,
                                   const unsigned int bucket_x, const unsigned int bucket_y,
                                   const float bucket_size_x, const float bucket_size_y,
                                   const float bucket_max_rad_squared)
{
	unsigned int *face = layer->face_array[face_index];
	float (*cos)[3] = layer->face_coords;

	const float xmin = layer->bounds.xmin + (bucket_size_x * bucket_x);
	const float ymin = layer->bounds.ymin + (bucket_size_y * bucket_y);
	const float xmax = xmin + bucket_size_x;
	const float ymax = ymin + bucket_size_y;

	const float cent[2] = {(xmin + xmax) * 0.5f,
	                       (ymin + ymax) * 0.5f};

	if (face[3] == TRI_VERT) {
		const float *v1 = cos[face[0]];
		const float *v2 = cos[face[1]];
		const float *v3 = cos[face[2]];

		/* bucket center is inside the tri */
		if (isect_point_tri_v2(cent, v1, v2, v3)) {
			return TRUE;
		}
		/* otherwise check how close the bucket is to any of the tri edges */
		if ((dist_squared_to_line_segment_v2(cent, v1, v2) < bucket_max_rad_squared) ||
		    (dist_squared_to_line_segment_v2(cent, v2, v3) < bucket_max_rad_squared) ||
		    (dist_squared_to_line_segment_v2(cent, v3, v1) < bucket_max_rad_squared))
		{
			return TRUE;
		}

		// printf("skip tri\n");
		return FALSE;
	}
	else {
		const float *v1 = cos[face[0]];
		const float *v2 = cos[face[1]];
		const float *v3 = cos[face[2]];
		const float *v4 = cos[face[3]];

		/* bucket center is inside the quad (tested as 2 tris) */
		if (isect_point_tri_v2(cent, v1, v2, v3)) {
			return TRUE;
		}
		else if (isect_point_tri_v2(cent, v1, v3, v4)) {
			return TRUE;
		}
		/* otherwise check how close the bucket is to any of the quad edges */
		if ((dist_squared_to_line_segment_v2(cent, v1, v2) < bucket_max_rad_squared) ||
		    (dist_squared_to_line_segment_v2(cent, v2, v3) < bucket_max_rad_squared) ||
		    (dist_squared_to_line_segment_v2(cent, v3, v4) < bucket_max_rad_squared) ||
		    (dist_squared_to_line_segment_v2(cent, v4, v1) < bucket_max_rad_squared))
		{
			return TRUE;
		}

		// printf("skip quad\n");
		return FALSE;
	}
}
static void layer_bucket_init_dummy(MaskRasterLayer *layer)
{
	layer->buckets_x = 0;
	layer->buckets_y = 0;

	layer->buckets_xy_scalar[0] = 0.0f;
	layer->buckets_xy_scalar[1] = 0.0f;

	layer->buckets_face = NULL;
}
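
/**
 * Divide the layer bounds into a grid of buckets (BUCKET_PIXELS_PER_CELL
 * pixels per cell) and store, for every bucket, the faces that may intersect
 * it, so sampling a pixel only tests a few faces instead of the whole layer.
 */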
static void layer_bucket_init(MaskRasterLayer *layer, const float pixel_size)
{
	MemArena *arena = BLI_memarena_new(1 << 16, __func__);

	const float bucket_dim_x = layer->bounds.xmax - layer->bounds.xmin;
	const float bucket_dim_y = layer->bounds.ymax - layer->bounds.ymin;

	layer->buckets_x = (bucket_dim_x / pixel_size) / (float)BUCKET_PIXELS_PER_CELL;
	layer->buckets_y = (bucket_dim_y / pixel_size) / (float)BUCKET_PIXELS_PER_CELL;

	// printf("bucket size %ux%u\n", layer->buckets_x, layer->buckets_y);

	CLAMP(layer->buckets_x, 8, 512);
	CLAMP(layer->buckets_y, 8, 512);

	layer->buckets_xy_scalar[0] = (1.0f / (bucket_dim_x + FLT_EPSILON)) * layer->buckets_x;
	layer->buckets_xy_scalar[1] = (1.0f / (bucket_dim_y + FLT_EPSILON)) * layer->buckets_y;

	{
		/* width and height of each bucket */
		const float bucket_size_x = (bucket_dim_x + FLT_EPSILON) / layer->buckets_x;
		const float bucket_size_y = (bucket_dim_y + FLT_EPSILON) / layer->buckets_y;
		const float bucket_max_rad = (maxf(bucket_size_x, bucket_size_y) * M_SQRT2) + FLT_EPSILON;
		const float bucket_max_rad_squared = bucket_max_rad * bucket_max_rad;

		unsigned int *face = &layer->face_array[0][0];
		float (*cos)[3] = layer->face_coords;

		const unsigned int bucket_tot = layer->buckets_x * layer->buckets_y;
		LinkNode **bucketstore = MEM_callocN(bucket_tot * sizeof(LinkNode *), __func__);
		unsigned int *bucketstore_tot = MEM_callocN(bucket_tot * sizeof(unsigned int), __func__);

		unsigned int face_index;

		for (face_index = 0; face_index < layer->face_tot; face_index++, face += 4) {
			float xmin, xmax, ymin, ymax;

			if (face[3] == TRI_VERT) {
				const float *v1 = cos[face[0]];
				const float *v2 = cos[face[1]];
				const float *v3 = cos[face[2]];

				xmin = fminf(v1[0], fminf(v2[0], v3[0]));
				xmax = fmaxf(v1[0], fmaxf(v2[0], v3[0]));
				ymin = fminf(v1[1], fminf(v2[1], v3[1]));
				ymax = fmaxf(v1[1], fmaxf(v2[1], v3[1]));
			}
			else {
				const float *v1 = cos[face[0]];
				const float *v2 = cos[face[1]];
				const float *v3 = cos[face[2]];
				const float *v4 = cos[face[3]];

				xmin = fminf(v1[0], fminf(v2[0], fminf(v3[0], v4[0])));
				xmax = fmaxf(v1[0], fmaxf(v2[0], fmaxf(v3[0], v4[0])));
				ymin = fminf(v1[1], fminf(v2[1], fminf(v3[1], v4[1])));
				ymax = fmaxf(v1[1], fmaxf(v2[1], fmaxf(v3[1], v4[1])));
			}

			/* not essential but may as well skip any faces outside the view */
			if (!((xmax < 0.0f) || (ymax < 0.0f) || (xmin > 1.0f) || (ymin > 1.0f))) {

				CLAMP(xmin, 0.0f, 1.0f);
				CLAMP(ymin, 0.0f, 1.0f);
				CLAMP(xmax, 0.0f, 1.0f);
				CLAMP(ymax, 0.0f, 1.0f);

				{
					const unsigned int xi_min = (unsigned int) ((xmin - layer->bounds.xmin) * layer->buckets_xy_scalar[0]);
					const unsigned int xi_max = (unsigned int) ((xmax - layer->bounds.xmin) * layer->buckets_xy_scalar[0]);
					const unsigned int yi_min = (unsigned int) ((ymin - layer->bounds.ymin) * layer->buckets_xy_scalar[1]);
					const unsigned int yi_max = (unsigned int) ((ymax - layer->bounds.ymin) * layer->buckets_xy_scalar[1]);
					void *face_index_void = SET_UINT_IN_POINTER(face_index);

					unsigned int xi, yi;

					for (yi = yi_min; yi <= yi_max; yi++) {
						unsigned int bucket_index = (layer->buckets_x * yi) + xi_min;
						for (xi = xi_min; xi <= xi_max; xi++, bucket_index++) {
							// unsigned int bucket_index = (layer->buckets_x * yi) + xi; /* correct but do in outer loop */

							BLI_assert(xi < layer->buckets_x);
							BLI_assert(yi < layer->buckets_y);
							BLI_assert(bucket_index < bucket_tot);

							/* check if the bucket intersects with the face */
							/* note: there is a tradeoff here since checking box/tri intersections isn't
							 * as optimal as it could be, but checking pixels against faces they will never
							 * intersect with is likely the greater slowdown here - so check if the cell
							 * intersects the face */
							if (layer_bucket_isect_test(layer, face_index,
							                            xi, yi,
							                            bucket_size_x, bucket_size_y,
							                            bucket_max_rad_squared))
							{
								BLI_linklist_prepend_arena(&bucketstore[bucket_index], face_index_void, arena);
								bucketstore_tot[bucket_index]++;
							}
						}
					}
				}
			}
		}

		{
			/* now convert linknodes into arrays for faster per pixel access */
			unsigned int **buckets_face = MEM_mallocN(bucket_tot * sizeof(unsigned int *), __func__);
			unsigned int bucket_index;

			for (bucket_index = 0; bucket_index < bucket_tot; bucket_index++) {
				if (bucketstore_tot[bucket_index]) {
					unsigned int *bucket = MEM_mallocN((bucketstore_tot[bucket_index] + 1) * sizeof(unsigned int),
					                                   __func__);
					LinkNode *bucket_node;

					buckets_face[bucket_index] = bucket;

					for (bucket_node = bucketstore[bucket_index]; bucket_node; bucket_node = bucket_node->next) {
						*bucket = GET_UINT_FROM_POINTER(bucket_node->link);
						bucket++;
					}
					*bucket = TRI_TERMINATOR_ID;
				}
				else {
					buckets_face[bucket_index] = NULL;
				}
			}

			layer->buckets_face = buckets_face;
		}

		MEM_freeN(bucketstore);
		MEM_freeN(bucketstore_tot);
	}

	BLI_memarena_free(arena);
}
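
/**
 * Build the rasterization data for \a mask: differentiate each spline,
 * scan-fill the filled shapes, create feather quads (including the single
 * pixel fake anti-aliasing feather) and initialize the per-layer buckets.
 */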
void BLI_maskrasterize_handle_init(MaskRasterHandle *mr_handle, struct Mask *mask,
                                   const int width, const int height,
                                   const short do_aspect_correct, const short do_mask_aa,
                                   const short do_feather)
{
	const rctf default_bounds = {0.0f, 1.0f, 0.0f, 1.0f};
	const int resol = SPLINE_RESOL;  /* TODO: real size */
	const float pixel_size = 1.0f / MIN2(width, height);

	const float zvec[3] = {0.0f, 0.0f, 1.0f};
	MaskLayer *masklay;
	unsigned int masklay_index;

	mr_handle->layers_tot = BLI_countlist(&mask->masklayers);
	mr_handle->layers = MEM_mallocN(sizeof(MaskRasterLayer) * mr_handle->layers_tot, STRINGIFY(MaskRasterLayer));
	BLI_rctf_init_minmax(&mr_handle->bounds);
	for (masklay = mask->masklayers.first, masklay_index = 0; masklay; masklay = masklay->next, masklay_index++) {

		const unsigned int tot_splines = BLI_countlist(&masklay->splines);
		/* we need to store vertex ranges for open splines for filling */
		MaskRasterSplineInfo *open_spline_ranges = MEM_callocN(sizeof(*open_spline_ranges) * tot_splines, __func__);
		unsigned int open_spline_index = 0;

		MaskSpline *spline;

		/* scanfill */
		ScanFillContext sf_ctx;
		ScanFillVert *sf_vert = NULL;
		ScanFillVert *sf_vert_next = NULL;
		ScanFillFace *sf_tri;

		unsigned int sf_vert_tot = 0;
		unsigned int tot_feather_quads = 0;

		if (masklay->restrictflag & MASK_RESTRICT_RENDER) {
			/* don't rasterize a render-hidden layer, but initialize it as an
			 * empty dummy so freeing/sampling the handle stays valid */
			MaskRasterLayer *layer = &mr_handle->layers[masklay_index];
			layer->face_tot = 0;
			layer->face_coords = NULL;
			layer->face_array = NULL;
			layer_bucket_init_dummy(layer);
			BLI_rctf_init(&layer->bounds, -1.0f, -1.0f, -1.0f, -1.0f);
			layer->alpha = masklay->alpha;
			layer->blend = masklay->blend;
			layer->blend_flag = masklay->blend_flag;

			MEM_freeN(open_spline_ranges);
			continue;
		}

		BLI_scanfill_begin(&sf_ctx);
		for (spline = masklay->splines.first; spline; spline = spline->next) {
			const unsigned int is_cyclic = (spline->flag & MASK_SPLINE_CYCLIC) != 0;
			const unsigned int is_fill = (spline->flag & MASK_SPLINE_NOFILL) == 0;

			float (*diff_points)[2];
			int tot_diff_point;

			float (*diff_feather_points)[2];
			int tot_diff_feather_points;

			diff_points = BKE_mask_spline_differentiate_with_resolution_ex(
			                  spline, resol, &tot_diff_point);

			if (do_feather) {
				diff_feather_points = BKE_mask_spline_feather_differentiated_points_with_resolution_ex(
				                          spline, resol, &tot_diff_feather_points);
			}
			else {
				tot_diff_feather_points = 0;
				diff_feather_points = NULL;
			}
			if (tot_diff_point > 3) {
				ScanFillVert *sf_vert_prev;
				unsigned int j;

				float co[3];
				co[2] = 0.0f;

				/* aspect correct */
				if (do_aspect_correct) {
					if (width != height) {
						float *fp;
						float *ffp;
						int i;
						float asp;

						if (width < height) {
							fp = &diff_points[0][0];
							ffp = tot_diff_feather_points ? &diff_feather_points[0][0] : NULL;
							asp = (float)width / (float)height;
						}
						else {
							fp = &diff_points[0][1];
							ffp = tot_diff_feather_points ? &diff_feather_points[0][1] : NULL;
							asp = (float)height / (float)width;
						}

						for (i = 0; i < tot_diff_point; i++, fp += 2) {
							(*fp) = (((*fp) - 0.5f) / asp) + 0.5f;
						}

						if (tot_diff_feather_points) {
							for (i = 0; i < tot_diff_feather_points; i++, ffp += 2) {
								(*ffp) = (((*ffp) - 0.5f) / asp) + 0.5f;
							}
						}
					}
				}

				/* fake aa, using small feather */
				if (do_mask_aa == TRUE) {
					if (do_feather == FALSE) {
						tot_diff_feather_points = tot_diff_point;
						diff_feather_points = MEM_mallocN(sizeof(*diff_feather_points) * tot_diff_feather_points,
						                                  __func__);
						/* add single pixel feather */
						maskrasterize_spline_differentiate_point_outset(diff_feather_points, diff_points,
						                                                tot_diff_point, pixel_size, FALSE);
					}
					else {
						/* ensure single pixel feather, on any zero feather areas */
						maskrasterize_spline_differentiate_point_outset(diff_feather_points, diff_points,
						                                                tot_diff_point, pixel_size, TRUE);
					}
				}

				if (is_fill) {
					copy_v2_v2(co, diff_points[0]);
					sf_vert_prev = BLI_scanfill_vert_add(&sf_ctx, co);
					sf_vert_prev->tmp.u = sf_vert_tot;
					sf_vert_prev->keyindex = sf_vert_tot + tot_diff_point;  /* absolute index of feather vert */
					sf_vert_tot++;

					/* TODO, an alternate function so we can avoid double vector copy! */
					for (j = 1; j < tot_diff_point; j++) {
						copy_v2_v2(co, diff_points[j]);
						sf_vert = BLI_scanfill_vert_add(&sf_ctx, co);
						sf_vert->tmp.u = sf_vert_tot;
						sf_vert->keyindex = sf_vert_tot + tot_diff_point;  /* absolute index of feather vert */
						sf_vert_tot++;
					}

					sf_vert = sf_vert_prev;
					sf_vert_prev = sf_ctx.fillvertbase.last;

					for (j = 0; j < tot_diff_point; j++) {
						ScanFillEdge *sf_edge = BLI_scanfill_edge_add(&sf_ctx, sf_vert_prev, sf_vert);
						sf_edge->tmp.c = SF_EDGE_IS_BOUNDARY;

						sf_vert_prev = sf_vert;
						sf_vert = sf_vert->next;
					}
					if (diff_feather_points) {
						float co_feather[3];
						co_feather[2] = 1.0f;

						BLI_assert(tot_diff_feather_points == tot_diff_point);

						/* note: only added for convenience, we don't in fact use these to scanfill,
						 * only to create feather faces after scanfill */
						for (j = 0; j < tot_diff_feather_points; j++) {
							copy_v2_v2(co_feather, diff_feather_points[j]);
							sf_vert = BLI_scanfill_vert_add(&sf_ctx, co_feather);

							/* no need for these attrs */
#if 0
							sf_vert->tmp.u = sf_vert_tot;
							sf_vert->keyindex = sf_vert_tot + tot_diff_point;  /* absolute index of feather vert */
#endif
							sf_vert->keyindex = SF_KEYINDEX_TEMP_ID;
							sf_vert_tot++;
						}

						MEM_freeN(diff_feather_points);
						diff_feather_points = NULL;

						tot_feather_quads += tot_diff_point;
					}
				}
				else {
					/* unfilled spline */
					if (diff_feather_points) {
						float co_feather[3];
						float co_diff[2];

						co_feather[2] = 1.0f;

						open_spline_ranges[open_spline_index].vertex_offset = sf_vert_tot;
						open_spline_ranges[open_spline_index].vertex_total = tot_diff_point;
						open_spline_ranges[open_spline_index].is_cyclic = is_cyclic;
						open_spline_index++;

						/* TODO, an alternate function so we can avoid double vector copy! */
						for (j = 0; j < tot_diff_point; j++) {

							/* center vert */
							copy_v2_v2(co, diff_points[j]);
							sf_vert = BLI_scanfill_vert_add(&sf_ctx, co);
							sf_vert->tmp.u = sf_vert_tot;
							sf_vert->keyindex = SF_KEYINDEX_TEMP_ID;
							sf_vert_tot++;

							/* feather vert A (outset side) */
							copy_v2_v2(co_feather, diff_feather_points[j]);
							sf_vert = BLI_scanfill_vert_add(&sf_ctx, co_feather);
							sf_vert->tmp.u = sf_vert_tot;
							sf_vert->keyindex = SF_KEYINDEX_TEMP_ID;
							sf_vert_tot++;

							/* feather vert B, mirrored over the center point */
							sub_v2_v2v2(co_diff, co, co_feather);
							add_v2_v2v2(co_feather, co, co_diff);
							sf_vert = BLI_scanfill_vert_add(&sf_ctx, co_feather);
							sf_vert->tmp.u = sf_vert_tot;
							sf_vert->keyindex = SF_KEYINDEX_TEMP_ID;
							sf_vert_tot++;

							tot_feather_quads += 2;
						}

						if (!is_cyclic) {
							tot_feather_quads -= 2;
						}

						MEM_freeN(diff_feather_points);

						/* ack, these are in fact tris, but they are extra faces so no matter,
						 * +1 because adding one vert results in 2 tris (joining the existing endpoints)
						 */
						// tot_feather_quads + ((SPLINE_RESOL_CAP + 1) * 2);
					}
				}
			}

			if (diff_points) {
				MEM_freeN(diff_points);
			}
		}
		if (sf_ctx.fillvertbase.first) {
			unsigned int (*face_array)[4], *face;  /* access coords */
			float (*face_coords)[3], *cos;         /* xy, z 0-1 (1.0 == filled) */
			unsigned int sf_tri_tot;
			rctf bounds;
			unsigned int face_index;

			/* now we have all the splines */
			face_coords = MEM_mallocN((sizeof(float) * 3) * sf_vert_tot, "maskrast_face_coords");

			/* init bounds */
			BLI_rctf_init_minmax(&bounds);

			/* coords */
			cos = (float *)face_coords;
			for (sf_vert = sf_ctx.fillvertbase.first; sf_vert; sf_vert = sf_vert_next) {
				sf_vert_next = sf_vert->next;
				copy_v3_v3(cos, sf_vert->co);

				/* remove so as not to interfere with fill (called after) */
				if (sf_vert->keyindex == SF_KEYINDEX_TEMP_ID) {
					BLI_remlink(&sf_ctx.fillvertbase, sf_vert);
				}

				/* bounds */
				BLI_rctf_do_minmax_v(&bounds, cos);

				cos += 3;
			}
			/* main scan-fill */
			sf_tri_tot = BLI_scanfill_calc_ex(&sf_ctx, FALSE, zvec);

			face_array = MEM_mallocN(sizeof(*face_array) * (sf_tri_tot + tot_feather_quads), "maskrast_face_index");

			/* tri's */
			face = (unsigned int *)face_array;
			for (sf_tri = sf_ctx.fillfacebase.first, face_index = 0; sf_tri; sf_tri = sf_tri->next, face_index++) {
				*(face++) = sf_tri->v1->tmp.u;
				*(face++) = sf_tri->v2->tmp.u;
				*(face++) = sf_tri->v3->tmp.u;
				*(face++) = TRI_VERT;
			}

			/* start of feather faces... if we have this set,
			 * 'face_index' is kept from loop above */

			BLI_assert(face_index == sf_tri_tot);

			if (tot_feather_quads) {
				ScanFillEdge *sf_edge;

				for (sf_edge = sf_ctx.filledgebase.first; sf_edge; sf_edge = sf_edge->next) {
					if (sf_edge->tmp.c == SF_EDGE_IS_BOUNDARY) {
						*(face++) = sf_edge->v1->tmp.u;
						*(face++) = sf_edge->v2->tmp.u;
						*(face++) = sf_edge->v2->keyindex;
						*(face++) = sf_edge->v1->keyindex;

						face_index++;
					}
				}
			}
			/* feather only splines */
			while (open_spline_index > 0) {
				unsigned int start_vidx          = open_spline_ranges[--open_spline_index].vertex_offset;
				unsigned int tot_diff_point_sub1 = open_spline_ranges[  open_spline_index].vertex_total - 1;
				unsigned int k, j;

				j = start_vidx;

				/* subtract one since we reference next vertex triple */
				for (k = 0; k < tot_diff_point_sub1; k++, j += 3) {

					BLI_assert(j == start_vidx + (k * 3));

					*(face++) = j + 3;  /* next span */  /* z 1 */
					*(face++) = j + 0;                   /* z 1 */
					*(face++) = j + 1;                   /* z 0 */
					*(face++) = j + 4;  /* next span */  /* z 0 */
					face_index++;

					*(face++) = j + 0;                   /* z 1 */
					*(face++) = j + 3;  /* next span */  /* z 1 */
					*(face++) = j + 5;  /* next span */  /* z 0 */
					*(face++) = j + 2;                   /* z 0 */
					face_index++;
				}

				if (open_spline_ranges[open_spline_index].is_cyclic) {
					*(face++) = start_vidx + 0;  /* next span */  /* z 1 */
					*(face++) = j + 0;                            /* z 1 */
					*(face++) = j + 1;                            /* z 0 */
					*(face++) = start_vidx + 1;  /* next span */  /* z 0 */
					face_index++;

					*(face++) = j + 0;                            /* z 1 */
					*(face++) = start_vidx + 0;  /* next span */  /* z 1 */
					*(face++) = start_vidx + 2;  /* next span */  /* z 0 */
					*(face++) = j + 2;                            /* z 0 */
					face_index++;
				}
			}

			MEM_freeN(open_spline_ranges);

			// fprintf(stderr, "%d %d\n", face_index, sf_tri_tot + tot_feather_quads);

			BLI_assert(face_index == sf_tri_tot + tot_feather_quads);
			{
				MaskRasterLayer *layer = &mr_handle->layers[masklay_index];

				if (BLI_rctf_isect(&default_bounds, &bounds, &bounds)) {
					layer->face_tot = sf_tri_tot + tot_feather_quads;
					layer->face_coords = face_coords;
					layer->face_array = face_array;
					layer->bounds = bounds;

					layer_bucket_init(layer, pixel_size);

					BLI_rctf_union(&mr_handle->bounds, &bounds);
				}
				else {
					MEM_freeN(face_coords);
					MEM_freeN(face_array);

					layer->face_tot = 0;
					layer->face_coords = NULL;
					layer->face_array = NULL;

					layer_bucket_init_dummy(layer);

					BLI_rctf_init(&layer->bounds, -1.0f, -1.0f, -1.0f, -1.0f);
				}

				/* copy as-is */
				layer->alpha = masklay->alpha;
				layer->blend = masklay->blend;
				layer->blend_flag = masklay->blend_flag;
			}

			/* printf("tris %d, feather tris %d\n", sf_tri_tot, tot_feather_quads); */
		}
		else {
			/* no splines to rasterize, initialize the layer as an empty dummy
			 * (same as the hidden-layer case above) */
			MaskRasterLayer *layer = &mr_handle->layers[masklay_index];
			layer->face_tot = 0;
			layer->face_coords = NULL;
			layer->face_array = NULL;
			layer_bucket_init_dummy(layer);
			BLI_rctf_init(&layer->bounds, -1.0f, -1.0f, -1.0f, -1.0f);
			layer->alpha = masklay->alpha;
			layer->blend = masklay->blend;
			layer->blend_flag = masklay->blend_flag;
		}

		BLI_scanfill_end(&sf_ctx);
	}
}
/* --------------------------------------------------------------------- */
/* functions that run inside the sampling thread (keep fast!)             */
/* --------------------------------------------------------------------- */
static float maskrasterize_layer_z_depth_tri(const float pt[2],
                                             const float v1[3], const float v2[3], const float v3[3])
{
	float w[3];
	barycentric_weights_v2(v1, v2, v3, pt, w);
	return (v1[2] * w[0]) + (v2[2] * w[1]) + (v3[2] * w[2]);
}
static float maskrasterize_layer_z_depth_quad(const float pt[2],
                                              const float v1[3], const float v2[3], const float v3[3], const float v4[3])
{
	float w[4];
	barycentric_weights_v2_quad(v1, v2, v3, v4, pt, w);
	return (v1[2] * w[0]) + (v2[2] * w[1]) + (v3[2] * w[2]) + (v4[2] * w[3]);
}
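
/* returns the interpolated z-depth at 'xy' when the face is hit,
 * otherwise 1.0f (callers start with a best-distance of 1.0f) */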
static float maskrasterize_layer_isect(unsigned int *face, float (*cos)[3], const float dist_orig, const float xy[2])
{
	/* we always cast from same place only need xy */
	if (face[3] == TRI_VERT) {
		/* --- tri --- */
#if 0
		/* not essential but avoids unneeded extra lookups */
		if ((cos[0][2] < dist_orig) ||
		    (cos[1][2] < dist_orig) ||
		    (cos[2][2] < dist_orig))
		{
			if (isect_point_tri_v2(xy, cos[face[0]], cos[face[1]], cos[face[2]])) {
				/* we know all tris are close for now */
				return maskrasterize_layer_z_depth_tri(xy, cos[face[0]], cos[face[1]], cos[face[2]]);
			}
		}
#else
		/* we know all tris are close for now */
		if (isect_point_tri_v2(xy, cos[face[0]], cos[face[1]], cos[face[2]])) {
			return 0.0f;
		}
#endif
	}
	else {
		/* --- quad --- */
#if 0
		/* not essential but avoids unneeded extra lookups */
		if ((cos[0][2] < dist_orig) ||
		    (cos[1][2] < dist_orig) ||
		    (cos[2][2] < dist_orig) ||
		    (cos[3][2] < dist_orig))
		{
			if (isect_point_quad_v2(xy, cos[face[0]], cos[face[1]], cos[face[2]], cos[face[3]])) {
				return maskrasterize_layer_z_depth_quad(xy, cos[face[0]], cos[face[1]], cos[face[2]], cos[face[3]]);
			}
		}
#else
		/* test the quad as 2 tris */
		if (isect_point_tri_v2(xy, cos[face[0]], cos[face[1]], cos[face[2]])) {
			return maskrasterize_layer_z_depth_tri(xy, cos[face[0]], cos[face[1]], cos[face[2]]);
		}
		else if (isect_point_tri_v2(xy, cos[face[0]], cos[face[2]], cos[face[3]])) {
			return maskrasterize_layer_z_depth_tri(xy, cos[face[0]], cos[face[2]], cos[face[3]]);
		}

		/* cheat - we know first 2 verts are z 0.0f and second 2 are z 1.0f */
		/* ... worth looking into */
#endif
	}

	return 1.0f;
}
BLI_INLINE unsigned int layer_bucket_index_from_xy(MaskRasterLayer *layer, const float xy[2])
{
	BLI_assert(BLI_in_rctf_v(&layer->bounds, xy));

	return ( (unsigned int)((xy[0] - layer->bounds.xmin) * layer->buckets_xy_scalar[0])) +
	       (((unsigned int)((xy[1] - layer->bounds.ymin) * layer->buckets_xy_scalar[1])) * layer->buckets_x);
}
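
/* walk the face list of the bucket containing 'xy' and return the smallest
 * interpolated depth, 1.0f when nothing is hit */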
static float layer_bucket_depth_from_xy(MaskRasterLayer *layer, const float xy[2])
{
	unsigned int index = layer_bucket_index_from_xy(layer, xy);
	unsigned int *face_index = layer->buckets_face[index];

	if (face_index) {
		unsigned int (*face_array)[4] = layer->face_array;
		float (*cos)[3] = layer->face_coords;
		float best_dist = 1.0f;
		while (*face_index != TRI_TERMINATOR_ID) {
			const float test_dist = maskrasterize_layer_isect(face_array[*face_index], cos, best_dist, xy);
			if (test_dist < best_dist) {
				best_dist = test_dist;
				/* comparing with 0.0f is OK here because triangles are always zero depth */
				if (best_dist == 0.0f) {
					/* bail early, we're as close as possible */
					return 0.0f;
				}
			}
			face_index++;
		}
		return best_dist;
	}
	else {
		return 1.0f;
	}
}
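
/**
 * Sample the mask at coordinate \a xy: blends all layers into a single
 * 0..1 value, applying each layer's alpha, invert flag and blend mode.
 */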
float BLI_maskrasterize_handle_sample(MaskRasterHandle *mr_handle, const float xy[2])
{
	/* can't do this because some layers may invert */
	/* if (BLI_in_rctf_v(&mr_handle->bounds, xy)) */

	const unsigned int layers_tot = mr_handle->layers_tot;
	unsigned int i;
	MaskRasterLayer *layer = mr_handle->layers;
	float value = 0.0f;

	for (i = 0; i < layers_tot; i++, layer++) {
		float value_layer;

		if (BLI_in_rctf_v(&layer->bounds, xy)) {
			const float dist = 1.0f - layer_bucket_depth_from_xy(layer, xy);
			/* smooth-step falloff */
			const float dist_ease = (3.0f * dist * dist - 2.0f * dist * dist * dist);

			value_layer = dist_ease * layer->alpha;
		}
		else {
			value_layer = 0.0f;
		}

		if (layer->blend_flag & MASK_BLENDFLAG_INVERT) {
			value_layer = 1.0f - value_layer;
		}

		switch (layer->blend) {
			case MASK_BLEND_SUBTRACT:
				value -= value_layer;
				break;
			case MASK_BLEND_ADD:
			default:
				value += value_layer;
				break;
		}
	}

	return CLAMPIS(value, 0.0f, 1.0f);
}
#endif /* USE_RASKTER */