1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * ***** END GPL LICENSE BLOCK *****
19  */
20
21 /** \file blender/bmesh/intern/bmesh_polygon_edgenet.c
22  *  \ingroup bmesh
23  *
24  * This file contains functions for splitting faces into isolated regions,
25  * defined by connected edges.
26  */
27 // #define DEBUG_PRINT
28
29 #include "MEM_guardedalloc.h"
30
31 #include "BLI_math.h"
32 #include "BLI_memarena.h"
33 #include "BLI_array.h"
34 #include "BLI_alloca.h"
35 #include "BLI_utildefines_stack.h"
36 #include "BLI_linklist_stack.h"
37 #include "BLI_sort.h"
38 #include "BLI_sort_utils.h"
39 #include "BLI_kdopbvh.h"
40
41 #include "BKE_customdata.h"
42
43 #include "bmesh.h"
44 #include "intern/bmesh_private.h"
45
46 /* -------------------------------------------------------------------- */
47 /* Face Split Edge-Net */
48
49 /** \name BM_face_split_edgenet and helper functions.
50  *
51  * \note Don't use #BM_edge_is_wire or #BM_edge_is_boundary
52  * since we need to take flagged faces into account.
53  * Also take care when accessing e->l directly.
54  *
55  * \{ */
56
57 /* Note: All these flags _must_ be cleared on exit */
58
59 /* face is part of the edge-net (including the original face we're splitting) */
60 #define FACE_NET  _FLAG_WALK
61 /* edge is part of the edge-net we're filling */
62 #define EDGE_NET   _FLAG_WALK
63 /* tag verts we've visited */
64 #define VERT_VISIT _FLAG_WALK
65 #define VERT_IN_QUEUE _FLAG_WALK_ALT
66
67 struct VertOrder {
68         float   angle;
69         BMVert *v;
70 };
71
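/* Return the number of loops in the radial cycle of 'e' whose face is tagged FACE_NET. */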
72 static uint bm_edge_flagged_radial_count(BMEdge *e)
73 {
74         uint count = 0;
75         BMLoop *l;
76
77         if ((l = e->l)) {
78                 do {
79                         if (BM_ELEM_API_FLAG_TEST(l->f, FACE_NET)) {
80                                 count++;
81                         }
82                 } while ((l = l->radial_next) != e->l);
83         }
84         return count;
85 }
86
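/* Return the first loop around 'e' whose face is tagged FACE_NET, or NULL if there is none. */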
87 static BMLoop *bm_edge_flagged_radial_first(BMEdge *e)
88 {
89         BMLoop *l;
90
91         if ((l = e->l)) {
92                 do {
93                         if (BM_ELEM_API_FLAG_TEST(l->f, FACE_NET)) {
94                                 return l;
95                         }
96                 } while ((l = l->radial_next) != e->l);
97         }
98         return NULL;
99 }
100
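/* Project the direction (v1 - v2) into 2D using 'axis_mat' and normalize the result into 'out'. */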
101 static void normalize_v2_m3_v3v3(float out[2], float axis_mat[3][3], const float v1[3], const float v2[3])
102 {
103         float dir[3];
104         sub_v3_v3v3(dir, v1, v2);
105         mul_v2_m3v3(out, axis_mat, dir);
106         normalize_v2(out);
107 }
108
109
110 /**
111  * \note Be sure to update #bm_face_split_edgenet_find_loop_pair_exists
112  * when making changes to the edge picking logic.
113  */
114 static bool bm_face_split_edgenet_find_loop_pair(
115         BMVert *v_init,
116         const float face_normal[3], float face_normal_matrix[3][3],
117         BMEdge *e_pair[2])
118 {
119         /* Always find one boundary edge (to determine winding)
120          * and one wire (if available), otherwise another boundary.
121          */
122
123         /* detect winding */
124         BMLoop *l_walk;
125         bool swap;
126
127         BLI_SMALLSTACK_DECLARE(edges_boundary, BMEdge *);
128         BLI_SMALLSTACK_DECLARE(edges_wire,     BMEdge *);
129         int edges_boundary_len = 0;
130         int edges_wire_len = 0;
131
132         {
133                 BMEdge *e, *e_first;
134                 e = e_first = v_init->e;
135                 do {
136                         if (BM_ELEM_API_FLAG_TEST(e, EDGE_NET)) {
137                                 const uint count = bm_edge_flagged_radial_count(e);
138                                 if (count == 1) {
139                                         BLI_SMALLSTACK_PUSH(edges_boundary, e);
140                                         edges_boundary_len++;
141                                 }
142                                 else if (count == 0) {
143                                         BLI_SMALLSTACK_PUSH(edges_wire, e);
144                                         edges_wire_len++;
145                                 }
146                         }
147                 } while ((e = BM_DISK_EDGE_NEXT(e, v_init)) != e_first);
148         }
149
150         /* first edge should always be boundary */
151         if (edges_boundary_len == 0) {
152                 return false;
153         }
154         e_pair[0] = BLI_SMALLSTACK_POP(edges_boundary);
155
156         /* used to hold boundary OR wire edges */
157         BLI_SMALLSTACK_DECLARE(edges_search, BMEdge *);
158
159         /* attempt one boundary and one wire, or 2 boundary */
160         if (edges_wire_len == 0) {
161                 if (edges_boundary_len > 1) {
162                         e_pair[1] = BLI_SMALLSTACK_POP(edges_boundary);
163
164                         if (edges_boundary_len > 2) {
165                                 BLI_SMALLSTACK_SWAP(edges_search, edges_boundary);
166                         }
167                 }
168                 else {
169                         /* one boundary and no wire */
170                         return false;
171                 }
172         }
173         else {
174                 e_pair[1] = BLI_SMALLSTACK_POP(edges_wire);
175                 if (edges_wire_len > 1) {
176                         BLI_SMALLSTACK_SWAP(edges_search, edges_wire);
177                 }
178         }
179
180         /* if we swapped above, search this list for the best edge */
181         if (!BLI_SMALLSTACK_IS_EMPTY(edges_search)) {
182                 /* find the best edge in 'edges_search' to use for 'e_pair[1]' */
183                 const BMVert *v_prev = BM_edge_other_vert(e_pair[0], v_init);
184                 const BMVert *v_next = BM_edge_other_vert(e_pair[1], v_init);
185
186                 float dir_prev[2], dir_next[2];
187
188                 normalize_v2_m3_v3v3(dir_prev, face_normal_matrix, v_prev->co, v_init->co);
189                 normalize_v2_m3_v3v3(dir_next, face_normal_matrix, v_next->co, v_init->co);
190                 float angle_best_cos = dot_v2v2(dir_next, dir_prev);
191
192                 BMEdge *e;
193                 while ((e = BLI_SMALLSTACK_POP(edges_search))) {
194                         v_next = BM_edge_other_vert(e, v_init);
195                         float dir_test[2];
196
197                         normalize_v2_m3_v3v3(dir_test, face_normal_matrix, v_next->co, v_init->co);
198                         const float angle_test_cos = dot_v2v2(dir_prev, dir_test);
199
200                         if (angle_test_cos > angle_best_cos) {
201                                 angle_best_cos = angle_test_cos;
202                                 e_pair[1] = e;
203                         }
204                 }
205         }
206
207         /* flip based on winding */
208         l_walk = bm_edge_flagged_radial_first(e_pair[0]);
209         swap = false;
210         if (face_normal == l_walk->f->no) {
211                 swap = !swap;
212         }
213         if (l_walk->v != v_init) {
214                 swap = !swap;
215         }
216         if (swap) {
217                 SWAP(BMEdge *, e_pair[0], e_pair[1]);
218         }
219
220         return true;
221 }
222
223 /**
224  * A reduced version of #bm_face_split_edgenet_find_loop_pair
225  * that only checks if it would return true.
226  *
227  * \note There is no use in caching resulting edges here,
228  * since between this check and running #bm_face_split_edgenet_find_loop,
229  * the selected edges may have had faces attached.
230  */
231 static bool bm_face_split_edgenet_find_loop_pair_exists(
232         BMVert *v_init)
233 {
234         int edges_boundary_len = 0;
235         int edges_wire_len = 0;
236
237         {
238                 BMEdge *e, *e_first;
239                 e = e_first = v_init->e;
240                 do {
241                         if (BM_ELEM_API_FLAG_TEST(e, EDGE_NET)) {
242                                 const uint count = bm_edge_flagged_radial_count(e);
243                                 if (count == 1) {
244                                         edges_boundary_len++;
245                                 }
246                                 else if (count == 0) {
247                                         edges_wire_len++;
248                                 }
249                         }
250                 } while ((e = BM_DISK_EDGE_NEXT(e, v_init)) != e_first);
251         }
252
253         /* first edge should always be boundary */
254         if (edges_boundary_len == 0) {
255                 return false;
256         }
257
258         /* attempt one boundary and one wire, or 2 boundary */
259         if (edges_wire_len == 0) {
260                 if (edges_boundary_len >= 2) {
261                         /* pass */
262                 }
263                 else {
264                         /* one boundary and no wire */
265                         return false;
266                 }
267         }
268         else {
269                 /* pass */
270         }
271
272         return true;
273 }
274
275 static bool bm_face_split_edgenet_find_loop_walk(
276         BMVert *v_init, const float face_normal[3],
277         /* cache to avoid realloc every time */
278         struct VertOrder *edge_order, const uint edge_order_len,
279         BMEdge *e_pair[2])
280 {
281         /* fast-path for the common case (avoid push-pop).
282          * Also avoids tagging as visited since we know we
283          * can't reach these verts some other way */
284 #define USE_FASTPATH_NOFORK
285
286         BMVert *v;
287         BMVert *v_dst;
288         bool found = false;
289
290         struct VertOrder *eo;
291         STACK_DECLARE(edge_order);
292
293         /* store visited verts so we can clear the visit flag after execution */
294         BLI_SMALLSTACK_DECLARE(vert_visit, BMVert *);
295
296         /* likely this will stay very small
297          * all verts pushed into this stack _must_ have their previous edges set! */
298         BLI_SMALLSTACK_DECLARE(vert_stack, BMVert *);
299         BLI_SMALLSTACK_DECLARE(vert_stack_next, BMVert *);
300
301         STACK_INIT(edge_order, edge_order_len);
302
303         /* start stepping */
304         v = BM_edge_other_vert(e_pair[0], v_init);
305         v->e = e_pair[0];
306         BLI_SMALLSTACK_PUSH(vert_stack, v);
307
308         v_dst = BM_edge_other_vert(e_pair[1], v_init);
309
310 #ifdef DEBUG_PRINT
311         printf("%s: vert (search) %d\n", __func__, BM_elem_index_get(v_init));
312 #endif
313
314         /* This loop will keep stepping over the best possible edge,
315          * in most cases it finds the direct route to close the face.
316          *
317          * In cases where paths can't be closed,
318          * alternatives are stored in the 'vert_stack'.
319          */
320         while ((v = BLI_SMALLSTACK_POP_EX(vert_stack, vert_stack_next))) {
321 #ifdef USE_FASTPATH_NOFORK
322 walk_nofork:
323 #else
324                 BLI_SMALLSTACK_PUSH(vert_visit, v);
325                 BM_ELEM_API_FLAG_ENABLE(v, VERT_VISIT);
326 #endif
327
328                 BLI_assert(STACK_SIZE(edge_order) == 0);
329
330                 /* check if we're done! */
331                 if (v == v_dst) {
332                         found = true;
333                         goto finally;
334                 }
335
336                 BMEdge *e_next, *e_first;
337                 e_first = v->e;
338                 e_next = BM_DISK_EDGE_NEXT(e_first, v);  /* always skip this vert's edge */
339
340                 /* in rare cases there may be edges with a single connecting vertex */
341                 if (e_next != e_first) {
342                         do {
343                                 if ((BM_ELEM_API_FLAG_TEST(e_next, EDGE_NET)) &&
344                                     (bm_edge_flagged_radial_count(e_next) < 2))
345                                 {
346                                         BMVert *v_next;
347
348                                         v_next = BM_edge_other_vert(e_next, v);
349                                         BLI_assert(v->e != e_next);
350
351 #ifdef DEBUG_PRINT
352                                         /* indent and print */
353                                         {
354                                                 BMVert *_v = v;
355                                                 do {
356                                                         printf("  ");
357                                                 } while ((_v = BM_edge_other_vert(_v->e, _v)) != v_init);
358                                                 printf("vert %d -> %d (add=%d)\n",
359                                                        BM_elem_index_get(v), BM_elem_index_get(v_next),
360                                                        BM_ELEM_API_FLAG_TEST(v_next, VERT_VISIT) == 0);
361                                         }
362 #endif
363
364                                         if (!BM_ELEM_API_FLAG_TEST(v_next, VERT_VISIT)) {
365                                                 eo = STACK_PUSH_RET_PTR(edge_order);
366                                                 eo->v = v_next;
367
368                                                 v_next->e = e_next;
369                                         }
370                                 }
371                         } while ((e_next = BM_DISK_EDGE_NEXT(e_next, v)) != e_first);
372                 }
373
374 #ifdef USE_FASTPATH_NOFORK
375                 if (STACK_SIZE(edge_order) == 1) {
376                         eo = STACK_POP_PTR(edge_order);
377                         v = eo->v;
378
379                         goto walk_nofork;
380                 }
381 #endif
382
383                 /* sort by angle if needed */
384                 if (STACK_SIZE(edge_order) > 1) {
385                         uint j;
386                         BMVert *v_prev = BM_edge_other_vert(v->e, v);
387
388                         for (j = 0; j < STACK_SIZE(edge_order); j++) {
389                                 edge_order[j].angle = angle_signed_on_axis_v3v3v3_v3(v_prev->co, v->co, edge_order[j].v->co, face_normal);
390                         }
391                         qsort(edge_order, STACK_SIZE(edge_order), sizeof(struct VertOrder), BLI_sortutil_cmp_float_reverse);
392
393 #ifdef USE_FASTPATH_NOFORK
394                         /* only tag forks */
395                         BLI_SMALLSTACK_PUSH(vert_visit, v);
396                         BM_ELEM_API_FLAG_ENABLE(v, VERT_VISIT);
397 #endif
398                 }
399
400                 while ((eo = STACK_POP_PTR(edge_order))) {
401                         BLI_SMALLSTACK_PUSH(vert_stack_next, eo->v);
402                 }
403
404                 if (!BLI_SMALLSTACK_IS_EMPTY(vert_stack_next)) {
405                         BLI_SMALLSTACK_SWAP(vert_stack, vert_stack_next);
406                 }
407         }
408
409
410 finally:
411         /* clear flag for next execution */
412         while ((v = BLI_SMALLSTACK_POP(vert_visit))) {
413                 BM_ELEM_API_FLAG_DISABLE(v, VERT_VISIT);
414         }
415
416         return found;
417
418 #undef USE_FASTPATH_NOFORK
419 }
420
421 static bool bm_face_split_edgenet_find_loop(
422         BMVert *v_init, const float face_normal[3], float face_normal_matrix[3][3],
423         /* cache to avoid realloc every time */
424         struct VertOrder *edge_order, const uint edge_order_len,
425         BMVert **r_face_verts, int *r_face_verts_len)
426 {
427         BMEdge *e_pair[2];
428         BMVert *v;
429
430         if (!bm_face_split_edgenet_find_loop_pair(v_init, face_normal, face_normal_matrix, e_pair)) {
431                 return false;
432         }
433
434         BLI_assert((bm_edge_flagged_radial_count(e_pair[0]) == 1) ||
435                    (bm_edge_flagged_radial_count(e_pair[1]) == 1));
436
437         if (bm_face_split_edgenet_find_loop_walk(v_init, face_normal, edge_order, edge_order_len, e_pair)) {
438                 uint i = 0;
439
440                 r_face_verts[i++] = v_init;
441                 v = BM_edge_other_vert(e_pair[1], v_init);
442                 do {
443                         r_face_verts[i++] = v;
444                 } while ((v = BM_edge_other_vert(v->e, v)) != v_init);
445                 *r_face_verts_len = i;
446                 return (i > 2) ? true : false;
447         }
448         else {
449                 return false;
450         }
451 }
452
453 /**
454  * Splits a face into many smaller faces defined by an edge-net.
455  * Handles customdata and degenerate cases.
456  *
457  * - Isolated holes or unsupported face configurations will be ignored.
458  * - customdata calculations aren't efficient
459  *   (need to calculate weights for each vert).
460  */
461 bool BM_face_split_edgenet(
462         BMesh *bm,
463         BMFace *f, BMEdge **edge_net, const int edge_net_len,
464         BMFace ***r_face_arr, int *r_face_arr_len)
465 {
466         /* re-use for new face verts */
467         BMVert **face_verts;
468         int      face_verts_len;
469
470         BMFace **face_arr = NULL;
471         BLI_array_declare(face_arr);
472
473         BMVert **vert_queue;
474         STACK_DECLARE(vert_queue);
475         int i;
476
477         struct VertOrder *edge_order;
478         const uint edge_order_len = edge_net_len + 2;
479
480         BMVert *v;
481
482         BMLoop *l_iter, *l_first;
483
484
485         if (!edge_net_len) {
486                 if (r_face_arr) {
487                         *r_face_arr = NULL;
488                         *r_face_arr_len = 0;
489                 }
490                 return false;
491         }
492
493         /* over-alloc (probably only 2-4 are used in most cases), for the biggest fan */
494         edge_order = BLI_array_alloca(edge_order, edge_order_len);
495
496         /* use later */
497         face_verts = BLI_array_alloca(face_verts, edge_net_len + f->len);
498
499         vert_queue = BLI_array_alloca(vert_queue, edge_net_len + f->len);
500         STACK_INIT(vert_queue, f->len + edge_net_len);
501
502         BLI_assert(BM_ELEM_API_FLAG_TEST(f, FACE_NET) == 0);
503         BM_ELEM_API_FLAG_ENABLE(f, FACE_NET);
504
505 #ifdef DEBUG
506         for (i = 0; i < edge_net_len; i++) {
507                 BLI_assert(BM_ELEM_API_FLAG_TEST(edge_net[i], EDGE_NET) == 0);
508                 BLI_assert(BM_edge_in_face(edge_net[i], f) == false);
509         }
510         l_iter = l_first = BM_FACE_FIRST_LOOP(f);
511         do {
512                 BLI_assert(BM_ELEM_API_FLAG_TEST(l_iter->e, EDGE_NET) == 0);
513         } while ((l_iter = l_iter->next) != l_first);
514 #endif
515
516         /* Note: 'VERT_IN_QUEUE' is often not needed at all,
517          * however in rare cases verts are added multiple times to the queue,
518          * that on its own is harmless, but in _very_ rare cases
519          * the queue will overflow its maximum size,
520          * so we'd better be strict about this! see: T51539 */
521
522         for (i = 0; i < edge_net_len; i++) {
523                 BM_ELEM_API_FLAG_ENABLE(edge_net[i], EDGE_NET);
524                 BM_ELEM_API_FLAG_DISABLE(edge_net[i]->v1, VERT_IN_QUEUE);
525                 BM_ELEM_API_FLAG_DISABLE(edge_net[i]->v2, VERT_IN_QUEUE);
526         }
527         l_iter = l_first = BM_FACE_FIRST_LOOP(f);
528         do {
529                 BM_ELEM_API_FLAG_ENABLE(l_iter->e, EDGE_NET);
530                 BM_ELEM_API_FLAG_DISABLE(l_iter->v, VERT_IN_QUEUE);
531         } while ((l_iter = l_iter->next) != l_first);
532
533         float face_normal_matrix[3][3];
534         axis_dominant_v3_to_m3(face_normal_matrix, f->no);
535
536
537         /* any vert can be used to begin with */
538         STACK_PUSH(vert_queue, l_first->v);
539         BM_ELEM_API_FLAG_ENABLE(l_first->v, VERT_IN_QUEUE);
540
541         while ((v = STACK_POP(vert_queue))) {
542                 BM_ELEM_API_FLAG_DISABLE(v, VERT_IN_QUEUE);
543                 if (bm_face_split_edgenet_find_loop(
544                         v, f->no, face_normal_matrix,
545                         edge_order, edge_order_len, face_verts, &face_verts_len))
546                 {
547                         BMFace *f_new;
548
549                         f_new = BM_face_create_verts(bm, face_verts, face_verts_len, f, BM_CREATE_NOP, false);
550
551                         for (i = 0; i < edge_net_len; i++) {
552                                 BLI_assert(BM_ELEM_API_FLAG_TEST(edge_net[i], EDGE_NET));
553                         }
554
555                         if (f_new) {
556                                 BLI_array_append(face_arr, f_new);
557                                 copy_v3_v3(f_new->no, f->no);
558
559                                 /* warning, normally don't do this,
560                                  * it's needed for mesh intersection, which tracks face-sides based on selection */
561                                 f_new->head.hflag = f->head.hflag;
562                                 if (f->head.hflag & BM_ELEM_SELECT) {
563                                         bm->totfacesel++;
564                                 }
565
566                                 BM_ELEM_API_FLAG_ENABLE(f_new, FACE_NET);
567
568                                 /* add new verts to keep finding loops for
569                                  * (verts between boundary and manifold edges) */
570                                 l_iter = l_first = BM_FACE_FIRST_LOOP(f_new);
571                                 do {
572                                         /* Avoid adding to queue multiple times (not common but happens). */
573                                         if (!BM_ELEM_API_FLAG_TEST(l_iter->v, VERT_IN_QUEUE) &&
574                                             bm_face_split_edgenet_find_loop_pair_exists(l_iter->v))
575                                         {
576                                                 STACK_PUSH(vert_queue, l_iter->v);
577                                                 BM_ELEM_API_FLAG_ENABLE(l_iter->v, VERT_IN_QUEUE);
578                                         }
579                                 } while ((l_iter = l_iter->next) != l_first);
580                         }
581                 }
582         }
583
584
585         if (CustomData_has_math(&bm->ldata)) {
586                 /* reuse VERT_VISIT here to tag verts already interpolated */
587                 BMIter iter;
588                 BMLoop *l_other;
589
590                 /* see: #BM_loop_interp_from_face for similar logic  */
591                 void **blocks   = BLI_array_alloca(blocks, f->len);
592                 float (*cos_2d)[2] = BLI_array_alloca(cos_2d, f->len);
593                 float *w        = BLI_array_alloca(w, f->len);
594                 float axis_mat[3][3];
595                 float co[2];
596
597                 /* interior loops */
598                 axis_dominant_v3_to_m3(axis_mat, f->no);
599
600
601                 /* first simply copy from existing face */
602                 i = 0;
603                 l_iter = l_first = BM_FACE_FIRST_LOOP(f);
604                 do {
605                         BM_ITER_ELEM (l_other, &iter, l_iter->v, BM_LOOPS_OF_VERT) {
606                                 if ((l_other->f != f) && BM_ELEM_API_FLAG_TEST(l_other->f, FACE_NET)) {
607                                         CustomData_bmesh_copy_data(&bm->ldata, &bm->ldata,
608                                                                    l_iter->head.data, &l_other->head.data);
609                                 }
610                         }
611                         /* tag not to interpolate */
612                         BM_ELEM_API_FLAG_ENABLE(l_iter->v, VERT_VISIT);
613
614
615                         mul_v2_m3v3(cos_2d[i], axis_mat, l_iter->v->co);
616                         blocks[i] = l_iter->head.data;
617
618                 } while ((void)i++, (l_iter = l_iter->next) != l_first);
619
620
621                 for (i = 0; i < edge_net_len; i++) {
622                         BM_ITER_ELEM (v, &iter, edge_net[i], BM_VERTS_OF_EDGE) {
623                                 if (!BM_ELEM_API_FLAG_TEST(v, VERT_VISIT)) {
624                                         BMIter liter;
625
626                                         BM_ELEM_API_FLAG_ENABLE(v, VERT_VISIT);
627
628                                         /* interpolate this loop, then copy to the rest */
629                                         l_first = NULL;
630
631                                         BM_ITER_ELEM (l_iter, &liter, v, BM_LOOPS_OF_VERT) {
632                                                 if (BM_ELEM_API_FLAG_TEST(l_iter->f, FACE_NET)) {
633                                                         if (l_first == NULL) {
634                                                                 mul_v2_m3v3(co, axis_mat, v->co);
635                                                                 interp_weights_poly_v2(w, cos_2d, f->len, co);
636                                                                 CustomData_bmesh_interp(
637                                                                         &bm->ldata, (const void **)blocks,
638                                                                         w, NULL, f->len, l_iter->head.data);
639                                                                 l_first = l_iter;
640                                                         }
641                                                         else {
642                                                                 CustomData_bmesh_copy_data(&bm->ldata, &bm->ldata,
643                                                                                            l_first->head.data, &l_iter->head.data);
644                                                         }
645                                                 }
646                                         }
647                                 }
648                         }
649                 }
650         }
651
652
653
654         /* cleanup */
655         for (i = 0; i < edge_net_len; i++) {
656                 BM_ELEM_API_FLAG_DISABLE(edge_net[i], EDGE_NET);
657                 /* from interp only */
658                 BM_ELEM_API_FLAG_DISABLE(edge_net[i]->v1, VERT_VISIT);
659                 BM_ELEM_API_FLAG_DISABLE(edge_net[i]->v2, VERT_VISIT);
660         }
661         l_iter = l_first = BM_FACE_FIRST_LOOP(f);
662         do {
663                 BM_ELEM_API_FLAG_DISABLE(l_iter->e, EDGE_NET);
664                 /* from interp only */
665                 BM_ELEM_API_FLAG_DISABLE(l_iter->v, VERT_VISIT);
666         } while ((l_iter = l_iter->next) != l_first);
667
668         if (BLI_array_count(face_arr)) {
669                 bmesh_face_swap_data(f, face_arr[0]);
670                 BM_face_kill(bm, face_arr[0]);
671                 face_arr[0] = f;
672         }
673         else {
674                 BM_ELEM_API_FLAG_DISABLE(f, FACE_NET);
675         }
676
677         for (i = 0; i < BLI_array_count(face_arr); i++) {
678                 BM_ELEM_API_FLAG_DISABLE(face_arr[i], FACE_NET);
679         }
680
681         if (r_face_arr) {
682                 *r_face_arr = face_arr;
683                 *r_face_arr_len = BLI_array_count(face_arr);
684         }
685         else {
686                 if (face_arr) {
687                         MEM_freeN(face_arr);
688                 }
689         }
690
691         return true;
692 }
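
/* A minimal usage sketch (illustration only, not code from this file); 'bm', 'f',
 * 'edge_net' and 'edge_net_len' are assumed to be provided by the caller.
 * The returned array is allocated with the guarded allocator, so free it with MEM_freeN().
 *
 *     BMFace **face_arr;
 *     int face_arr_len;
 *     if (BM_face_split_edgenet(bm, f, edge_net, edge_net_len, &face_arr, &face_arr_len)) {
 *         ... use 'face_arr' (the original face 'f' is re-used as the first new face) ...
 *         if (face_arr) {
 *             MEM_freeN(face_arr);
 *         }
 *     }
 */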
693
694 #undef FACE_NET
695 #undef VERT_VISIT
696 #undef EDGE_NET
697
698 /** \} */
699
700
701 /* -------------------------------------------------------------------- */
702 /* Face Split Edge-Net Connect Islands */
703
704 /** \name BM_face_split_edgenet_connect_islands and helper functions.
705  *
706  * Connect isolated mesh 'islands' so they form legal regions from which we can create faces.
707  *
708  * Intended to be used as a pre-processing step for #BM_face_split_edgenet.
709  *
710  * \warning Currently this risks running out of stack memory (#alloca),
711  * likely we'll pass in a memory arena (cleared each use) eventually.
712  *
713  * \{ */
714
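/* Typical flow (an illustrative sketch only; 'bm', 'f', 'edge_net', 'edge_net_len' and the
 * MemArena 'arena' are assumed to be owned by the caller): connect the islands as a
 * pre-process, then split the face with the possibly extended edge-net, and clear the
 * arena afterwards since the new edge array is allocated from it.
 *
 *     BMEdge **edge_net_new;
 *     uint edge_net_new_len;
 *     if (BM_face_split_edgenet_connect_islands(
 *             bm, f, edge_net, edge_net_len, false, arena,
 *             &edge_net_new, &edge_net_new_len))
 *     {
 *         edge_net = edge_net_new;
 *         edge_net_len = edge_net_new_len;
 *     }
 *     BM_face_split_edgenet(bm, f, edge_net, (int)edge_net_len, NULL, NULL);
 *     BLI_memarena_clear(arena);
 */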
715
716 #define USE_PARTIAL_CONNECT
717
718
719 #define VERT_IS_VALID BM_ELEM_INTERNAL_TAG
720
721 /* can be X or Y */
722 #define SORT_AXIS 0
723
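/* Return true when the segment (v_a, v_b) intersects 'e' in 2D without sharing a vertex
 * with it, storing the intersection point in 'r_isect'. */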
724 BLI_INLINE bool edge_isect_verts_point_2d(
725         const BMEdge *e, const BMVert *v_a, const BMVert *v_b,
726         float r_isect[2])
727 {
728         /* This bias seems like it could be too large,
729          * mostly it's not needed; see T52329 for an example where it is. */
730         const float endpoint_bias = 1e-4f;
731         return ((isect_seg_seg_v2_point_ex(v_a->co, v_b->co, e->v1->co, e->v2->co, endpoint_bias, r_isect) == 1) &&
732                 ((e->v1 != v_a) && (e->v2 != v_a) && (e->v1 != v_b)  && (e->v2 != v_b)));
733 }
734
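/* Lexicographic compare of two 2D points (primary axis first), returning -1, 0 or 1. */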
735 BLI_INLINE int axis_pt_cmp(const float pt_a[2], const float pt_b[2])
736 {
737         if (pt_a[0] < pt_b[0]) {
738                 return -1;
739         }
740         if (pt_a[0] > pt_b[0]) {
741                 return 1;
742         }
743         if (pt_a[1] < pt_b[1]) {
744                 return -1;
745         }
746         if (pt_a[1] > pt_b[1]) {
747                 return 1;
748         }
749         return 0;
750 }
751
752 /**
753  * Represents isolated edge-links,
754  * each island owns contiguous slices of the vert array.
755  * (edges remain in `edge_links`).
756  */
757 struct EdgeGroupIsland {
758         LinkNode edge_links;  /* keep first */
759         uint vert_len, edge_len;
760
761         /* Set the following vars once we have >1 groups */
762
763         /* set when an edge in a previous group connects to this one,
764          * so there's no need to create one pointing back. */
765         uint has_prev_edge : 1;
766
767         /* verts in the group which have the lowest & highest values,
768          * the lower vertex is connected to the first edge */
769         struct {
770                 BMVert *min, *max;
771                 /* used for sorting only */
772                 float min_axis[2];
773                 float max_axis[2];
774         } vert_span;
775 };
776
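/* qsort callback: order islands by their minimum vertex position, falling back to the maximum on ties. */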
777 static int group_min_cmp_fn(const void *p1, const void *p2)
778 {
779         const struct EdgeGroupIsland *g1 = *(struct EdgeGroupIsland **)p1;
780         const struct EdgeGroupIsland *g2 = *(struct EdgeGroupIsland **)p2;
781         /* min->co[SORT_AXIS] hasn't been applied yet */
782         int test = axis_pt_cmp(g1->vert_span.min_axis, g2->vert_span.min_axis);
783         if (UNLIKELY(test == 0)) {
784                 test = axis_pt_cmp(g1->vert_span.max_axis, g2->vert_span.max_axis);
785         }
786         return test;
787 }
788
789 struct Edges_VertVert_BVHTreeTest {
790         float dist_orig;
791         BMEdge **edge_arr;
792
793         BMVert *v_origin;
794         BMVert *v_other;
795
796         const uint *vert_range;
797 };
798
799 struct Edges_VertRay_BVHTreeTest {
800         BMEdge **edge_arr;
801
802         BMVert *v_origin;
803
804         const uint *vert_range;
805 };
806
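/* BVH ray-cast callback for vert-to-vert casts: accept the closest intersecting edge
 * whose verts lie outside the current group's 'vert_range'. */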
807 static void bvhtree_test_edges_isect_2d_vert_cb(
808         void *user_data, int index, const BVHTreeRay *UNUSED(ray), BVHTreeRayHit *hit)
809 {
810         struct Edges_VertVert_BVHTreeTest *data = user_data;
811         const BMEdge *e = data->edge_arr[index];
812         const int v1_index = BM_elem_index_get(e->v1);
813         float co_isect[2];
814
815         if (edge_isect_verts_point_2d(e, data->v_origin, data->v_other, co_isect)) {
816                 const float t = line_point_factor_v2(co_isect, data->v_origin->co, data->v_other->co);
817                 const float dist_new = data->dist_orig * t;
818                 /* avoid float precision issues, it's possible this is greater,
819                  * check above zero to allow some overlap
820                  * (and needed for partial-connect which will overlap vertices) */
821                 if (LIKELY((dist_new < hit->dist) && (dist_new > 0.0f))) {
822                         /* v1/v2 will both be in the same group */
823                         if (v1_index <  (int)data->vert_range[0] ||
824                             v1_index >= (int)data->vert_range[1])
825                         {
826                                 hit->dist = dist_new;
827                                 hit->index = index;
828                         }
829                 }
830         }
831 }
832
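/* BVH ray-cast callback for directional casts: like the vert-to-vert callback above,
 * but using the ray direction and skipping edges that touch 'v_origin'. */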
833 static void bvhtree_test_edges_isect_2d_ray_cb(
834         void *user_data, int index, const BVHTreeRay *ray, BVHTreeRayHit *hit)
835 {
836         struct Edges_VertRay_BVHTreeTest *data = user_data;
837         const BMEdge *e = data->edge_arr[index];
838
839         /* direction is normalized, so this will be the distance */
840         float dist_new;
841         if (isect_ray_seg_v2(data->v_origin->co, ray->direction, e->v1->co, e->v2->co, &dist_new, NULL)) {
842                 /* avoid float precision issues, it's possible this is greater,
843                  * check above zero to allow some overlap
844                  * (and needed for partial-connect which will overlap vertices) */
845                 if (LIKELY(dist_new < hit->dist && (dist_new > 0.0f))) {
846                         if (e->v1 != data->v_origin && e->v2 != data->v_origin) {
847                                 const int v1_index = BM_elem_index_get(e->v1);
848                                 /* v1/v2 will both be in the same group */
849                                 if (v1_index <  (int)data->vert_range[0] ||
850                                     v1_index >= (int)data->vert_range[1])
851                                 {
852                                         hit->dist = dist_new;
853                                         hit->index = index;
854                                 }
855                         }
856                 }
857         }
858 }
859
860 /**
861  * Store values for:
862  * - #bm_face_split_edgenet_find_connection
863  * - #test_edges_isect_2d_vert
864  * ... which don't change each call.
865  */
866 struct EdgeGroup_FindConnection_Args {
867         BVHTree *bvhtree;
868         BMEdge **edge_arr;
869         uint edge_arr_len;
870
871         BMEdge **edge_arr_new;
872         uint edge_arr_new_len;
873
874         const uint *vert_range;
875 };
876
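/* Cast from 'v_origin' towards 'v_other', returning an edge which blocks the connection
 * (falling back to checking the newly added edges when the tree finds no hit),
 * or NULL when the path is clear. */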
877 static BMEdge *test_edges_isect_2d_vert(
878         const struct EdgeGroup_FindConnection_Args *args,
879         BMVert *v_origin, BMVert *v_other)
880 {
881         int index;
882
883         BVHTreeRayHit hit = {0};
884         float dir[3];
885
886         sub_v2_v2v2(dir, v_other->co, v_origin->co);
887         dir[2] = 0.0f;
888         hit.index = -1;
889         hit.dist = normalize_v2(dir);
890
891         struct Edges_VertVert_BVHTreeTest user_data = {0};
892         user_data.dist_orig = hit.dist;
893         user_data.edge_arr = args->edge_arr;
894         user_data.v_origin = v_origin;
895         user_data.v_other = v_other;
896         user_data.vert_range = args->vert_range;
897
898         index = BLI_bvhtree_ray_cast_ex(
899                 args->bvhtree, v_origin->co, dir, 0.0f, &hit,
900                 bvhtree_test_edges_isect_2d_vert_cb, &user_data, 0);
901
902         BMEdge *e_hit = (index != -1) ? args->edge_arr[index] : NULL;
903
904         /* check existing connections (no spatial optimization here since we're continually adding). */
905         if (LIKELY(index == -1)) {
906                 float t_best = 1.0f;
907                 for (uint i = 0; i < args->edge_arr_new_len; i++) {
908                         float co_isect[2];
909                         if (UNLIKELY(edge_isect_verts_point_2d(args->edge_arr_new[i], v_origin, v_other, co_isect))) {
910                                 const float t_test = line_point_factor_v2(co_isect, v_origin->co, v_other->co);
911                                 if (t_test < t_best) {
912                                         t_best = t_test;
913
914                                         e_hit = args->edge_arr_new[i];
915                                 }
916                         }
917                 }
918         }
919
920         return e_hit;
921 }
922
923 /**
924  * Similar to #test_edges_isect_2d_vert but we're casting in a direction
925  * (not to a vertex).
926  */
927 static BMEdge *test_edges_isect_2d_ray(
928         const struct EdgeGroup_FindConnection_Args *args,
929         BMVert *v_origin, const float dir[3])
930 {
931         int index;
932         BVHTreeRayHit hit = {0};
933
934         BLI_ASSERT_UNIT_V2(dir);
935
936         hit.index = -1;
937         hit.dist = BVH_RAYCAST_DIST_MAX;
938
939         struct Edges_VertRay_BVHTreeTest user_data = {0};
940         user_data.edge_arr = args->edge_arr;
941         user_data.v_origin = v_origin;
942         user_data.vert_range = args->vert_range;
943
944         index = BLI_bvhtree_ray_cast_ex(
945                 args->bvhtree, v_origin->co, dir, 0.0f, &hit,
946                 bvhtree_test_edges_isect_2d_ray_cb, &user_data, 0);
947
948         BMEdge *e_hit = (index != -1) ? args->edge_arr[index] : NULL;
949
950         /* check existing connections (no spatial optimization here since we're continually adding). */
951         if (LIKELY(index != -1)) {
952                 for (uint i = 0; i < args->edge_arr_new_len; i++) {
953                         BMEdge *e = args->edge_arr_new[i];
954                         float dist_new;
955                         if (isect_ray_seg_v2(v_origin->co, dir, e->v1->co, e->v2->co, &dist_new, NULL)) {
956                                 if (e->v1 != v_origin && e->v2 != v_origin) {
957                                         /* avoid float precision issues, it's possible this is greater */
958                                         if (LIKELY(dist_new < hit.dist)) {
959                                                 hit.dist = dist_new;
960
961                                                 e_hit = args->edge_arr_new[i];
962                                         }
963                                 }
964                         }
965                 }
966         }
967
968         return e_hit;
969 }
970
971 static int bm_face_split_edgenet_find_connection(
972         const struct EdgeGroup_FindConnection_Args *args,
973         BMVert *v_origin,
974         /* false = negative, true = positive */
975         bool direction_sign)
976 {
977         /**
978          * The method for finding a connection is as follows:
979          *
980          * - Cast a ray along either the positive or negative direction.
981          * - Take the hit-edge, and cast rays to its vertices, checking those rays don't intersect a closer edge.
982          * - Keep taking the hit-edge and testing its verts until a vertex is found which isn't blocked by an edge.
983          *
984          * \note It's possible none of the verts can be accessed (with self-intersecting lines).
985          * In that case there's no right answer (without subdividing edges),
986          * so return a fall-back vertex instead.
987          */
988
989         const float dir[3] = {[SORT_AXIS] = direction_sign ? 1.0f : -1.0f};
990         BMEdge *e_hit = test_edges_isect_2d_ray(args, v_origin, dir);
991         BMVert *v_other = NULL;
992
993         if (e_hit) {
994                 BMVert *v_other_fallback = NULL;
995
996                 BLI_SMALLSTACK_DECLARE(vert_search, BMVert *);
997
998                 /* ensure we never add verts multiple times (not all that likely - but possible) */
999                 BLI_SMALLSTACK_DECLARE(vert_blacklist, BMVert *);
1000
1001                 do {
1002                         BMVert *v_pair[2];
1003                         /* ensure the closest vertex is popped back off the stack first */
1004                         if (len_squared_v2v2(v_origin->co, e_hit->v1->co) >
1005                             len_squared_v2v2(v_origin->co, e_hit->v2->co))
1006                         {
1007                                 ARRAY_SET_ITEMS(v_pair, e_hit->v1, e_hit->v2);
1008                         }
1009                         else {
1010                                 ARRAY_SET_ITEMS(v_pair, e_hit->v2, e_hit->v1);
1011                         }
1012
1013                         for (int j = 0; j < 2; j++) {
1014                                 BMVert *v_iter = v_pair[j];
1015                                 if (BM_elem_flag_test(v_iter, VERT_IS_VALID)) {
1016                                         if (direction_sign ? (v_iter->co[SORT_AXIS] > v_origin->co[SORT_AXIS]) :
1017                                                              (v_iter->co[SORT_AXIS] < v_origin->co[SORT_AXIS]))
1018                                         {
1019                                                 BLI_SMALLSTACK_PUSH(vert_search, v_iter);
1020                                                 BLI_SMALLSTACK_PUSH(vert_blacklist, v_iter);
1021                                                 BM_elem_flag_disable(v_iter, VERT_IS_VALID);
1022                                         }
1023                                 }
1024                         }
1025                         v_other_fallback = v_other;
1026
1027                 } while ((v_other = BLI_SMALLSTACK_POP(vert_search)) &&
1028                          (e_hit   = test_edges_isect_2d_vert(args, v_origin, v_other)));
1029
1030                 if (v_other == NULL) {
1031                         printf("Using fallback\n");
1032                         v_other = v_other_fallback;
1033                 }
1034
1035                 /* reset the blacklist flag, for future use */
1036                 BMVert *v;
1037                 while ((v = BLI_SMALLSTACK_POP(vert_blacklist))) {
1038                         BM_elem_flag_enable(v, VERT_IS_VALID);
1039                 }
1040         }
1041
1042         /* if we reach this line, v_other is either the best vertex or it's NULL */
1043         return v_other ? BM_elem_index_get(v_other) : -1;
1044 }
1045
1046
1047 /**
1048  * Support for connecting islands that have single-edge connections.
1049  * This option is not very optimal (however it's not needed for booleans either).
1050  */
1051 #ifdef USE_PARTIAL_CONNECT
1052
1053 /**
1054  * Split vertices which are part of a partial connection
1055  * (only a single vertex connecting an island).
1056  *
1057  * \note All edges and vertices must have their #BM_ELEM_INTERNAL_TAG flag enabled.
1058  * This function leaves all the flags set as well.
1059  *
1060  */
1061 static BMVert *bm_face_split_edgenet_partial_connect(BMesh *bm, BMVert *v_delimit)
1062 {
1063         /* -------------------------------------------------------------------- */
1064         /* Initial check that we may be a delimiting vert (keep this fast) */
1065
1066         /* initial check - see if we have 3+ flagged edges attached to 'v_delimit'
1067          * if not, we can exit early */
1068         LinkNode *e_delimit_list = NULL;
1069         uint e_delimit_list_len = 0;
1070
1071 #define EDGE_NOT_IN_STACK  BM_ELEM_INTERNAL_TAG
1072 #define VERT_NOT_IN_STACK  BM_ELEM_INTERNAL_TAG
1073
1074 #define FOREACH_VERT_EDGE(v_, e_, body_) { \
1075         BMEdge *e_ = v_->e; \
1076         do { body_ } while ((e_ = BM_DISK_EDGE_NEXT(e_, v_)) != v_->e); \
1077 } ((void)0)
1078
1079         /* start with face edges, since we need to split away wire-only edges */
1080         BMEdge *e_face_init = NULL;
1081
1082         FOREACH_VERT_EDGE(v_delimit, e_iter, {
1083                 if (BM_elem_flag_test(e_iter, EDGE_NOT_IN_STACK)) {
1084                         BLI_assert(BM_elem_flag_test(BM_edge_other_vert(e_iter, v_delimit), VERT_NOT_IN_STACK));
1085                         BLI_linklist_prepend_alloca(&e_delimit_list, e_iter);
1086                         e_delimit_list_len++;
1087                         if (e_iter->l != NULL) {
1088                                 e_face_init = e_iter;
1089                         }
1090                 }
1091         });
1092
1093         /* skip typical edge-chain verts */
1094         if (LIKELY(e_delimit_list_len <= 2)) {
1095                 return NULL;
1096         }
1097
1098
1099         /* -------------------------------------------------------------------- */
1100         /* Complicated stuff starts now! */
1101
1102
1103         /* Store connected vertices for restoring the flag */
1104         LinkNode *vert_stack = NULL;
1105         BLI_linklist_prepend_alloca(&vert_stack, v_delimit);
1106         BM_elem_flag_disable(v_delimit, VERT_NOT_IN_STACK);
1107
1108         /* Walk the net... */
1109         {
1110                 BLI_SMALLSTACK_DECLARE(search, BMVert *);
1111                 BMVert *v_other = BM_edge_other_vert(e_face_init ? e_face_init : v_delimit->e, v_delimit);
1112
1113                 BLI_SMALLSTACK_PUSH(search, v_other);
1114                 BM_elem_flag_disable(v_other, VERT_NOT_IN_STACK);
1115
1116                 while ((v_other = BLI_SMALLSTACK_POP(search))) {
1117                         BLI_assert(BM_elem_flag_test(v_other, VERT_NOT_IN_STACK) == false);
1118                         BLI_linklist_prepend_alloca(&vert_stack, v_other);
1119                         BMEdge *e_iter = v_other->e;
1120                         do {
1121                                 BMVert *v_step = BM_edge_other_vert(e_iter, v_other);
1122                                 if (BM_elem_flag_test(e_iter, EDGE_NOT_IN_STACK)) {
1123                                         if (BM_elem_flag_test(v_step, VERT_NOT_IN_STACK)) {
1124                                                 BM_elem_flag_disable(v_step, VERT_NOT_IN_STACK);
1125                                                 BLI_SMALLSTACK_PUSH(search, v_step);
1126                                         }
1127                                 }
1128                         } while ((e_iter = BM_DISK_EDGE_NEXT(e_iter, v_other)) != v_other->e);
1129                 }
1130         }
1131
1132         /* Detect if this is a delimiter by checking if we didn't walk any of the edges connected to 'v_delimit' */
1133         bool is_delimit = false;
1134         FOREACH_VERT_EDGE(v_delimit, e_iter, {
1135                 BMVert *v_step = BM_edge_other_vert(e_iter, v_delimit);
1136                 if (BM_elem_flag_test(v_step, VERT_NOT_IN_STACK) && BM_edge_is_wire(e_iter)) {
1137                         is_delimit = true;  /* if one vertex is valid - we have a mix */
1138                 }
1139                 else {
1140                         /* match the vertex flag (only for edges around 'v_delimit') */
1141                         BM_elem_flag_disable(e_iter, EDGE_NOT_IN_STACK);
1142                 }
1143         });
1144
1145 #undef FOREACH_VERT_EDGE
1146
1147         /* Execute the split */
1148         BMVert *v_split = NULL;
1149         if (is_delimit) {
1150                 v_split = BM_vert_create(bm, v_delimit->co, NULL, 0);
1151                 BM_vert_separate_wire_hflag(bm, v_split, v_delimit, EDGE_NOT_IN_STACK);
1152                 BM_elem_flag_enable(v_split, VERT_NOT_IN_STACK);
1153
1154                 BLI_assert(v_delimit->e != NULL);
1155                 BLI_assert(v_split->e != NULL);
1156         }
1157
1158         /* Restore flags */
1159         do {
1160                 BM_elem_flag_enable((BMVert *)vert_stack->link, VERT_NOT_IN_STACK);
1161         } while ((vert_stack = vert_stack->next));
1162
1163         do {
1164                 BM_elem_flag_enable((BMEdge *)e_delimit_list->link, EDGE_NOT_IN_STACK);
1165         } while ((e_delimit_list = e_delimit_list->next));
1166
1167 #undef EDGE_NOT_IN_STACK
1168 #undef VERT_NOT_IN_STACK
1169
1170         return v_split;
1171 }
1172
1173
1174 /**
1175  * Check if connecting vertices would cause an edge with duplicate verts.
1176  */
1177 static bool bm_vert_partial_connect_check_overlap(
1178         const int *remap,
1179         const int v_a_index, const int v_b_index)
1180 {
1181         /* connected to each other */
1182         if (UNLIKELY((remap[v_a_index] == v_b_index) ||
1183                      (remap[v_b_index] == v_a_index)))
1184         {
1185                 return true;
1186         }
1187         else {
1188                 return false;
1189         }
1190 }
1191
1192
1193 #endif  /* USE_PARTIAL_CONNECT */
1194
1195
1196
1197 /**
1198  * For when the edge-net has holes in it, this connects them.
1199  *
1200  * \param use_partial_connect: Support for handling islands connected by only a single edge,
1201  * \note that this is quite slow, so avoid using it where possible.
1202  * \param mem_arena: Avoids many small allocs & should be cleared after each use.
1203  * take care since \a r_edge_net_new is stored in \a mem_arena.
1204  */
1205 bool BM_face_split_edgenet_connect_islands(
1206         BMesh *bm,
1207         BMFace *f, BMEdge **edge_net_init, const uint edge_net_init_len,
1208         bool use_partial_connect,
1209         MemArena *mem_arena,
1210         BMEdge ***r_edge_net_new, uint *r_edge_net_new_len)
1211 {
1212         /* -------------------------------------------------------------------- */
1213         /* This function has 2 main parts.
1214          *
1215          * - Check if there are any holes.
1216          * - Connect the holes with edges (if any are found).
1217          *
1218          * Keep the first part fast since it will run very often for edge-nets that have no holes.
1219          *
1220          * \note Don't use the mem_arena unless we have holes to fill.
1221          * (avoid thrashing the arena when the initial check isn't so intensive on the stack).
1222          */
1223
1224         const uint edge_arr_len = (uint)edge_net_init_len + (uint)f->len;
1225         BMEdge **edge_arr = BLI_array_alloca(edge_arr, edge_arr_len);
1226         bool ok = false;
1227
1228         memcpy(edge_arr, edge_net_init, sizeof(*edge_arr) * (size_t)edge_net_init_len);
1229
1230         /* _must_ clear on exit */
1231 #define EDGE_NOT_IN_STACK  BM_ELEM_INTERNAL_TAG
1232 #define VERT_NOT_IN_STACK  BM_ELEM_INTERNAL_TAG
1233
1234         {
1235                 uint i = edge_net_init_len;
1236                 BMLoop *l_iter, *l_first;
1237                 l_iter = l_first = BM_FACE_FIRST_LOOP(f);
1238                 do {
1239                         BLI_assert(!BM_elem_flag_test(l_iter->v, VERT_NOT_IN_STACK));
1240                         BLI_assert(!BM_elem_flag_test(l_iter->e, EDGE_NOT_IN_STACK));
1241                         edge_arr[i++] = l_iter->e;
1242                 } while ((l_iter = l_iter->next) != l_first);
1243                 BLI_assert(i == edge_arr_len);
1244         }
1245
1246         for (uint i = 0; i < edge_arr_len; i++) {
1247                 BM_elem_flag_enable(edge_arr[i], EDGE_NOT_IN_STACK);
1248                 BM_elem_flag_enable(edge_arr[i]->v1, VERT_NOT_IN_STACK);
1249                 BM_elem_flag_enable(edge_arr[i]->v2, VERT_NOT_IN_STACK);
1250         }
1251
1252
1253
1254 #ifdef USE_PARTIAL_CONNECT
1255         /* Split-out delimiting vertices */
1256         struct TempVertPair {
1257                 struct TempVertPair *next;
1258                 BMVert *v_temp;
1259                 BMVert *v_orig;
1260         };
1261
1262         struct {
1263                 struct TempVertPair *list;
1264                 uint len;
1265                 int *remap;  /* temp -> orig mapping */
1266         } temp_vert_pairs = {NULL};
1267
1268         if (use_partial_connect) {
1269                 for (uint i = 0; i < edge_net_init_len; i++) {
1270                         for (unsigned j = 0; j < 2; j++) {
1271                                 BMVert *v_delimit = (&edge_arr[i]->v1)[j];
1272                                 BMVert *v_other;
1273
1274                                 /* note, remapping will _never_ map a vertex to an already mapped vertex */
1275                                 while (UNLIKELY((v_other = bm_face_split_edgenet_partial_connect(bm, v_delimit)))) {
1276                                         struct TempVertPair *tvp = BLI_memarena_alloc(mem_arena, sizeof(*tvp));
1277                                         tvp->next = temp_vert_pairs.list;
1278                                         tvp->v_orig = v_delimit;
1279                                         tvp->v_temp = v_other;
1280                                         temp_vert_pairs.list = tvp;
1281                                         temp_vert_pairs.len++;
1282                                 }
1283                         }
1284                 }
1285
1286                 if (temp_vert_pairs.len == 0) {
1287                         use_partial_connect = false;
1288                 }
1289         }
1290 #endif  /* USE_PARTIAL_CONNECT */
1291
1292
1293
1294         uint group_arr_len = 0;
1295         LinkNode *group_head = NULL;
1296         {
1297                 /* scan 'edge_arr' backwards so the outer face boundary is handled first
1298                  * (since it's likely to be the largest) */
1299                 uint edge_index = (edge_arr_len - 1);
1300                 uint edge_in_group_tot = 0;
1301
1302                 BLI_SMALLSTACK_DECLARE(vstack, BMVert *);
1303
1304                 while (true) {
1305                         LinkNode *edge_links = NULL;
1306                         uint unique_verts_in_group = 0, unique_edges_in_group = 0;
1307
1308                         /* list of groups */
1309                         BLI_assert(BM_elem_flag_test(edge_arr[edge_index]->v1, VERT_NOT_IN_STACK));
1310                         BLI_SMALLSTACK_PUSH(vstack, edge_arr[edge_index]->v1);
1311                         BM_elem_flag_disable(edge_arr[edge_index]->v1, VERT_NOT_IN_STACK);
1312
1313                         BMVert *v_iter;
1314                         while ((v_iter = BLI_SMALLSTACK_POP(vstack))) {
1315                                 unique_verts_in_group++;
1316
1317                                 BMEdge *e_iter = v_iter->e;
1318                                 do {
1319                                         if (BM_elem_flag_test(e_iter, EDGE_NOT_IN_STACK)) {
1320                                                 BM_elem_flag_disable(e_iter, EDGE_NOT_IN_STACK);
1321                                                 unique_edges_in_group++;
1322
1323                                                 BLI_linklist_prepend_alloca(&edge_links, e_iter);
1324
1325                                                 BMVert *v_other = BM_edge_other_vert(e_iter, v_iter);
1326                                                 if (BM_elem_flag_test(v_other, VERT_NOT_IN_STACK)) {
1327                                                         BLI_SMALLSTACK_PUSH(vstack, v_other);
1328                                                         BM_elem_flag_disable(v_other, VERT_NOT_IN_STACK);
1329                                                 }
1330                                         }
1331                                 } while ((e_iter = BM_DISK_EDGE_NEXT(e_iter, v_iter)) != v_iter->e);
1332                         }
1333
1334                         struct EdgeGroupIsland *g = alloca(sizeof(*g));
1335                         g->vert_len = unique_verts_in_group;
1336                         g->edge_len = unique_edges_in_group;
1337                         edge_in_group_tot += unique_edges_in_group;
1338
1339                         BLI_linklist_prepend_nlink(&group_head, edge_links, (LinkNode *)g);
1340
1341                         group_arr_len++;
1342
1343                         if (edge_in_group_tot == edge_arr_len) {
1344                                 break;
1345                         }
1346
1347                         /* skip edges in the stack */
1348                         while (BM_elem_flag_test(edge_arr[edge_index], EDGE_NOT_IN_STACK) == false) {
1349                                 BLI_assert(edge_index != 0);
1350                                 edge_index--;
1351                         }
1352                 }
1353         }
1354
1355         /* single group - no holes */
1356         if (group_arr_len == 1) {
1357                 goto finally;
1358         }
1359
1360
1361         /* -------------------------------------------------------------------- */
1362         /* The previous checks need to be kept fast since they run very often.
1363          * Now we know there are holes, so calculate spatial lookup info and
1364          * other per-group data.
1365          */
1366
1367         float axis_mat[3][3];
1368         axis_dominant_v3_to_m3(axis_mat, f->no);
1369
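     /* tag verts which have already been added to 'vert_arr' */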
1370 #define VERT_IN_ARRAY BM_ELEM_INTERNAL_TAG
1371
1372         struct EdgeGroupIsland **group_arr = BLI_memarena_alloc(mem_arena, sizeof(*group_arr) * group_arr_len);
1373         uint vert_arr_len = 0;
1374         /* sort groups by lowest value vertex */
1375         {
1376                 /* fill 'group_arr' in reverse order so the boundary face is first */
1377                 struct EdgeGroupIsland **group_arr_p = &group_arr[group_arr_len];
1378
1379                 for (struct EdgeGroupIsland *g = (void *)group_head; g; g = (struct EdgeGroupIsland *)g->edge_links.next) {
1380                         LinkNode *edge_links = g->edge_links.link;
1381
1382                         /* init with *any* different verts */
1383                         g->vert_span.min = ((BMEdge *)edge_links->link)->v1;
1384                         g->vert_span.max = ((BMEdge *)edge_links->link)->v2;
1385                         float min_axis[2] =  {FLT_MAX, FLT_MAX};
1386                         float max_axis[2] = {-FLT_MAX, -FLT_MAX};
1387
1388                         do {
1389                                 BMEdge *e = edge_links->link;
1390                                 BLI_assert(e->head.htype == BM_EDGE);
1391
1392                                 for (int j = 0; j < 2; j++) {
1393                                         BMVert *v_iter = (&e->v1)[j];
1394                                         BLI_assert(v_iter->head.htype == BM_VERT);
1395                                         /* ideally we could use 'v_iter->co[SORT_AXIS]' here,
1396                                          * but we need to sort the groups before setting the vertex array order */
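                                             /* put the primary 'SORT_AXIS' component first so comparisons prioritize it */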
1397                                         const float axis_value[2] = {
1398 #if SORT_AXIS == 0
1399                                             dot_m3_v3_row_x(axis_mat, v_iter->co),
1400                                             dot_m3_v3_row_y(axis_mat, v_iter->co),
1401 #else
1402                                             dot_m3_v3_row_y(axis_mat, v_iter->co),
1403                                             dot_m3_v3_row_x(axis_mat, v_iter->co),
1404 #endif
1405                                         };
1406
1407                                         if (axis_pt_cmp(axis_value, min_axis) == -1) {
1408                                                 g->vert_span.min = v_iter;
1409                                                 copy_v2_v2(min_axis, axis_value);
1410                                         }
1411                                         if (axis_pt_cmp(axis_value, max_axis) == 1) {
1412                                                 g->vert_span.max = v_iter;
1413                                                 copy_v2_v2(max_axis, axis_value);
1414                                         }
1415                                 }
1416                         } while ((edge_links = edge_links->next));
1417
1418                         copy_v2_v2(g->vert_span.min_axis, min_axis);
1419                         copy_v2_v2(g->vert_span.max_axis, max_axis);
1420
1421                         g->has_prev_edge = false;
1422
1423                         vert_arr_len += g->vert_len;
1424
1425                         *(--group_arr_p) = g;
1426                 }
1427         }
1428
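             /* order the islands by their lowest vertex along the sort axis,
              * so each one can connect back to an island handled before it */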
1429         qsort(group_arr, group_arr_len, sizeof(*group_arr), group_min_cmp_fn);
1430
1431         /* we don't know how many unique verts there are connecting the edges, so over-alloc */
1432         BMVert **vert_arr = BLI_memarena_alloc(mem_arena, sizeof(*vert_arr) * vert_arr_len);
1433         /* map vertex -> group index */
1434         uint *verts_group_table = BLI_memarena_alloc(mem_arena, sizeof(*verts_group_table) * vert_arr_len);
1435
1436         float (*vert_coords_backup)[3] = BLI_memarena_alloc(mem_arena, sizeof(*vert_coords_backup) * vert_arr_len);
1437
1438         {
1439                 /* relative location, for higher precision calculations */
1440                 const float f_co_ref[3] = {UNPACK3(BM_FACE_FIRST_LOOP(f)->v->co)};
1441
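                     /* Project every vertex into the face's 2D space (in-place, with z zeroed),
                      * keeping a backup of the 3D coordinates so they can be restored
                      * once the connecting edges have been created. */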
1442                 int v_index = 0;  /* global vert index */
1443                 for (uint g_index = 0; g_index < group_arr_len; g_index++) {
1444                         LinkNode *edge_links = group_arr[g_index]->edge_links.link;
1445                         do {
1446                                 BMEdge *e = edge_links->link;
1447                                 for (int j = 0; j < 2; j++) {
1448                                         BMVert *v_iter = (&e->v1)[j];
1449                                         if (!BM_elem_flag_test(v_iter, VERT_IN_ARRAY)) {
1450                                                 BM_elem_flag_enable(v_iter, VERT_IN_ARRAY);
1451
1452                                                 /* not nice, but the alternatives aren't much better :S */
1453                                                 {
1454                                                         copy_v3_v3(vert_coords_backup[v_index], v_iter->co);
1455
1456                                                         /* for higher precision */
1457                                                         sub_v3_v3(v_iter->co, f_co_ref);
1458
1459                                                         float co_2d[2];
1460                                                         mul_v2_m3v3(co_2d, axis_mat, v_iter->co);
1461                                                         v_iter->co[0] = co_2d[0];
1462                                                         v_iter->co[1] = co_2d[1];
1463                                                         v_iter->co[2] = 0.0f;
1464                                                 }
1465
1466                                                 BM_elem_index_set(v_iter, v_index);  /* set_dirty */
1467
1468                                                 vert_arr[v_index] = v_iter;
1469                                                 verts_group_table[v_index] = g_index;
1470                                                 v_index++;
1471                                         }
1472                                 }
1473                         } while ((edge_links = edge_links->next));
1474                 }
1475         }
1476
1477         bm->elem_index_dirty |= BM_VERT;
1478
1479         /* Now create the BVH tree.
1480          *
1481          * Note that a large epsilon is used because meshes with dimensions of around 100+ need it. See T52329. */
1482         BVHTree *bvhtree = BLI_bvhtree_new(edge_arr_len, 1e-4f, 8, 8);
1483         for (uint i = 0; i < edge_arr_len; i++) {
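                         /* vert coordinates were flattened to 2D above, so only x/y are meaningful here */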
1484                 const float e_cos[2][3] = {
1485                     {UNPACK2(edge_arr[i]->v1->co), 0.0f},
1486                     {UNPACK2(edge_arr[i]->v2->co), 0.0f},
1487                 };
1488                 BLI_bvhtree_insert(bvhtree, i, (const float *)e_cos, 2);
1489         }
1490         BLI_bvhtree_balance(bvhtree);
1491
1492
1493
1494 #ifdef USE_PARTIAL_CONNECT
1495         if (use_partial_connect) {
1496                 /* needs to be done after the vertex indices have been written */
1497                 temp_vert_pairs.remap = BLI_memarena_alloc(mem_arena, sizeof(*temp_vert_pairs.remap) * vert_arr_len);
1498                 copy_vn_i(temp_vert_pairs.remap, vert_arr_len, -1);
1499
1500                 struct TempVertPair *tvp = temp_vert_pairs.list;
1501                 do {
1502                         temp_vert_pairs.remap[BM_elem_index_get(tvp->v_temp)] = BM_elem_index_get(tvp->v_orig);
1503                 } while ((tvp = tvp->next));
1504         }
1505 #endif  /* USE_PARTIAL_CONNECT */
1506
1507
1508
1509         /* Create connections between groups */
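             /* Every island after the first is linked into the net with at most two new edges:
              * one from its 'vert_span.min' back towards an earlier island (skipped when a
              * previous island already connected into it), and one from its 'vert_span.max'
              * onwards, so the final edge-net forms a single connected region. */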
1510
1511         /* may be an over-alloc, but not by much */
1512         uint edge_net_new_len = (uint)edge_net_init_len + ((group_arr_len - 1) * 2);
1513         BMEdge **edge_net_new = BLI_memarena_alloc(mem_arena, sizeof(*edge_net_new) * edge_net_new_len);
1514         memcpy(edge_net_new, edge_net_init, sizeof(*edge_net_new) * (size_t)edge_net_init_len);
1515
1516         {
1517                 uint edge_net_new_index = edge_net_init_len;
1518                 /* start-end of the verts in the current group */
1519
1520                 uint vert_range[2];
1521
1522                 vert_range[0] = 0;
1523                 vert_range[1] = group_arr[0]->vert_len;
1524
1525                 struct EdgeGroup_FindConnection_Args args = {
1526                         .bvhtree = bvhtree,
1527
1528                         /* use the new edge array so we can scan edges which have been added */
1529                         .edge_arr = edge_arr,
1530                         .edge_arr_len = edge_arr_len,
1531
1532                         /* we only want to check newly created edges */
1533                         .edge_arr_new = edge_net_new + edge_net_init_len,
1534                         .edge_arr_new_len = 0,
1535
1536                         .vert_range = vert_range,
1537                 };
1538
1539                 for (uint g_index = 1; g_index < group_arr_len; g_index++) {
1540                         struct EdgeGroupIsland *g = group_arr[g_index];
1541
1542                         /* the range of verts this group uses in 'vert_arr' (not including the last index) */
1543                         vert_range[0]  = vert_range[1];
1544                         vert_range[1] += g->vert_len;
1545
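                             /* skip the back-link when an earlier island already connected into this one */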
1546                         if (g->has_prev_edge == false) {
1547                                 BMVert *v_origin = g->vert_span.min;
1548
1549                                 const int index_other = bm_face_split_edgenet_find_connection(&args, v_origin, false);
1550                                 // BLI_assert(index_other >= 0 && index_other < (int)vert_arr_len);
1551
1552                                 /* the lookup only fails for degenerate geometry */
1553                                 if (index_other != -1) {
1554 #ifdef USE_PARTIAL_CONNECT
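                                         /* with partial-connect, skip targets that overlap 'v_origin'
                                          * through the temporary-vertex remapping */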
1555                                         if ((use_partial_connect == false) ||
1556                                             (bm_vert_partial_connect_check_overlap(
1557                                                  temp_vert_pairs.remap,
1558                                                  BM_elem_index_get(v_origin), index_other) == false))
1559 #endif
1560                                         {
1561                                                 BMVert *v_end = vert_arr[index_other];
1562
1563                                                 edge_net_new[edge_net_new_index] = BM_edge_create(bm, v_origin, v_end, NULL, 0);
1564 #ifdef USE_PARTIAL_CONNECT
1565                                                 BM_elem_index_set(edge_net_new[edge_net_new_index], edge_net_new_index);
1566 #endif
1567                                                 edge_net_new_index++;
1568                                                 args.edge_arr_new_len++;
1569                                         }
1570                                 }
1571                         }
1572
1573                         {
1574                                 BMVert *v_origin = g->vert_span.max;
1575
1576                                 const int index_other = bm_face_split_edgenet_find_connection(&args, v_origin, true);
1577                                 // BLI_assert(index_other >= 0 && index_other < (int)vert_arr_len);
1578
1579                                 /* the lookup only fails for degenerate geometry */
1580                                 if (index_other != -1) {
1581 #ifdef USE_PARTIAL_CONNECT
1582                                         if ((use_partial_connect == false) ||
1583                                             (bm_vert_partial_connect_check_overlap(
1584                                                  temp_vert_pairs.remap,
1585                                                  BM_elem_index_get(v_origin), index_other) == false))
1586 #endif
1587                                         {
1588                                                 BMVert *v_end = vert_arr[index_other];
1589                                                 edge_net_new[edge_net_new_index] = BM_edge_create(bm, v_origin, v_end, NULL, 0);
1590 #ifdef USE_PARTIAL_CONNECT
1591                                                 BM_elem_index_set(edge_net_new[edge_net_new_index], edge_net_new_index);
1592 #endif
1593                                                 edge_net_new_index++;
1594                                                 args.edge_arr_new_len++;
1595                                         }
1596
1597                                         /* tell the 'next' group it doesn't need to create its own back-link */
1598                                         uint g_index_other = verts_group_table[index_other];
1599                                         group_arr[g_index_other]->has_prev_edge = true;
1600                                 }
1601                         }
1602
1603                 }
1604                 BLI_assert(edge_net_new_len >= edge_net_new_index);
1605                 edge_net_new_len = edge_net_new_index;
1606         }
1607
1608         BLI_bvhtree_free(bvhtree);
1609
1610         *r_edge_net_new = edge_net_new;
1611         *r_edge_net_new_len = edge_net_new_len;
1612         ok = true;
1613
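             /* restore the original 3D coordinates that were overwritten by the 2D projection */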
1614         for (uint i = 0; i < vert_arr_len; i++) {
1615                 copy_v3_v3(vert_arr[i]->co, vert_coords_backup[i]);
1616         }
1617
1618 finally:
1619
1620 #ifdef USE_PARTIAL_CONNECT
1621         /* don't free 'temp_vert_pairs.list', it's part of the arena */
1622         if (use_partial_connect) {
1623
1624                 /* Sanity check: ensure we don't have connecting edges before splicing begins. */
1625 #ifdef DEBUG
1626                 {
1627                         struct TempVertPair *tvp = temp_vert_pairs.list;
1628                         do {
1629                                 /* we must _never_ create connections here
1630                                  * (in fact the islands can't have a connection at all) */
1631                                 BLI_assert(BM_edge_exists(tvp->v_orig, tvp->v_temp) == NULL);
1632                         } while ((tvp = tvp->next));
1633                 }
1634 #endif
1635
1636                 struct TempVertPair *tvp = temp_vert_pairs.list;
1637                 do {
1638                         /* it's _very_ unlikely the edge exists,
1639                          * however splicing may cause this. See: T48012 */
1640                         if (!BM_edge_exists(tvp->v_orig, tvp->v_temp)) {
1641                                 BM_vert_splice(bm, tvp->v_orig, tvp->v_temp);
1642                         }
1643                 } while ((tvp = tvp->next));
1644
1645                 /* Remove edges which have become doubles since splicing vertices together,
1646                  * it's less trouble than detecting future-doubles on edge-creation. */
1647                 for (uint i = edge_net_init_len; i < edge_net_new_len; i++) {
1648                         while (BM_edge_find_double(edge_net_new[i])) {
1649                                 BM_edge_kill(bm, edge_net_new[i]);
1650                                 edge_net_new_len--;
1651                                 if (i == edge_net_new_len) {
1652                                         break;
1653                                 }
1654                                 edge_net_new[i] = edge_net_new[edge_net_new_len];
1655                         }
1656                 }
1657
1658                 *r_edge_net_new_len = edge_net_new_len;
1659         }
1660 #endif
1661
1662
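             /* clear the *_NOT_IN_STACK flags used as visit markers above */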
1663         for (uint i = 0; i < edge_arr_len; i++) {
1664                 BM_elem_flag_disable(edge_arr[i], EDGE_NOT_IN_STACK);
1665                 BM_elem_flag_disable(edge_arr[i]->v1, VERT_NOT_IN_STACK);
1666                 BM_elem_flag_disable(edge_arr[i]->v2, VERT_NOT_IN_STACK);
1667         }
1668
1669 #undef VERT_IN_ARRAY
1670 #undef VERT_NOT_IN_STACK
1671 #undef EDGE_NOT_IN_STACK
1672
1673         return ok;
1674 }
1675
1676 #undef SORT_AXIS
1677
1678 /** \} */