Cleanup: quiet warnings
[blender.git] / source / blender / bmesh / intern / bmesh_polygon_edgenet.c
1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * ***** END GPL LICENSE BLOCK *****
19  */
20
21 /** \file blender/bmesh/intern/bmesh_polygon_edgenet.c
22  *  \ingroup bmesh
23  *
24  * This file contains functions for splitting faces into isolated regions,
25  * defined by connected edges.
26  */
27 // #define DEBUG_PRINT
28
29 #include "MEM_guardedalloc.h"
30
31 #include "BLI_math.h"
32 #include "BLI_memarena.h"
33 #include "BLI_array.h"
34 #include "BLI_alloca.h"
35 #include "BLI_stackdefines.h"
36 #include "BLI_linklist_stack.h"
37 #include "BLI_sort.h"
38 #include "BLI_sort_utils.h"
39 #include "BLI_kdopbvh.h"
40
41 #include "BKE_customdata.h"
42
43 #include "bmesh.h"
44 #include "intern/bmesh_private.h"
45
46 /* -------------------------------------------------------------------- */
47 /* Face Split Edge-Net */
48
49 /** \name BM_face_split_edgenet and helper functions.
50  *
51  * \note Don't use #BM_edge_is_wire or #BM_edge_is_boundary
52  * since we need to take flagged faces into account.
53  * Also take care accessing e->l directly.
54  *
55  * \{ */
56
57 /* Note: All these flags _must_ be cleared on exit */
58
59 /* face is apart of the edge-net (including the original face we're splitting) */
60 #define FACE_NET  _FLAG_WALK
61 /* edge is apart of the edge-net we're filling */
62 #define EDGE_NET   _FLAG_WALK
63 /* tag verts we've visit */
64 #define VERT_VISIT _FLAG_WALK
65 #define VERT_IN_QUEUE _FLAG_WALK_ALT
66
67 struct VertOrder {
68         float   angle;
69         BMVert *v;
70 };
71
72 static uint bm_edge_flagged_radial_count(BMEdge *e)
73 {
74         uint count = 0;
75         BMLoop *l;
76
77         if ((l = e->l)) {
78                 do {
79                         if (BM_ELEM_API_FLAG_TEST(l->f, FACE_NET)) {
80                                 count++;
81                         }
82                 } while ((l = l->radial_next) != e->l);
83         }
84         return count;
85 }
86
87 static BMLoop *bm_edge_flagged_radial_first(BMEdge *e)
88 {
89         BMLoop *l;
90
91         if ((l = e->l)) {
92                 do {
93                         if (BM_ELEM_API_FLAG_TEST(l->f, FACE_NET)) {
94                                 return l;
95                         }
96                 } while ((l = l->radial_next) != e->l);
97         }
98         return NULL;
99 }
100
101 static void normalize_v2_m3_v3v3(float out[2], float axis_mat[3][3], const float v1[3], const float v2[3])
102 {
103         float dir[3];
104         sub_v3_v3v3(dir, v1, v2);
105         mul_v2_m3v3(out, axis_mat, dir);
106         normalize_v2(out);
107 }
108
109
110 /**
111  * \note Be sure to update #bm_face_split_edgenet_find_loop_pair_exists
112  * when making changed to edge picking logic.
113  */
114 static bool bm_face_split_edgenet_find_loop_pair(
115         BMVert *v_init,
116         const float face_normal[3], float face_normal_matrix[3][3],
117         BMEdge *e_pair[2])
118 {
119         /* Always find one boundary edge (to determine winding)
120          * and one wire (if available), otherwise another boundary.
121          */
122
123         /* detect winding */
124         BMLoop *l_walk;
125         bool swap;
126
127         BLI_SMALLSTACK_DECLARE(edges_boundary, BMEdge *);
128         BLI_SMALLSTACK_DECLARE(edges_wire,     BMEdge *);
129         int edges_boundary_len = 0;
130         int edges_wire_len = 0;
131
132         {
133                 BMEdge *e, *e_first;
134                 e = e_first = v_init->e;
135                 do {
136                         if (BM_ELEM_API_FLAG_TEST(e, EDGE_NET)) {
137                                 const uint count = bm_edge_flagged_radial_count(e);
138                                 if (count == 1) {
139                                         BLI_SMALLSTACK_PUSH(edges_boundary, e);
140                                         edges_boundary_len++;
141                                 }
142                                 else if (count == 0) {
143                                         BLI_SMALLSTACK_PUSH(edges_wire, e);
144                                         edges_wire_len++;
145                                 }
146                         }
147                 } while ((e = BM_DISK_EDGE_NEXT(e, v_init)) != e_first);
148         }
149
150         /* first edge should always be boundary */
151         if (edges_boundary_len == 0) {
152                 return false;
153         }
154         e_pair[0] = BLI_SMALLSTACK_POP(edges_boundary);
155
156         /* use to hold boundary OR wire edges */
157         BLI_SMALLSTACK_DECLARE(edges_search, BMEdge *);
158
159         /* attempt one boundary and one wire, or 2 boundary */
160         if (edges_wire_len == 0) {
161                 if (edges_boundary_len > 1) {
162                         e_pair[1] = BLI_SMALLSTACK_POP(edges_boundary);
163
164                         if (edges_boundary_len > 2) {
165                                 BLI_SMALLSTACK_SWAP(edges_search, edges_boundary);
166                         }
167                 }
168                 else {
169                         /* one boundary and no wire */
170                         return false;
171                 }
172         }
173         else {
174                 e_pair[1] = BLI_SMALLSTACK_POP(edges_wire);
175                 if (edges_wire_len > 1) {
176                         BLI_SMALLSTACK_SWAP(edges_search, edges_wire);
177                 }
178         }
179
180         /* if we swapped above, search this list for the best edge */
181         if (!BLI_SMALLSTACK_IS_EMPTY(edges_search)) {
182                 /* find the best edge in 'edge_list' to use for 'e_pair[1]' */
183                 const BMVert *v_prev = BM_edge_other_vert(e_pair[0], v_init);
184                 const BMVert *v_next = BM_edge_other_vert(e_pair[1], v_init);
185
186                 float dir_prev[2], dir_next[2];
187
188                 normalize_v2_m3_v3v3(dir_prev, face_normal_matrix, v_prev->co, v_init->co);
189                 normalize_v2_m3_v3v3(dir_next, face_normal_matrix, v_next->co, v_init->co);
190                 float angle_best_cos = dot_v2v2(dir_next, dir_prev);
191
192                 BMEdge *e;
193                 while ((e = BLI_SMALLSTACK_POP(edges_search))) {
194                         v_next = BM_edge_other_vert(e, v_init);
195                         float dir_test[2];
196
197                         normalize_v2_m3_v3v3(dir_test, face_normal_matrix, v_next->co, v_init->co);
198                         const float angle_test_cos = dot_v2v2(dir_prev, dir_test);
199
200                         if (angle_test_cos > angle_best_cos) {
201                                 angle_best_cos = angle_test_cos;
202                                 e_pair[1] = e;
203                         }
204                 }
205         }
206
207         /* flip based on winding */
208         l_walk = bm_edge_flagged_radial_first(e_pair[0]);
209         swap = false;
210         if (face_normal == l_walk->f->no) {
211                 swap = !swap;
212         }
213         if (l_walk->v != v_init) {
214                 swap = !swap;
215         }
216         if (swap) {
217                 SWAP(BMEdge *, e_pair[0], e_pair[1]);
218         }
219
220         return true;
221 }
222
223 /**
224  * A reduced version of #bm_face_split_edgenet_find_loop_pair
225  * that only checks if it would return true.
226  *
227  * \note There is no use in caching resulting edges here,
228  * since between this check and running #bm_face_split_edgenet_find_loop,
229  * the selected edges may have had faces attached.
230  */
231 static bool bm_face_split_edgenet_find_loop_pair_exists(
232         BMVert *v_init)
233 {
234         int edges_boundary_len = 0;
235         int edges_wire_len = 0;
236
237         {
238                 BMEdge *e, *e_first;
239                 e = e_first = v_init->e;
240                 do {
241                         if (BM_ELEM_API_FLAG_TEST(e, EDGE_NET)) {
242                                 const uint count = bm_edge_flagged_radial_count(e);
243                                 if (count == 1) {
244                                         edges_boundary_len++;
245                                 }
246                                 else if (count == 0) {
247                                         edges_wire_len++;
248                                 }
249                         }
250                 } while ((e = BM_DISK_EDGE_NEXT(e, v_init)) != e_first);
251         }
252
253         /* first edge should always be boundary */
254         if (edges_boundary_len == 0) {
255                 return false;
256         }
257
258         /* attempt one boundary and one wire, or 2 boundary */
259         if (edges_wire_len == 0) {
260                 if (edges_boundary_len >= 2) {
261                         /* pass */
262                 }
263                 else {
264                         /* one boundary and no wire */
265                         return false;
266                 }
267         }
268         else {
269                 /* pass */
270         }
271
272         return true;
273 }
274
275 static bool bm_face_split_edgenet_find_loop_walk(
276         BMVert *v_init, const float face_normal[3],
277         /* cache to avoid realloc every time */
278         struct VertOrder *edge_order, const uint edge_order_len,
279         BMEdge *e_pair[2])
280 {
281         /* fast-path for the common case (avoid push-pop).
282          * Also avoids tagging as visited since we know we
283          * can't reach these verts some other way */
284 #define USE_FASTPATH_NOFORK
285
286         BMVert *v;
287         BMVert *v_dst;
288         bool found = false;
289
290         struct VertOrder *eo;
291         STACK_DECLARE(edge_order);
292
293         /* store visited verts so we can clear the visit flag after execution */
294         BLI_SMALLSTACK_DECLARE(vert_visit, BMVert *);
295
296         /* likely this will stay very small
297          * all verts pushed into this stack _must_ have their previous edges set! */
298         BLI_SMALLSTACK_DECLARE(vert_stack, BMVert *);
299         BLI_SMALLSTACK_DECLARE(vert_stack_next, BMVert *);
300
301         STACK_INIT(edge_order, edge_order_len);
302
303         /* start stepping */
304         v = BM_edge_other_vert(e_pair[0], v_init);
305         v->e = e_pair[0];
306         BLI_SMALLSTACK_PUSH(vert_stack, v);
307
308         v_dst = BM_edge_other_vert(e_pair[1], v_init);
309
310 #ifdef DEBUG_PRINT
311         printf("%s: vert (search) %d\n", __func__, BM_elem_index_get(v_init));
312 #endif
313
314         /* This loop will keep stepping over the best possible edge,
315          * in most cases it finds the direct route to close the face.
316          *
317          * In cases where paths can't be closed,
318          * alternatives are stored in the 'vert_stack'.
319          */
320         while ((v = BLI_SMALLSTACK_POP_EX(vert_stack, vert_stack_next))) {
321 #ifdef USE_FASTPATH_NOFORK
322 walk_nofork:
323 #else
324                 BLI_SMALLSTACK_PUSH(vert_visit, v);
325                 BM_ELEM_API_FLAG_ENABLE(v, VERT_VISIT);
326 #endif
327
328                 BLI_assert(STACK_SIZE(edge_order) == 0);
329
330                 /* check if we're done! */
331                 if (v == v_dst) {
332                         found = true;
333                         goto finally;
334                 }
335
336                 BMEdge *e_next, *e_first;
337                 e_first = v->e;
338                 e_next = BM_DISK_EDGE_NEXT(e_first, v);  /* always skip this verts edge */
339
340                 /* in rare cases there may be edges with a single connecting vertex */
341                 if (e_next != e_first) {
342                         do {
343                                 if ((BM_ELEM_API_FLAG_TEST(e_next, EDGE_NET)) &&
344                                     (bm_edge_flagged_radial_count(e_next) < 2))
345                                 {
346                                         BMVert *v_next;
347
348                                         v_next = BM_edge_other_vert(e_next, v);
349                                         BLI_assert(v->e != e_next);
350
351 #ifdef DEBUG_PRINT
352                                         /* indent and print */
353                                         {
354                                                 BMVert *_v = v;
355                                                 do {
356                                                         printf("  ");
357                                                 } while ((_v = BM_edge_other_vert(_v->e, _v)) != v_init);
358                                                 printf("vert %d -> %d (add=%d)\n",
359                                                        BM_elem_index_get(v), BM_elem_index_get(v_next),
360                                                        BM_ELEM_API_FLAG_TEST(v_next, VERT_VISIT) == 0);
361                                         }
362 #endif
363
364                                         if (!BM_ELEM_API_FLAG_TEST(v_next, VERT_VISIT)) {
365                                                 eo = STACK_PUSH_RET_PTR(edge_order);
366                                                 eo->v = v_next;
367
368                                                 v_next->e = e_next;
369                                         }
370                                 }
371                         } while ((e_next = BM_DISK_EDGE_NEXT(e_next, v)) != e_first);
372                 }
373
374 #ifdef USE_FASTPATH_NOFORK
375                 if (STACK_SIZE(edge_order) == 1) {
376                         eo = STACK_POP_PTR(edge_order);
377                         v = eo->v;
378
379                         goto walk_nofork;
380                 }
381 #endif
382
383                 /* sort by angle if needed */
384                 if (STACK_SIZE(edge_order) > 1) {
385                         uint j;
386                         BMVert *v_prev = BM_edge_other_vert(v->e, v);
387
388                         for (j = 0; j < STACK_SIZE(edge_order); j++) {
389                                 edge_order[j].angle = angle_signed_on_axis_v3v3v3_v3(v_prev->co, v->co, edge_order[j].v->co, face_normal);
390                         }
391                         qsort(edge_order, STACK_SIZE(edge_order), sizeof(struct VertOrder), BLI_sortutil_cmp_float_reverse);
392
393 #ifdef USE_FASTPATH_NOFORK
394                         /* only tag forks */
395                         BLI_SMALLSTACK_PUSH(vert_visit, v);
396                         BM_ELEM_API_FLAG_ENABLE(v, VERT_VISIT);
397 #endif
398                 }
399
400                 while ((eo = STACK_POP_PTR(edge_order))) {
401                         BLI_SMALLSTACK_PUSH(vert_stack_next, eo->v);
402                 }
403
404                 if (!BLI_SMALLSTACK_IS_EMPTY(vert_stack_next)) {
405                         BLI_SMALLSTACK_SWAP(vert_stack, vert_stack_next);
406                 }
407         }
408
409
410 finally:
411         /* clear flag for next execution */
412         while ((v = BLI_SMALLSTACK_POP(vert_visit))) {
413                 BM_ELEM_API_FLAG_DISABLE(v, VERT_VISIT);
414         }
415
416         return found;
417
418 #undef USE_FASTPATH_NOFORK
419 }
420
421 static bool bm_face_split_edgenet_find_loop(
422         BMVert *v_init, const float face_normal[3], float face_normal_matrix[3][3],
423         /* cache to avoid realloc every time */
424         struct VertOrder *edge_order, const uint edge_order_len,
425         BMVert **r_face_verts, int *r_face_verts_len)
426 {
427         BMEdge *e_pair[2];
428         BMVert *v;
429
430         if (!bm_face_split_edgenet_find_loop_pair(v_init, face_normal, face_normal_matrix, e_pair)) {
431                 return false;
432         }
433
434         BLI_assert((bm_edge_flagged_radial_count(e_pair[0]) == 1) ||
435                    (bm_edge_flagged_radial_count(e_pair[1]) == 1));
436
437         if (bm_face_split_edgenet_find_loop_walk(v_init, face_normal, edge_order, edge_order_len, e_pair)) {
438                 uint i = 0;
439
440                 r_face_verts[i++] = v_init;
441                 v = BM_edge_other_vert(e_pair[1], v_init);
442                 do {
443                         r_face_verts[i++] = v;
444                 } while ((v = BM_edge_other_vert(v->e, v)) != v_init);
445                 *r_face_verts_len = i;
446                 return (i > 2) ? true : false;
447         }
448         else {
449                 return false;
450         }
451 }
452
453 /**
454  * Splits a face into many smaller faces defined by an edge-net.
455  * handle customdata and degenerate cases.
456  *
457  * - isolated holes or unsupported face configurations, will be ignored.
458  * - customdata calculations aren't efficient
459  *   (need to calculate weights for each vert).
460  */
461 bool BM_face_split_edgenet(
462         BMesh *bm,
463         BMFace *f, BMEdge **edge_net, const int edge_net_len,
464         BMFace ***r_face_arr, int *r_face_arr_len)
465 {
466         /* re-use for new face verts */
467         BMVert **face_verts;
468         int      face_verts_len;
469
470         BMFace **face_arr = NULL;
471         BLI_array_declare(face_arr);
472
473         BMVert **vert_queue;
474         STACK_DECLARE(vert_queue);
475         int i;
476
477         struct VertOrder *edge_order;
478         const uint edge_order_len = edge_net_len + 2;
479
480         BMVert *v;
481
482         BMLoop *l_iter, *l_first;
483
484
485         if (!edge_net_len) {
486                 if (r_face_arr) {
487                         *r_face_arr = NULL;
488                         *r_face_arr_len = 0;
489                 }
490                 return false;
491         }
492
493         /* over-alloc (probably 2-4 is only used in most cases), for the biggest-fan */
494         edge_order = BLI_array_alloca(edge_order, edge_order_len);
495
496         /* use later */
497         face_verts = BLI_array_alloca(face_verts, edge_net_len + f->len);
498
499         vert_queue = BLI_array_alloca(vert_queue, edge_net_len + f->len);
500         STACK_INIT(vert_queue, f->len + edge_net_len);
501
502         BLI_assert(BM_ELEM_API_FLAG_TEST(f, FACE_NET) == 0);
503         BM_ELEM_API_FLAG_ENABLE(f, FACE_NET);
504
505 #ifdef DEBUG
506         for (i = 0; i < edge_net_len; i++) {
507                 BLI_assert(BM_ELEM_API_FLAG_TEST(edge_net[i], EDGE_NET) == 0);
508                 BLI_assert(BM_edge_in_face(edge_net[i], f) == false);
509         }
510         l_iter = l_first = BM_FACE_FIRST_LOOP(f);
511         do {
512                 BLI_assert(BM_ELEM_API_FLAG_TEST(l_iter->e, EDGE_NET) == 0);
513         } while ((l_iter = l_iter->next) != l_first);
514 #endif
515
516         /* Note: 'VERT_IN_QUEUE' is often not needed at all,
517          * however in rare cases verts are added multiple times to the queue,
518          * that on it's own is harmless but in _very_ rare cases,
519          * the queue will overflow its maximum size,
520          * so we better be strict about this! see: T51539 */
521
522         for (i = 0; i < edge_net_len; i++) {
523                 BM_ELEM_API_FLAG_ENABLE(edge_net[i], EDGE_NET);
524                 BM_ELEM_API_FLAG_DISABLE(edge_net[i]->v1, VERT_IN_QUEUE);
525                 BM_ELEM_API_FLAG_DISABLE(edge_net[i]->v2, VERT_IN_QUEUE);
526         }
527         l_iter = l_first = BM_FACE_FIRST_LOOP(f);
528         do {
529                 BM_ELEM_API_FLAG_ENABLE(l_iter->e, EDGE_NET);
530                 BM_ELEM_API_FLAG_DISABLE(l_iter->v, VERT_IN_QUEUE);
531         } while ((l_iter = l_iter->next) != l_first);
532
533         float face_normal_matrix[3][3];
534         axis_dominant_v3_to_m3(face_normal_matrix, f->no);
535
536
537         /* any vert can be used to begin with */
538         STACK_PUSH(vert_queue, l_first->v);
539         BM_ELEM_API_FLAG_ENABLE(l_first->v, VERT_IN_QUEUE);
540
541         while ((v = STACK_POP(vert_queue))) {
542                 BM_ELEM_API_FLAG_DISABLE(v, VERT_IN_QUEUE);
543                 if (bm_face_split_edgenet_find_loop(
544                         v, f->no, face_normal_matrix,
545                         edge_order, edge_order_len, face_verts, &face_verts_len))
546                 {
547                         BMFace *f_new;
548
549                         f_new = BM_face_create_verts(bm, face_verts, face_verts_len, f, BM_CREATE_NOP, false);
550
551                         for (i = 0; i < edge_net_len; i++) {
552                                 BLI_assert(BM_ELEM_API_FLAG_TEST(edge_net[i], EDGE_NET));
553                         }
554
555                         if (f_new) {
556                                 BLI_array_append(face_arr, f_new);
557                                 copy_v3_v3(f_new->no, f->no);
558
559                                 /* warning, normally don't do this,
560                                  * its needed for mesh intersection - which tracks face-sides based on selection */
561                                 f_new->head.hflag = f->head.hflag;
562                                 if (f->head.hflag & BM_ELEM_SELECT) {
563                                         bm->totfacesel++;
564                                 }
565
566                                 BM_ELEM_API_FLAG_ENABLE(f_new, FACE_NET);
567
568                                 /* add new verts to keep finding loops for
569                                  * (verts between boundary and manifold edges) */
570                                 l_iter = l_first = BM_FACE_FIRST_LOOP(f_new);
571                                 do {
572                                         /* Avoid adding to queue multiple times (not common but happens). */
573                                         if (!BM_ELEM_API_FLAG_TEST(l_iter->v, VERT_IN_QUEUE) &&
574                                             bm_face_split_edgenet_find_loop_pair_exists(l_iter->v))
575                                         {
576                                                 STACK_PUSH(vert_queue, l_iter->v);
577                                                 BM_ELEM_API_FLAG_ENABLE(l_iter->v, VERT_IN_QUEUE);
578                                         }
579                                 } while ((l_iter = l_iter->next) != l_first);
580                         }
581                 }
582         }
583
584
585         if (CustomData_has_math(&bm->ldata)) {
586                 /* reuse VERT_VISIT here to tag vert's already interpolated */
587                 BMIter iter;
588                 BMLoop *l_other;
589
590                 /* see: #BM_loop_interp_from_face for similar logic  */
591                 void **blocks   = BLI_array_alloca(blocks, f->len);
592                 float (*cos_2d)[2] = BLI_array_alloca(cos_2d, f->len);
593                 float *w        = BLI_array_alloca(w, f->len);
594                 float axis_mat[3][3];
595                 float co[2];
596
597                 /* interior loops */
598                 axis_dominant_v3_to_m3(axis_mat, f->no);
599
600
601                 /* first simply copy from existing face */
602                 i = 0;
603                 l_iter = l_first = BM_FACE_FIRST_LOOP(f);
604                 do {
605                         BM_ITER_ELEM (l_other, &iter, l_iter->v, BM_LOOPS_OF_VERT) {
606                                 if ((l_other->f != f) && BM_ELEM_API_FLAG_TEST(l_other->f, FACE_NET)) {
607                                         CustomData_bmesh_copy_data(&bm->ldata, &bm->ldata,
608                                                                    l_iter->head.data, &l_other->head.data);
609                                 }
610                         }
611                         /* tag not to interpolate */
612                         BM_ELEM_API_FLAG_ENABLE(l_iter->v, VERT_VISIT);
613
614
615                         mul_v2_m3v3(cos_2d[i], axis_mat, l_iter->v->co);
616                         blocks[i] = l_iter->head.data;
617
618                 } while ((void)i++, (l_iter = l_iter->next) != l_first);
619
620
621                 for (i = 0; i < edge_net_len; i++) {
622                         BM_ITER_ELEM (v, &iter, edge_net[i], BM_VERTS_OF_EDGE) {
623                                 if (!BM_ELEM_API_FLAG_TEST(v, VERT_VISIT)) {
624                                         BMIter liter;
625
626                                         BM_ELEM_API_FLAG_ENABLE(v, VERT_VISIT);
627
628                                         /* interpolate this loop, then copy to the rest */
629                                         l_first = NULL;
630
631                                         BM_ITER_ELEM (l_iter, &liter, v, BM_LOOPS_OF_VERT) {
632                                                 if (BM_ELEM_API_FLAG_TEST(l_iter->f, FACE_NET)) {
633                                                         if (l_first == NULL) {
634                                                                 mul_v2_m3v3(co, axis_mat, v->co);
635                                                                 interp_weights_poly_v2(w, cos_2d, f->len, co);
636                                                                 CustomData_bmesh_interp(
637                                                                         &bm->ldata, (const void **)blocks,
638                                                                         w, NULL, f->len, l_iter->head.data);
639                                                                 l_first = l_iter;
640                                                         }
641                                                         else {
642                                                                 CustomData_bmesh_copy_data(&bm->ldata, &bm->ldata,
643                                                                                            l_first->head.data, &l_iter->head.data);
644                                                         }
645                                                 }
646                                         }
647                                 }
648                         }
649                 }
650         }
651
652
653
654         /* cleanup */
655         for (i = 0; i < edge_net_len; i++) {
656                 BM_ELEM_API_FLAG_DISABLE(edge_net[i], EDGE_NET);
657                 /* from interp only */
658                 BM_ELEM_API_FLAG_DISABLE(edge_net[i]->v1, VERT_VISIT);
659                 BM_ELEM_API_FLAG_DISABLE(edge_net[i]->v2, VERT_VISIT);
660         }
661         l_iter = l_first = BM_FACE_FIRST_LOOP(f);
662         do {
663                 BM_ELEM_API_FLAG_DISABLE(l_iter->e, EDGE_NET);
664                 /* from interp only */
665                 BM_ELEM_API_FLAG_DISABLE(l_iter->v, VERT_VISIT);
666         } while ((l_iter = l_iter->next) != l_first);
667
668         if (BLI_array_count(face_arr)) {
669                 bmesh_face_swap_data(f, face_arr[0]);
670                 BM_face_kill(bm, face_arr[0]);
671                 face_arr[0] = f;
672         }
673         else {
674                 BM_ELEM_API_FLAG_DISABLE(f, FACE_NET);
675         }
676
677         for (i = 0; i < BLI_array_count(face_arr); i++) {
678                 BM_ELEM_API_FLAG_DISABLE(face_arr[i], FACE_NET);
679         }
680
681         if (r_face_arr) {
682                 *r_face_arr = face_arr;
683                 *r_face_arr_len = BLI_array_count(face_arr);
684         }
685         else {
686                 if (face_arr) {
687                         MEM_freeN(face_arr);
688                 }
689         }
690
691         return true;
692 }
693
694 #undef FACE_NET
695 #undef VERT_VISIT
696 #undef EDGE_NET
697
698 /** \} */
699
700
701 /* -------------------------------------------------------------------- */
702 /* Face Split Edge-Net Connect Islands */
703
704 /** \name BM_face_split_edgenet_connect_islands and helper functions.
705  *
706  * Connect isolated mesh 'islands' so they form legal regions from which we can create faces.
707  *
708  * Intended to be used as a pre-processing step for #BM_face_split_edgenet.
709  *
710  * \warning Currently this risks running out of stack memory (#alloca),
711  * likely we'll pass in a memory arena (cleared each use) eventually.
712  *
713  * \{ */
714
715
716 #define USE_PARTIAL_CONNECT
717
718
719 #define VERT_IS_VALID BM_ELEM_INTERNAL_TAG
720
721 /* can be X or Y */
722 #define SORT_AXIS 0
723
724 BLI_INLINE bool edge_isect_verts_point_2d(
725         const BMEdge *e, const BMVert *v_a, const BMVert *v_b,
726         float r_isect[2])
727 {
728         return ((isect_seg_seg_v2_point(v_a->co, v_b->co, e->v1->co, e->v2->co, r_isect) == 1) &&
729                 ((e->v1 != v_a) && (e->v2 != v_a) && (e->v1 != v_b)  && (e->v2 != v_b)));
730 }
731
732 /**
733  * Represents isolated edge-links,
734  * each island owns contiguous slices of the vert array.
735  * (edges remain in `edge_links`).
736  */
737 struct EdgeGroupIsland {
738         LinkNode edge_links;  /* keep first */
739         uint vert_len, edge_len;
740
741         /* Set the following vars once we have >1 groups */
742
743         /* when when an edge in a previous group connects to this one,
744          * so theres no need to create one pointing back. */
745         uint has_prev_edge : 1;
746
747         /* verts in the group which has the lowest & highest values,
748          * the lower vertex is connected to the first edge */
749         struct {
750                 BMVert *min, *max;
751                 /* used for sorting only */
752                 float min_axis;
753         } vert_span;
754 };
755
756 static int group_min_cmp_fn(const void *p1, const void *p2)
757 {
758         const struct EdgeGroupIsland *g1 = *(struct EdgeGroupIsland **)p1;
759         const struct EdgeGroupIsland *g2 = *(struct EdgeGroupIsland **)p2;
760         /* min->co[SORT_AXIS] hasn't been applied yet */
761         const float f1 = g1->vert_span.min_axis;
762         const float f2 = g2->vert_span.min_axis;
763
764         if (f1 < f2) return -1;
765         if (f1 > f2) return  1;
766         else         return  0;
767 }
768
769 struct Edges_VertVert_BVHTreeTest {
770         float dist_orig;
771         BMEdge **edge_arr;
772
773         BMVert *v_origin;
774         BMVert *v_other;
775
776         const uint *vert_range;
777 };
778
779 struct Edges_VertRay_BVHTreeTest {
780         BMEdge **edge_arr;
781
782         BMVert *v_origin;
783
784         const uint *vert_range;
785 };
786
787 static void bvhtree_test_edges_isect_2d_vert_cb(
788         void *user_data, int index, const BVHTreeRay *UNUSED(ray), BVHTreeRayHit *hit)
789 {
790         struct Edges_VertVert_BVHTreeTest *data = user_data;
791         const BMEdge *e = data->edge_arr[index];
792         const int v1_index = BM_elem_index_get(e->v1);
793         float co_isect[2];
794
795         if (edge_isect_verts_point_2d(e, data->v_origin, data->v_other, co_isect)) {
796                 const float t = line_point_factor_v2(co_isect, data->v_origin->co, data->v_other->co);
797                 const float dist_new = data->dist_orig * t;
798                 /* avoid float precision issues, possible this is greater,
799                  * check above zero to allow some overlap
800                  * (and needed for partial-connect which will overlap vertices) */
801                 if (LIKELY((dist_new < hit->dist) && (dist_new > 0.0f))) {
802                         /* v1/v2 will both be in the same group */
803                         if (v1_index <  (int)data->vert_range[0] ||
804                             v1_index >= (int)data->vert_range[1])
805                         {
806                                 hit->dist = dist_new;
807                                 hit->index = index;
808                         }
809                 }
810         }
811 }
812
813 static void bvhtree_test_edges_isect_2d_ray_cb(
814         void *user_data, int index, const BVHTreeRay *ray, BVHTreeRayHit *hit)
815 {
816         struct Edges_VertRay_BVHTreeTest *data = user_data;
817         const BMEdge *e = data->edge_arr[index];
818
819         /* direction is normalized, so this will be the distance */
820         float dist_new;
821         if (isect_ray_seg_v2(data->v_origin->co, ray->direction, e->v1->co, e->v2->co, &dist_new, NULL)) {
822                 /* avoid float precision issues, possible this is greater,
823                  * check above zero to allow some overlap
824                  * (and needed for partial-connect which will overlap vertices) */
825                 if (LIKELY(dist_new < hit->dist && (dist_new > 0.0f))) {
826                         if (e->v1 != data->v_origin && e->v2 != data->v_origin) {
827                                 const int v1_index = BM_elem_index_get(e->v1);
828                                 /* v1/v2 will both be in the same group */
829                                 if (v1_index <  (int)data->vert_range[0] ||
830                                     v1_index >= (int)data->vert_range[1])
831                                 {
832                                         hit->dist = dist_new;
833                                         hit->index = index;
834                                 }
835                         }
836                 }
837         }
838 }
839
840 /**
841  * Store values for:
842  * - #bm_face_split_edgenet_find_connection
843  * - #test_edges_isect_2d
844  * ... which don't change each call.
845  */
846 struct EdgeGroup_FindConnection_Args {
847         BVHTree *bvhtree;
848         BMEdge **edge_arr;
849         uint edge_arr_len;
850
851         BMEdge **edge_arr_new;
852         uint edge_arr_new_len;
853
854         const uint *vert_range;
855 };
856
857 static BMEdge *test_edges_isect_2d_vert(
858         const struct EdgeGroup_FindConnection_Args *args,
859         BMVert *v_origin, BMVert *v_other)
860 {
861         int index;
862
863         BVHTreeRayHit hit = {0};
864         float dir[3];
865
866         sub_v2_v2v2(dir, v_other->co, v_origin->co);
867         dir[2] = 0.0f;
868         hit.index = -1;
869         hit.dist = normalize_v2(dir);
870
871         struct Edges_VertVert_BVHTreeTest user_data = {0};
872         user_data.dist_orig = hit.dist;
873         user_data.edge_arr = args->edge_arr;
874         user_data.v_origin = v_origin;
875         user_data.v_other = v_other;
876         user_data.vert_range = args->vert_range;
877
878         index = BLI_bvhtree_ray_cast_ex(
879                 args->bvhtree, v_origin->co, dir, 0.0f, &hit,
880                 bvhtree_test_edges_isect_2d_vert_cb, &user_data, 0);
881
882         BMEdge *e_hit = (index != -1) ? args->edge_arr[index] : NULL;
883
884         /* check existing connections (no spatial optimization here since we're continually adding). */
885         if (LIKELY(index == -1)) {
886                 float t_best = 1.0f;
887                 for (uint i = 0; i < args->edge_arr_new_len; i++) {
888                         float co_isect[2];
889                         if (UNLIKELY(edge_isect_verts_point_2d(args->edge_arr_new[i], v_origin, v_other, co_isect))) {
890                                 const float t_test = line_point_factor_v2(co_isect, v_origin->co, v_other->co);
891                                 if (t_test < t_best) {
892                                         t_best = t_test;
893
894                                         e_hit = args->edge_arr_new[i];
895                                 }
896                         }
897                 }
898         }
899
900         return e_hit;
901 }
902
903 /**
904  * Similar to #test_edges_isect_2d_vert but we're casting into a direction,
905  * (not to a vertex)
906  */
907 static BMEdge *test_edges_isect_2d_ray(
908         const struct EdgeGroup_FindConnection_Args *args,
909         BMVert *v_origin, const float dir[3])
910 {
911         int index;
912         BVHTreeRayHit hit = {0};
913
914         BLI_ASSERT_UNIT_V2(dir);
915
916         hit.index = -1;
917         hit.dist = BVH_RAYCAST_DIST_MAX;
918
919         struct Edges_VertRay_BVHTreeTest user_data = {0};
920         user_data.edge_arr = args->edge_arr;
921         user_data.v_origin = v_origin;
922         user_data.vert_range = args->vert_range;
923
924         index = BLI_bvhtree_ray_cast_ex(
925                 args->bvhtree, v_origin->co, dir, 0.0f, &hit,
926                 bvhtree_test_edges_isect_2d_ray_cb, &user_data, 0);
927
928         BMEdge *e_hit = (index != -1) ? args->edge_arr[index] : NULL;
929
930         /* check existing connections (no spatial optimization here since we're continually adding). */
931         if (LIKELY(index != -1)) {
932                 for (uint i = 0; i < args->edge_arr_new_len; i++) {
933                         BMEdge *e = args->edge_arr_new[i];
934                         float dist_new;
935                         if (isect_ray_seg_v2(v_origin->co, dir, e->v1->co, e->v2->co, &dist_new, NULL)) {
936                                 if (e->v1 != v_origin && e->v2 != v_origin) {
937                                         /* avoid float precision issues, possible this is greater */
938                                         if (LIKELY(dist_new < hit.dist)) {
939                                                 hit.dist = dist_new;
940
941                                                 e_hit = args->edge_arr_new[i];
942                                         }
943                                 }
944                         }
945                 }
946         }
947
948         return e_hit;
949 }
950
951 static int bm_face_split_edgenet_find_connection(
952         const struct EdgeGroup_FindConnection_Args *args,
953         BMVert *v_origin,
954         /* false = negative, true = positive */
955         bool direction_sign)
956 {
957         /**
958          * Method for finding connection is as follows:
959          *
960          * - Cast a ray along either the positive or negative directions.
961          * - Take the hit-edge, and cast rays to their vertices checking those rays don't intersect a closer edge.
962          * - Keep taking the hit-edge and testing its verts until a vertex is found which isn't blocked by an edge.
963          *
964          * \note It's possible none of the verts can be accessed (with self-intersecting lines).
965          * In that case theres no right answer (without subdividing edges),
966          * so return a fall-back vertex in that case.
967          */
968
969         const float dir[3] = {[SORT_AXIS] = direction_sign ? 1.0f : -1.0f};
970         BMEdge *e_hit = test_edges_isect_2d_ray(args, v_origin, dir);
971         BMVert *v_other = NULL;
972
973         if (e_hit) {
974                 BMVert *v_other_fallback = NULL;
975
976                 BLI_SMALLSTACK_DECLARE(vert_search, BMVert *);
977
978                 /* ensure we never add verts multiple times (not all that likely - but possible) */
979                 BLI_SMALLSTACK_DECLARE(vert_blacklist, BMVert *);
980
981                 do {
982                         BMVert *v_pair[2];
983                         /* ensure the closest vertex is popped back off the stack first */
984                         if (len_squared_v2v2(v_origin->co, e_hit->v1->co) >
985                             len_squared_v2v2(v_origin->co, e_hit->v2->co))
986                         {
987                                 ARRAY_SET_ITEMS(v_pair, e_hit->v1, e_hit->v2);
988                         }
989                         else {
990                                 ARRAY_SET_ITEMS(v_pair, e_hit->v2, e_hit->v1);
991                         }
992
993                         for (int j = 0; j < 2; j++) {
994                                 BMVert *v_iter = v_pair[j];
995                                 if (BM_elem_flag_test(v_iter, VERT_IS_VALID)) {
996                                         if (direction_sign ? (v_iter->co[SORT_AXIS] >= v_origin->co[SORT_AXIS]) :
997                                                              (v_iter->co[SORT_AXIS] <= v_origin->co[SORT_AXIS]))
998                                         {
999                                                 BLI_SMALLSTACK_PUSH(vert_search, v_iter);
1000                                                 BLI_SMALLSTACK_PUSH(vert_blacklist, v_iter);
1001                                                 BM_elem_flag_disable(v_iter, VERT_IS_VALID);
1002                                         }
1003                                 }
1004                         }
1005                         v_other_fallback = v_other;
1006
1007                 } while ((v_other = BLI_SMALLSTACK_POP(vert_search)) &&
1008                          (e_hit   = test_edges_isect_2d_vert(args, v_origin, v_other)));
1009
1010                 if (v_other == NULL) {
1011                         printf("Using fallback\n");
1012                         v_other = v_other_fallback;
1013                 }
1014
1015                 /* reset the blacklist flag, for future use */
1016                 BMVert *v;
1017                 while ((v = BLI_SMALLSTACK_POP(vert_blacklist))) {
1018                         BM_elem_flag_enable(v, VERT_IS_VALID);
1019                 }
1020         }
1021
1022         /* if we reach this line, v_other is either the best vertex or its NULL */
1023         return v_other ? BM_elem_index_get(v_other) : -1;
1024 }
1025
1026
1027 /**
1028  * Support for connecting islands that have single-edge connections.
1029  * This options is not very optimal (however its not needed for booleans either).
1030  */
1031 #ifdef USE_PARTIAL_CONNECT
1032
1033 /**
1034  * Split vertices which are part of a partial connection
1035  * (only a single vertex connecting an island).
1036  *
1037  * \note All edges and vertices must have their #BM_ELEM_INTERNAL_TAG flag enabled.
1038  * This function leaves all the flags set as well.
1039  *
1040  */
1041 static BMVert *bm_face_split_edgenet_partial_connect(BMesh *bm, BMVert *v_delimit)
1042 {
1043         /* -------------------------------------------------------------------- */
1044         /* Initial check that we may be a delimiting vert (keep this fast) */
1045
1046         /* initial check - see if we have 3+ flagged edges attached to 'v_delimit'
1047          * if not, we can early exit */
1048         LinkNode *e_delimit_list = NULL;
1049         uint e_delimit_list_len = 0;
1050
1051 #define EDGE_NOT_IN_STACK  BM_ELEM_INTERNAL_TAG
1052 #define VERT_NOT_IN_STACK  BM_ELEM_INTERNAL_TAG
1053
1054 #define FOREACH_VERT_EDGE(v_, e_, body_) { \
1055         BMEdge *e_ = v_->e; \
1056         do { body_ } while ((e_ = BM_DISK_EDGE_NEXT(e_, v_)) != v_->e); \
1057 } ((void)0)
1058
1059         /* start with face edges, since we need to split away wire-only edges */
1060         BMEdge *e_face_init = NULL;
1061
1062         FOREACH_VERT_EDGE(v_delimit, e_iter, {
1063                 if (BM_elem_flag_test(e_iter, EDGE_NOT_IN_STACK)) {
1064                         BLI_assert(BM_elem_flag_test(BM_edge_other_vert(e_iter, v_delimit), VERT_NOT_IN_STACK));
1065                         BLI_linklist_prepend_alloca(&e_delimit_list, e_iter);
1066                         e_delimit_list_len++;
1067                         if (e_iter->l != NULL) {
1068                                 e_face_init = e_iter;
1069                         }
1070                 }
1071         });
1072
1073         /* skip typical edge-chain verts */
1074         if (LIKELY(e_delimit_list_len <= 2)) {
1075                 return NULL;
1076         }
1077
1078
1079         /* -------------------------------------------------------------------- */
1080         /* Complicated stuff starts now! */
1081
1082
1083         /* Store connected vertices for restoring the flag */
1084         LinkNode *vert_stack = NULL;
1085         BLI_linklist_prepend_alloca(&vert_stack, v_delimit);
1086         BM_elem_flag_disable(v_delimit, VERT_NOT_IN_STACK);
1087
1088         /* Walk the net... */
1089         {
1090                 BLI_SMALLSTACK_DECLARE(search, BMVert *);
1091                 BMVert *v_other = BM_edge_other_vert(e_face_init ? e_face_init : v_delimit->e, v_delimit);
1092
1093                 BLI_SMALLSTACK_PUSH(search, v_other);
1094                 BM_elem_flag_disable(v_other, VERT_NOT_IN_STACK);
1095
1096                 while ((v_other = BLI_SMALLSTACK_POP(search))) {
1097                         BLI_assert(BM_elem_flag_test(v_other, VERT_NOT_IN_STACK) == false);
1098                         BLI_linklist_prepend_alloca(&vert_stack, v_other);
1099                         BMEdge *e_iter = v_other->e;
1100                         do {
1101                                 BMVert *v_step = BM_edge_other_vert(e_iter, v_other);
1102                                 if (BM_elem_flag_test(e_iter, EDGE_NOT_IN_STACK)) {
1103                                         if (BM_elem_flag_test(v_step, VERT_NOT_IN_STACK)) {
1104                                                 BM_elem_flag_disable(v_step, VERT_NOT_IN_STACK);
1105                                                 BLI_SMALLSTACK_PUSH(search, v_step);
1106                                         }
1107                                 }
1108                         } while ((e_iter = BM_DISK_EDGE_NEXT(e_iter, v_other)) != v_other->e);
1109                 }
1110         }
1111
1112         /* Detect if this is a delimiter by checking if we didn't walk any of edges connected to 'v_delimit' */
1113         bool is_delimit = false;
1114         FOREACH_VERT_EDGE(v_delimit, e_iter, {
1115                 BMVert *v_step = BM_edge_other_vert(e_iter, v_delimit);
1116                 if (BM_elem_flag_test(v_step, VERT_NOT_IN_STACK) && BM_edge_is_wire(e_iter)) {
1117                         is_delimit = true;  /* if one vertex is valid - we have a mix */
1118                 }
1119                 else {
1120                         /* match the vertex flag (only for edges around 'v_delimit') */
1121                         BM_elem_flag_disable(e_iter, EDGE_NOT_IN_STACK);
1122                 }
1123         });
1124
1125 #undef FOREACH_VERT_EDGE
1126
1127         /* Execute the split */
1128         BMVert *v_split = NULL;
1129         if (is_delimit) {
1130                 v_split = BM_vert_create(bm, v_delimit->co, NULL, 0);
1131                 BM_vert_separate_wire_hflag(bm, v_split, v_delimit, EDGE_NOT_IN_STACK);
1132                 BM_elem_flag_enable(v_split, VERT_NOT_IN_STACK);
1133
1134                 BLI_assert(v_delimit->e != NULL);
1135                 BLI_assert(v_split->e != NULL);
1136         }
1137
1138         /* Restore flags */
1139         do {
1140                 BM_elem_flag_enable((BMVert *)vert_stack->link, VERT_NOT_IN_STACK);
1141         } while ((vert_stack = vert_stack->next));
1142
1143         do {
1144                 BM_elem_flag_enable((BMEdge *)e_delimit_list->link, EDGE_NOT_IN_STACK);
1145         } while ((e_delimit_list = e_delimit_list->next));
1146
1147 #undef EDGE_NOT_IN_STACK
1148 #undef VERT_NOT_IN_STACK
1149
1150         return v_split;
1151 }
1152
1153
1154 /**
1155  * Check if connecting vertices would cause an edge with duplicate verts.
1156  */
1157 static bool bm_vert_partial_connect_check_overlap(
1158         const int *remap,
1159         const int v_a_index, const int v_b_index)
1160 {
1161         /* connected to eachother */
1162         if (UNLIKELY((remap[v_a_index] == v_b_index) ||
1163                      (remap[v_b_index] == v_a_index)))
1164         {
1165                 return true;
1166         }
1167         else {
1168                 return false;
1169         }
1170 }
1171
1172
1173 #endif  /* USE_PARTIAL_CONNECT */
1174
1175
1176
1177 /**
1178  * For when the edge-net has holes in it-this connects them.
1179  *
1180  * \param use_partial_connect: Support for handling islands connected by only a single edge,
1181  * \note that this is quite slow so avoid using where possible.
1182  * \param mem_arena: Avoids many small allocs & should be cleared after each use.
1183  * take care since \a r_edge_net_new is stored in \a r_edge_net_new.
1184  */
1185 bool BM_face_split_edgenet_connect_islands(
1186         BMesh *bm,
1187         BMFace *f, BMEdge **edge_net_init, const uint edge_net_init_len,
1188         bool use_partial_connect,
1189         MemArena *mem_arena,
1190         BMEdge ***r_edge_net_new, uint *r_edge_net_new_len)
1191 {
1192         /* -------------------------------------------------------------------- */
1193         /* This function has 2 main parts.
1194          *
1195          * - Check if there are any holes.
1196          * - Connect the holes with edges (if any are found).
1197          *
1198          * Keep the first part fast since it will run very often for edge-nets that have no holes.
1199          *
1200          * \note Don't use the mem_arena unless he have holes to fill.
1201          * (avoid thrashing the area when the initial check isn't so intensive on the stack).
1202          */
1203
1204         const uint edge_arr_len = (uint)edge_net_init_len + (uint)f->len;
1205         BMEdge **edge_arr = BLI_array_alloca(edge_arr, edge_arr_len);
1206         bool ok = false;
1207
1208         memcpy(edge_arr, edge_net_init, sizeof(*edge_arr) * (size_t)edge_net_init_len);
1209
1210         /* _must_ clear on exit */
1211 #define EDGE_NOT_IN_STACK  BM_ELEM_INTERNAL_TAG
1212 #define VERT_NOT_IN_STACK  BM_ELEM_INTERNAL_TAG
1213
1214         {
1215                 uint i = edge_net_init_len;
1216                 BMLoop *l_iter, *l_first;
1217                 l_iter = l_first = BM_FACE_FIRST_LOOP(f);
1218                 do {
1219                         edge_arr[i++] = l_iter->e;
1220                 } while ((l_iter = l_iter->next) != l_first);
1221                 BLI_assert(i == edge_arr_len);
1222         }
1223
1224         for (uint i = 0; i < edge_arr_len; i++) {
1225                 BM_elem_flag_enable(edge_arr[i], EDGE_NOT_IN_STACK);
1226                 BM_elem_flag_enable(edge_arr[i]->v1, VERT_NOT_IN_STACK);
1227                 BM_elem_flag_enable(edge_arr[i]->v2, VERT_NOT_IN_STACK);
1228         }
1229
1230
1231
1232 #ifdef USE_PARTIAL_CONNECT
1233         /* Split-out delimiting vertices */
1234         struct TempVertPair {
1235                 struct TempVertPair *next;
1236                 BMVert *v_temp;
1237                 BMVert *v_orig;
1238         };
1239
1240         struct {
1241                 struct TempVertPair *list;
1242                 uint len;
1243                 int *remap;  /* temp -> orig mapping */
1244         } temp_vert_pairs = {NULL};
1245
1246         if (use_partial_connect) {
1247                 for (uint i = 0; i < edge_net_init_len; i++) {
1248                         for (unsigned j = 0; j < 2; j++) {
1249                                 BMVert *v_delimit = (&edge_arr[i]->v1)[j];
1250                                 BMVert *v_other;
1251
1252                                 /* note, remapping will _never_ map a vertex to an already mapped vertex */
1253                                 while (UNLIKELY((v_other = bm_face_split_edgenet_partial_connect(bm, v_delimit)))) {
1254                                         struct TempVertPair *tvp = BLI_memarena_alloc(mem_arena, sizeof(*tvp));
1255                                         tvp->next = temp_vert_pairs.list;
1256                                         tvp->v_orig = v_delimit;
1257                                         tvp->v_temp = v_other;
1258                                         temp_vert_pairs.list = tvp;
1259                                         temp_vert_pairs.len++;
1260                                 }
1261                         }
1262                 }
1263
1264                 if (temp_vert_pairs.len == 0) {
1265                         use_partial_connect = false;
1266                 }
1267         }
1268 #endif  /* USE_PARTIAL_CONNECT */
1269
1270
1271
1272         uint group_arr_len = 0;
1273         LinkNode *group_head = NULL;
1274         {
1275                 /* scan 'edge_arr' backwards so the outer face boundary is handled first
1276                  * (since its likely to be the largest) */
1277                 uint edge_index = (edge_arr_len - 1);
1278                 uint edge_in_group_tot = 0;
1279
1280                 BLI_SMALLSTACK_DECLARE(vstack, BMVert *);
1281
1282                 while (true) {
1283                         LinkNode *edge_links = NULL;
1284                         uint unique_verts_in_group = 0, unique_edges_in_group = 0;
1285
1286                         /* list of groups */
1287                         BLI_assert(BM_elem_flag_test(edge_arr[edge_index]->v1, VERT_NOT_IN_STACK));
1288                         BLI_SMALLSTACK_PUSH(vstack, edge_arr[edge_index]->v1);
1289                         BM_elem_flag_disable(edge_arr[edge_index]->v1, VERT_NOT_IN_STACK);
1290
1291                         BMVert *v_iter;
1292                         while ((v_iter = BLI_SMALLSTACK_POP(vstack))) {
1293                                 unique_verts_in_group++;
1294
1295                                 BMEdge *e_iter = v_iter->e;
1296                                 do {
1297                                         if (BM_elem_flag_test(e_iter, EDGE_NOT_IN_STACK)) {
1298                                                 BM_elem_flag_disable(e_iter, EDGE_NOT_IN_STACK);
1299                                                 unique_edges_in_group++;
1300
1301                                                 BLI_linklist_prepend_alloca(&edge_links, e_iter);
1302
1303                                                 BMVert *v_other = BM_edge_other_vert(e_iter, v_iter);
1304                                                 if (BM_elem_flag_test(v_other, VERT_NOT_IN_STACK)) {
1305                                                         BLI_SMALLSTACK_PUSH(vstack, v_other);
1306                                                         BM_elem_flag_disable(v_other, VERT_NOT_IN_STACK);
1307                                                 }
1308                                         }
1309                                 } while ((e_iter = BM_DISK_EDGE_NEXT(e_iter, v_iter)) != v_iter->e);
1310                         }
1311
1312                         struct EdgeGroupIsland *g = alloca(sizeof(*g));
1313                         g->vert_len = unique_verts_in_group;
1314                         g->edge_len = unique_edges_in_group;
1315                         edge_in_group_tot += unique_edges_in_group;
1316
1317                         BLI_linklist_prepend_nlink(&group_head, edge_links, (LinkNode *)g);
1318
1319                         group_arr_len++;
1320
1321                         if (edge_in_group_tot == edge_arr_len) {
1322                                 break;
1323                         }
1324
1325                         /* skip edges in the stack */
1326                         while (BM_elem_flag_test(edge_arr[edge_index], EDGE_NOT_IN_STACK) == false) {
1327                                 BLI_assert(edge_index != 0);
1328                                 edge_index--;
1329                         }
1330                 }
1331         }
1332
1333         /* single group - no holes */
1334         if (group_arr_len == 1) {
1335                 goto finally;
1336         }
1337
1338
1339         /* -------------------------------------------------------------------- */
1340         /* Previous checks need to be kept fast, since they will run very often,
1341          * now we know there are holes, so calculate a spatial lookup info and
1342          * other per-group data.
1343          */
1344
1345         float axis_mat[3][3];
1346         axis_dominant_v3_to_m3(axis_mat, f->no);
1347
1348 #define VERT_IN_ARRAY BM_ELEM_INTERNAL_TAG
1349
1350         struct EdgeGroupIsland **group_arr = BLI_memarena_alloc(mem_arena, sizeof(*group_arr) * group_arr_len);
1351         uint vert_arr_len = 0;
1352         /* sort groups by lowest value vertex */
1353         {
1354                 /* fill 'groups_arr' in reverse order so the boundary face is first */
1355                 struct EdgeGroupIsland **group_arr_p = &group_arr[group_arr_len];
1356
1357                 for (struct EdgeGroupIsland *g = (void *)group_head; g; g = (struct EdgeGroupIsland *)g->edge_links.next) {
1358                         LinkNode *edge_links = g->edge_links.link;
1359
1360                         /* init with *any* different verts */
1361                         g->vert_span.min = ((BMEdge *)edge_links->link)->v1;
1362                         g->vert_span.max = ((BMEdge *)edge_links->link)->v2;
1363                         float min_axis =  FLT_MAX;
1364                         float max_axis = -FLT_MAX;
1365
1366                         do {
1367                                 BMEdge *e = edge_links->link;
1368                                 BLI_assert(e->head.htype == BM_EDGE);
1369
1370                                 for (int j = 0; j < 2; j++) {
1371                                         BMVert *v_iter = (&e->v1)[j];
1372                                         BLI_assert(v_iter->head.htype == BM_VERT);
1373                                         /* ideally we could use 'v_iter->co[SORT_AXIS]' here,
1374                                          * but we need to sort the groups before setting the vertex array order */
1375 #if SORT_AXIS == 0
1376                                         const float axis_value = dot_m3_v3_row_x(axis_mat, v_iter->co);
1377 #else
1378                                         const float axis_value = dot_m3_v3_row_y(axis_mat, v_iter->co);
1379 #endif
1380
1381                                         if (axis_value < min_axis) {
1382                                                 g->vert_span.min = v_iter;
1383                                                 min_axis = axis_value;
1384                                         }
1385                                         if (axis_value > max_axis ) {
1386                                                 g->vert_span.max = v_iter;
1387                                                 max_axis  = axis_value;
1388                                         }
1389                                 }
1390                         } while ((edge_links = edge_links->next));
1391
1392                         g->vert_span.min_axis = min_axis;
1393
1394                         g->has_prev_edge = false;
1395
1396                         vert_arr_len += g->vert_len;
1397
1398                         *(--group_arr_p) = g;
1399                 }
1400         }
1401
1402         qsort(group_arr, group_arr_len, sizeof(*group_arr), group_min_cmp_fn);
1403
1404         /* we don't know how many unique verts there are connecting the edges, so over-alloc */
1405         BMVert **vert_arr = BLI_memarena_alloc(mem_arena, sizeof(*vert_arr) * vert_arr_len);
1406         /* map vertex -> group index */
1407         uint *verts_group_table = BLI_memarena_alloc(mem_arena, sizeof(*verts_group_table) * vert_arr_len);
1408
1409         float (*vert_coords_backup)[3] = BLI_memarena_alloc(mem_arena, sizeof(*vert_coords_backup) * vert_arr_len);
1410
1411         {
1412                 /* relative location, for higher precision calculations */
1413                 const float f_co_ref[3] = {UNPACK3(BM_FACE_FIRST_LOOP(f)->v->co)};
1414
1415                 int v_index = 0;  /* global vert index */
1416                 for (uint g_index = 0; g_index < group_arr_len; g_index++) {
1417                         LinkNode *edge_links = group_arr[g_index]->edge_links.link;
1418                         do {
1419                                 BMEdge *e = edge_links->link;
1420                                 for (int j = 0; j < 2; j++) {
1421                                         BMVert *v_iter = (&e->v1)[j];
1422                                         if (!BM_elem_flag_test(v_iter, VERT_IN_ARRAY)) {
1423                                                 BM_elem_flag_enable(v_iter, VERT_IN_ARRAY);
1424
1425                                                 /* not nice, but alternatives arent much better :S */
1426                                                 {
1427                                                         copy_v3_v3(vert_coords_backup[v_index], v_iter->co);
1428
1429                                                         /* for higher precision */
1430                                                         sub_v3_v3(v_iter->co, f_co_ref);
1431
1432                                                         float co_2d[2];
1433                                                         mul_v2_m3v3(co_2d, axis_mat, v_iter->co);
1434                                                         v_iter->co[0] = co_2d[0];
1435                                                         v_iter->co[1] = co_2d[1];
1436                                                         v_iter->co[2] = 0.0f;
1437                                                 }
1438
1439                                                 BM_elem_index_set(v_iter, v_index);  /* set_dirty */
1440
1441                                                 vert_arr[v_index] = v_iter;
1442                                                 verts_group_table[v_index] = g_index;
1443                                                 v_index++;
1444                                         }
1445                                 }
1446                         } while ((edge_links = edge_links->next));
1447                 }
1448         }
1449
1450         bm->elem_index_dirty |= BM_VERT;
1451
1452         /* Now create bvh tree*/
1453         BVHTree *bvhtree = BLI_bvhtree_new(edge_arr_len, 0.0f, 8, 8);
1454         for (uint i = 0; i < edge_arr_len; i++) {
1455                 const float e_cos[2][3] = {
1456                     {UNPACK2(edge_arr[i]->v1->co), 0.0f},
1457                     {UNPACK2(edge_arr[i]->v2->co), 0.0f},
1458                 };
1459                 BLI_bvhtree_insert(bvhtree, i, (const float *)e_cos, 2);
1460         }
1461         BLI_bvhtree_balance(bvhtree);
1462
1463
1464
1465 #ifdef USE_PARTIAL_CONNECT
1466         if (use_partial_connect) {
1467                 /* needs to be done once the vertex indices have been written into */
1468                 temp_vert_pairs.remap = BLI_memarena_alloc(mem_arena, sizeof(*temp_vert_pairs.remap) * vert_arr_len);
1469                 copy_vn_i(temp_vert_pairs.remap, vert_arr_len, -1);
1470
1471                 struct TempVertPair *tvp = temp_vert_pairs.list;
1472                 do {
1473                         temp_vert_pairs.remap[BM_elem_index_get(tvp->v_temp)] = BM_elem_index_get(tvp->v_orig);
1474                 } while ((tvp = tvp->next));
1475         }
1476 #endif  /* USE_PARTIAL_CONNECT */
1477
1478
1479
1480         /* Create connections between groups */
1481
1482         /* may be an over-alloc, but not by much */
1483         uint edge_net_new_len = (uint)edge_net_init_len + ((group_arr_len - 1) * 2);
1484         BMEdge **edge_net_new = BLI_memarena_alloc(mem_arena, sizeof(*edge_net_new) * edge_net_new_len);
1485         memcpy(edge_net_new, edge_net_init, sizeof(*edge_net_new) * (size_t)edge_net_init_len);
1486
1487         {
1488                 uint edge_net_new_index = edge_net_init_len;
1489                 /* start-end of the verts in the current group */
1490
1491                 uint vert_range[2];
1492
1493                 vert_range[0] = 0;
1494                 vert_range[1] = group_arr[0]->vert_len;
1495
1496                 struct EdgeGroup_FindConnection_Args args = {
1497                         .bvhtree = bvhtree,
1498
1499                         /* use the new edge array so we can scan edges which have been added */
1500                         .edge_arr = edge_arr,
1501                         .edge_arr_len = edge_arr_len,
1502
1503                         /* we only want to check newly created edges */
1504                         .edge_arr_new = edge_net_new + edge_net_init_len,
1505                         .edge_arr_new_len = 0,
1506
1507                         .vert_range = vert_range,
1508                 };
1509
1510                 for (uint g_index = 1; g_index < group_arr_len; g_index++) {
1511                         struct EdgeGroupIsland *g = group_arr[g_index];
1512
1513                         /* the range of verts this group uses in 'verts_arr' (not uncluding the last index) */
1514                         vert_range[0]  = vert_range[1];
1515                         vert_range[1] += g->vert_len;
1516
1517                         if (g->has_prev_edge == false) {
1518                                 BMVert *v_origin = g->vert_span.min;
1519
1520                                 const int index_other = bm_face_split_edgenet_find_connection(&args, v_origin, false);
1521                                 // BLI_assert(index_other >= 0 && index_other < (int)vert_arr_len);
1522
1523                                 /* only for degenerate geometry */
1524                                 if (index_other != -1) {
1525 #ifdef USE_PARTIAL_CONNECT
1526                                         if ((use_partial_connect == false) ||
1527                                             (bm_vert_partial_connect_check_overlap(
1528                                                  temp_vert_pairs.remap,
1529                                                  BM_elem_index_get(v_origin), index_other) == false))
1530 #endif
1531                                         {
1532                                                 BMVert *v_end = vert_arr[index_other];
1533
1534                                                 edge_net_new[edge_net_new_index] = BM_edge_create(bm, v_origin, v_end, NULL, 0);
1535 #ifdef USE_PARTIAL_CONNECT
1536                                                 BM_elem_index_set(edge_net_new[edge_net_new_index], edge_net_new_index);
1537 #endif
1538                                                 edge_net_new_index++;
1539                                                 args.edge_arr_new_len++;
1540                                         }
1541                                 }
1542                         }
1543
1544                         {
1545                                 BMVert *v_origin = g->vert_span.max;
1546
1547                                 const int index_other = bm_face_split_edgenet_find_connection(&args, v_origin, true);
1548                                 // BLI_assert(index_other >= 0 && index_other < (int)vert_arr_len);
1549
1550                                 /* only for degenerate geometry */
1551                                 if (index_other != -1) {
1552 #ifdef USE_PARTIAL_CONNECT
1553                                         if ((use_partial_connect == false) ||
1554                                             (bm_vert_partial_connect_check_overlap(
1555                                                  temp_vert_pairs.remap,
1556                                                  BM_elem_index_get(v_origin), index_other) == false))
1557 #endif
1558                                         {
1559                                                 BMVert *v_end = vert_arr[index_other];
1560                                                 edge_net_new[edge_net_new_index] = BM_edge_create(bm, v_origin, v_end, NULL, 0);
1561 #ifdef USE_PARTIAL_CONNECT
1562                                                 BM_elem_index_set(edge_net_new[edge_net_new_index], edge_net_new_index);
1563 #endif
1564                                                 edge_net_new_index++;
1565                                                 args.edge_arr_new_len++;
1566                                         }
1567
1568                                         /* tell the 'next' group it doesn't need to create its own back-link */
1569                                         uint g_index_other = verts_group_table[index_other];
1570                                         group_arr[g_index_other]->has_prev_edge = true;
1571                                 }
1572                         }
1573
1574                 }
1575                 BLI_assert(edge_net_new_len >= edge_net_new_index);
1576                 edge_net_new_len = edge_net_new_index;
1577         }
1578
1579         BLI_bvhtree_free(bvhtree);
1580
1581         *r_edge_net_new = edge_net_new;
1582         *r_edge_net_new_len = edge_net_new_len;
1583         ok = true;
1584
1585         for (uint i = 0; i < vert_arr_len; i++) {
1586                 copy_v3_v3(vert_arr[i]->co, vert_coords_backup[i]);
1587         }
1588
1589 finally:
1590
1591 #ifdef USE_PARTIAL_CONNECT
1592         /* don't free 'vert_temp_pair_list', its part of the arena */
1593         if (use_partial_connect) {
1594
1595                 /* Sanity check: ensure we don't have connecting edges before splicing begins. */
1596 #ifdef DEBUG
1597                 {
1598                         struct TempVertPair *tvp = temp_vert_pairs.list;
1599                         do {
1600                                 /* we must _never_ create connections here
1601                                  * (inface the islands can't have a connection at all) */
1602                                 BLI_assert(BM_edge_exists(tvp->v_orig, tvp->v_temp) == NULL);
1603                         } while ((tvp = tvp->next));
1604                 }
1605 #endif
1606
1607                 struct TempVertPair *tvp = temp_vert_pairs.list;
1608                 do {
1609                         /* its _very_ unlikely the edge exists,
1610                          * however splicing may case this. see: T48012 */
1611                         if (!BM_edge_exists(tvp->v_orig, tvp->v_temp)) {
1612                                 BM_vert_splice(bm, tvp->v_orig, tvp->v_temp);
1613                         }
1614                 } while ((tvp = tvp->next));
1615
1616                 /* Remove edges which have become doubles since splicing vertices together,
1617                  * its less trouble then detecting future-doubles on edge-creation. */
1618                 for (uint i = edge_net_init_len; i < edge_net_new_len; i++) {
1619                         while (BM_edge_find_double(edge_net_new[i])) {
1620                                 BM_edge_kill(bm, edge_net_new[i]);
1621                                 edge_net_new_len--;
1622                                 if (i == edge_net_new_len) {
1623                                         break;
1624                                 }
1625                                 edge_net_new[i] = edge_net_new[edge_net_new_len];
1626                         }
1627                 }
1628
1629                 *r_edge_net_new_len = edge_net_new_len;
1630         }
1631 #endif
1632
1633
1634         for (uint i = 0; i < edge_arr_len; i++) {
1635                 BM_elem_flag_disable(edge_arr[i], EDGE_NOT_IN_STACK);
1636                 BM_elem_flag_disable(edge_arr[i]->v1, VERT_NOT_IN_STACK);
1637                 BM_elem_flag_disable(edge_arr[i]->v2, VERT_NOT_IN_STACK);
1638         }
1639
1640 #undef VERT_IN_ARRAY
1641 #undef VERT_NOT_IN_STACK
1642 #undef EDGE_NOT_IN_STACK
1643
1644         return ok;
1645 }
1646
1647 #undef SORT_AXIS
1648
1649 /** \} */