Cleanup: style
[blender.git] / source / blender / draw / intern / draw_cache_impl_displist.c
1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  *
16  * The Original Code is Copyright (C) 2017 by Blender Foundation.
17  * All rights reserved.
18  */
19
20 /** \file
21  * \ingroup draw
22  *
23  * \brief DispList API for render engines
24  *
25  * \note DispList may be removed soon! This is a utility for object types that use render.
26  */
27
28
29 #include "BLI_alloca.h"
30 #include "BLI_utildefines.h"
31 #include "BLI_edgehash.h"
32 #include "BLI_math_vector.h"
33
34 #include "DNA_curve_types.h"
35
36 #include "BKE_displist.h"
37
38 #include "GPU_batch.h"
39 #include "GPU_extensions.h"
40
41 #include "draw_cache_impl.h"  /* own include */
42
43 static int dl_vert_len(const DispList *dl)
44 {
45         switch (dl->type) {
46                 case DL_INDEX3:
47                 case DL_INDEX4:
48                         return dl->nr;
49                 case DL_SURF:
50                         return dl->parts * dl->nr;
51         }
52         return 0;
53 }
54
55 static int dl_tri_len(const DispList *dl)
56 {
57         switch (dl->type) {
58                 case DL_INDEX3:
59                         return dl->parts;
60                 case DL_INDEX4:
61                         return dl->parts * 2;
62                 case DL_SURF:
63                         return dl->totindex * 2;
64         }
65         return 0;
66 }
67
68 /* see: displist_get_allverts */
69 static int curve_render_surface_vert_len_get(const ListBase *lb)
70 {
71         int vert_len = 0;
72         for (const DispList *dl = lb->first; dl; dl = dl->next) {
73                 vert_len += dl_vert_len(dl);
74         }
75         return vert_len;
76 }
77
78 static int curve_render_surface_tri_len_get(const ListBase *lb)
79 {
80         int tri_len = 0;
81         for (const DispList *dl = lb->first; dl; dl = dl->next) {
82                 tri_len += dl_tri_len(dl);
83         }
84         return tri_len;
85 }
86
87 typedef void (SetTriIndicesFn)(void *thunk, uint v1, uint v2, uint v3);
88
89 static void displist_indexbufbuilder_set(
90         SetTriIndicesFn *set_tri_indices,
91         SetTriIndicesFn *set_quad_tri_indices, /* meh, find a better solution. */
92         void *thunk, const DispList *dl, const int ofs)
93 {
94         if (ELEM(dl->type, DL_INDEX3, DL_INDEX4, DL_SURF)) {
95                 const int *idx = dl->index;
96                 if (dl->type == DL_INDEX3) {
97                         const int i_end = dl->parts;
98                         for (int i = 0; i < i_end; i++, idx += 3) {
99                                 set_tri_indices(thunk, idx[0] + ofs, idx[2] + ofs, idx[1] + ofs);
100                         }
101                 }
102                 else if (dl->type == DL_SURF) {
103                         const int i_end = dl->totindex;
104                         for (int i = 0; i < i_end; i++, idx += 4) {
105                                 set_quad_tri_indices(thunk, idx[0] + ofs, idx[2] + ofs, idx[1] + ofs);
106                                 set_quad_tri_indices(thunk, idx[2] + ofs, idx[0] + ofs, idx[3] + ofs);
107                         }
108                 }
109                 else {
110                         BLI_assert(dl->type == DL_INDEX4);
111                         const int i_end = dl->parts;
112                         for (int i = 0; i < i_end; i++, idx += 4) {
113                                 if (idx[2] != idx[3]) {
114                                         set_quad_tri_indices(thunk, idx[2] + ofs, idx[0] + ofs, idx[1] + ofs);
115                                         set_quad_tri_indices(thunk, idx[0] + ofs, idx[2] + ofs, idx[3] + ofs);
116                                 }
117                                 else {
118                                         set_tri_indices(thunk, idx[2] + ofs, idx[0] + ofs, idx[1] + ofs);
119                                 }
120                         }
121                 }
122         }
123 }
124
125 static int displist_indexbufbuilder_tess_set(
126         SetTriIndicesFn *set_tri_indices,
127         SetTriIndicesFn *set_quad_tri_indices, /* meh, find a better solution. */
128         void *thunk, const DispList *dl, const int ofs)
129 {
130         int v_idx = ofs;
131         if (ELEM(dl->type, DL_INDEX3, DL_INDEX4, DL_SURF)) {
132                 if (dl->type == DL_INDEX3) {
133                         for (int i = 0; i < dl->parts; i++) {
134                                 set_tri_indices(thunk, v_idx + 0, v_idx + 1, v_idx + 2);
135                                 v_idx += 3;
136                         }
137                 }
138                 else if (dl->type == DL_SURF) {
139                         for (int a = 0; a < dl->parts; a++) {
140                                 if ((dl->flag & DL_CYCL_V) == 0 && a == dl->parts - 1) {
141                                         break;
142                                 }
143                                 int b = (dl->flag & DL_CYCL_U) ? 0 : 1;
144                                 for (; b < dl->nr; b++) {
145                                         set_quad_tri_indices(thunk, v_idx + 0, v_idx + 1, v_idx + 2);
146                                         set_quad_tri_indices(thunk, v_idx + 3, v_idx + 4, v_idx + 5);
147                                         v_idx += 6;
148                                 }
149                         }
150                 }
151                 else {
152                         BLI_assert(dl->type == DL_INDEX4);
153                         const int *idx = dl->index;
154                         for (int i = 0; i < dl->parts; i++, idx += 4) {
155                                 if (idx[2] != idx[3]) {
156                                         set_quad_tri_indices(thunk, v_idx + 0, v_idx + 1, v_idx + 2);
157                                         set_quad_tri_indices(thunk, v_idx + 3, v_idx + 4, v_idx + 5);
158                                         v_idx += 6;
159                                 }
160                                 else {
161                                         set_tri_indices(thunk, v_idx + 0, v_idx + 1, v_idx + 2);
162                                         v_idx += 3;
163                                 }
164                         }
165                 }
166         }
167         return v_idx;
168 }
169
170 void DRW_displist_vertbuf_create_pos_and_nor(ListBase *lb, GPUVertBuf *vbo)
171 {
172         static GPUVertFormat format = { 0 };
173         static struct { uint pos, nor; } attr_id;
174         if (format.attr_len == 0) {
175                 /* initialize vertex format */
176                 attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
177                 attr_id.nor = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
178         }
179
180         GPU_vertbuf_init_with_format(vbo, &format);
181         GPU_vertbuf_data_alloc(vbo, curve_render_surface_vert_len_get(lb));
182
183         BKE_displist_normals_add(lb);
184
185         int vbo_len_used = 0;
186         for (const DispList *dl = lb->first; dl; dl = dl->next) {
187                 const bool ndata_is_single = dl->type == DL_INDEX3;
188                 if (ELEM(dl->type, DL_INDEX3, DL_INDEX4, DL_SURF)) {
189                         const float *fp_co = dl->verts;
190                         const float *fp_no = dl->nors;
191                         const int vbo_end = vbo_len_used + dl_vert_len(dl);
192                         while (vbo_len_used < vbo_end) {
193                                 GPU_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used, fp_co);
194                                 if (fp_no) {
195                                         GPUPackedNormal vnor_pack = GPU_normal_convert_i10_v3(fp_no);
196                                         GPU_vertbuf_attr_set(vbo, attr_id.nor, vbo_len_used, &vnor_pack);
197                                         if (ndata_is_single == false) {
198                                                 fp_no += 3;
199                                         }
200                                 }
201                                 fp_co += 3;
202                                 vbo_len_used += 1;
203                         }
204                 }
205         }
206 }
207
208 void DRW_displist_vertbuf_create_wiredata(ListBase *lb, GPUVertBuf *vbo)
209 {
210         static GPUVertFormat format = { 0 };
211         static struct { uint wd; } attr_id;
212         if (format.attr_len == 0) {
213                 /* initialize vertex format */
214                 if (!GPU_crappy_amd_driver()) {
215                         /* Some AMD drivers strangely crash with a vbo with this format. */
216                         attr_id.wd = GPU_vertformat_attr_add(&format, "wd", GPU_COMP_U8, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
217                 }
218                 else {
219                         attr_id.wd = GPU_vertformat_attr_add(&format, "wd", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
220                 }
221         }
222
223         int vbo_len_used = curve_render_surface_vert_len_get(lb);
224
225         GPU_vertbuf_init_with_format(vbo, &format);
226         GPU_vertbuf_data_alloc(vbo, vbo_len_used);
227
228         if (vbo->format.stride == 1) {
229                 memset(vbo->data, 0xFF, (size_t)vbo_len_used);
230         }
231         else {
232                 GPUVertBufRaw wd_step;
233                 GPU_vertbuf_attr_get_raw_data(vbo, attr_id.wd, &wd_step);
234                 for (int i = 0; i < vbo_len_used; i++) {
235                         *((float *)GPU_vertbuf_raw_step(&wd_step)) = 1.0f;
236                 }
237         }
238 }
239
240 void DRW_displist_indexbuf_create_triangles_in_order(ListBase *lb, GPUIndexBuf *ibo)
241 {
242         const int tri_len = curve_render_surface_tri_len_get(lb);
243         const int vert_len = curve_render_surface_vert_len_get(lb);
244
245         GPUIndexBufBuilder elb;
246         GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tri_len, vert_len);
247
248         int ofs = 0;
249         for (const DispList *dl = lb->first; dl; dl = dl->next) {
250                 displist_indexbufbuilder_set(
251                         (SetTriIndicesFn *)GPU_indexbuf_add_tri_verts,
252                         (SetTriIndicesFn *)GPU_indexbuf_add_tri_verts,
253                         &elb, dl, ofs);
254                 ofs += dl_vert_len(dl);
255         }
256
257         GPU_indexbuf_build_in_place(&elb, ibo);
258 }
259
260 void DRW_displist_indexbuf_create_triangles_loop_split_by_material(
261         ListBase *lb,
262         GPUIndexBuf **ibo_mats, uint mat_len)
263 {
264         GPUIndexBufBuilder *elb = BLI_array_alloca(elb, mat_len);
265
266         const int tri_len = curve_render_surface_tri_len_get(lb);
267
268         /* Init each index buffer builder */
269         for (int i = 0; i < mat_len; i++) {
270                 GPU_indexbuf_init(&elb[i], GPU_PRIM_TRIS, tri_len * 3, tri_len * 3);
271         }
272
273         /* calc each index buffer builder */
274         uint v_idx = 0;
275         for (const DispList *dl = lb->first; dl; dl = dl->next) {
276                 v_idx = displist_indexbufbuilder_tess_set(
277                         (SetTriIndicesFn *)GPU_indexbuf_add_tri_verts,
278                         (SetTriIndicesFn *)GPU_indexbuf_add_tri_verts,
279                         &elb[dl->col], dl, v_idx);
280         }
281
282         /* build each indexbuf */
283         for (int i = 0; i < mat_len; i++) {
284                 GPU_indexbuf_build_in_place(&elb[i], ibo_mats[i]);
285         }
286 }
287
288 static void set_overlay_wires_tri_indices(void *thunk, uint v1, uint v2, uint v3)
289 {
290         GPUIndexBufBuilder *eld = (GPUIndexBufBuilder *)thunk;
291         GPU_indexbuf_add_line_verts(eld, v1, v2);
292         GPU_indexbuf_add_line_verts(eld, v2, v3);
293         GPU_indexbuf_add_line_verts(eld, v3, v1);
294 }
295
296 static void set_overlay_wires_quad_tri_indices(void *thunk, uint v1, uint v2, uint v3)
297 {
298         GPUIndexBufBuilder *eld = (GPUIndexBufBuilder *)thunk;
299         GPU_indexbuf_add_line_verts(eld, v1, v3);
300         GPU_indexbuf_add_line_verts(eld, v3, v2);
301 }
302
303 void DRW_displist_indexbuf_create_lines_in_order(ListBase *lb, GPUIndexBuf *ibo)
304 {
305         const int tri_len = curve_render_surface_tri_len_get(lb);
306         const int vert_len = curve_render_surface_vert_len_get(lb);
307
308         GPUIndexBufBuilder elb;
309         GPU_indexbuf_init(&elb, GPU_PRIM_LINES, tri_len * 3, vert_len);
310
311         int ofs = 0;
312         for (const DispList *dl = lb->first; dl; dl = dl->next) {
313                 displist_indexbufbuilder_set(
314                         (SetTriIndicesFn *)set_overlay_wires_tri_indices,
315                         (SetTriIndicesFn *)set_overlay_wires_quad_tri_indices,
316                         &elb, dl, ofs);
317                 ofs += dl_vert_len(dl);
318         }
319
320         GPU_indexbuf_build_in_place(&elb, ibo);
321 }
322
323 static void surf_uv_quad(const DispList *dl, const uint quad[4], float r_uv[4][2])
324 {
325         int orco_sizeu = dl->nr - 1;
326         int orco_sizev = dl->parts - 1;
327
328         /* exception as handled in convertblender.c too */
329         if (dl->flag & DL_CYCL_U) {
330                 orco_sizeu++;
331         }
332         if (dl->flag & DL_CYCL_V) {
333                 orco_sizev++;
334         }
335
336         for (int i = 0; i < 4; i++) {
337                 /* find uv based on vertex index into grid array */
338                 r_uv[i][0] = (quad[i] / dl->nr) / (float)orco_sizev;
339                 r_uv[i][1] = (quad[i] % dl->nr) / (float)orco_sizeu;
340
341                 /* cyclic correction */
342                 if ((i == 1 || i == 2) && r_uv[i][0] == 0.0f) {
343                         r_uv[i][0] = 1.0f;
344                 }
345                 if ((i == 0 || i == 1) && r_uv[i][1] == 0.0f) {
346                         r_uv[i][1] = 1.0f;
347                 }
348         }
349 }
350
351 static void displist_vertbuf_attr_set_tri_pos_nor_uv(
352         GPUVertBufRaw *pos_step, GPUVertBufRaw *nor_step, GPUVertBufRaw *uv_step,
353         const float v1[3], const float v2[3], const float v3[3],
354         const GPUPackedNormal *n1, const GPUPackedNormal *n2, const GPUPackedNormal *n3,
355         const float uv1[2], const float uv2[2], const float uv3[2])
356 {
357         if (pos_step->size != 0) {
358                 copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v1);
359                 copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v2);
360                 copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v3);
361
362                 *(GPUPackedNormal *)GPU_vertbuf_raw_step(nor_step) = *n1;
363                 *(GPUPackedNormal *)GPU_vertbuf_raw_step(nor_step) = *n2;
364                 *(GPUPackedNormal *)GPU_vertbuf_raw_step(nor_step) = *n3;
365         }
366
367         if (uv_step->size != 0) {
368                 normal_float_to_short_v2(GPU_vertbuf_raw_step(uv_step), uv1);
369                 normal_float_to_short_v2(GPU_vertbuf_raw_step(uv_step), uv2);
370                 normal_float_to_short_v2(GPU_vertbuf_raw_step(uv_step), uv3);
371         }
372 }
373
/**
 * Fill the tessellated ("loop") vertex buffers: one vertex per triangle
 * corner, so every triangle has its own corner data.
 *
 * \param vbo_pos_nor: optional; receives positions + packed normals.
 * \param vbo_uv: optional; receives UVs compressed to shorts.
 * Either VBO may be NULL (checked via #DRW_TEST_ASSIGN_VBO); only requested
 * streams are written.
 */
void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv(
        ListBase *lb,
        GPUVertBuf *vbo_pos_nor, GPUVertBuf *vbo_uv)
{
	static GPUVertFormat format_pos_nor = { 0 };
	static GPUVertFormat format_uv = { 0 };
	static struct { uint pos, nor, uv; } attr_id;
	if (format_pos_nor.attr_len == 0) {
		/* initialize vertex format */
		attr_id.pos = GPU_vertformat_attr_add(&format_pos_nor, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
		attr_id.nor = GPU_vertformat_attr_add(&format_pos_nor, "nor", GPU_COMP_I10, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
		GPU_vertformat_triple_load(&format_pos_nor);
		/* UVs are in [0..1] range. We can compress them. */
		attr_id.uv = GPU_vertformat_attr_add(&format_uv, "u", GPU_COMP_I16, 2, GPU_FETCH_INT_TO_FLOAT_UNIT);
	}

	/* Upper bound: 3 corners per triangle; trimmed at the end. */
	int vbo_len_capacity = curve_render_surface_tri_len_get(lb) * 3;

	/* Zero-initialized `size` marks a stream as "not requested". */
	GPUVertBufRaw pos_step = {0};
	GPUVertBufRaw nor_step = {0};
	GPUVertBufRaw uv_step = {0};

	if (DRW_TEST_ASSIGN_VBO(vbo_pos_nor)) {
		GPU_vertbuf_init_with_format(vbo_pos_nor, &format_pos_nor);
		GPU_vertbuf_data_alloc(vbo_pos_nor, vbo_len_capacity);
		GPU_vertbuf_attr_get_raw_data(vbo_pos_nor, attr_id.pos, &pos_step);
		GPU_vertbuf_attr_get_raw_data(vbo_pos_nor, attr_id.nor, &nor_step);
	}
	if (DRW_TEST_ASSIGN_VBO(vbo_uv)) {
		GPU_vertbuf_init_with_format(vbo_uv, &format_uv);
		GPU_vertbuf_data_alloc(vbo_uv, vbo_len_capacity);
		GPU_vertbuf_attr_get_raw_data(vbo_uv, attr_id.uv, &uv_step);
	}

	/* Ensure `dl->nors` is populated before reading normals below. */
	BKE_displist_normals_add(lb);

	for (const DispList *dl = lb->first; dl; dl = dl->next) {
		const bool is_smooth = (dl->rt & CU_SMOOTH) != 0;
		if (ELEM(dl->type, DL_INDEX3, DL_INDEX4, DL_SURF)) {
			const float(*verts)[3] = (float(*)[3])dl->verts;
			const float(*nors)[3] = (float(*)[3])dl->nors;
			const int *idx = dl->index;
			float uv[4][2];

			if (dl->type == DL_INDEX3) {
				/* Currently 'DL_INDEX3' is always a flat surface with a single normal. */
				const GPUPackedNormal pnor = GPU_normal_convert_i10_v3(dl->nors);
				const float x_max = (float)(dl->nr - 1);
				uv[0][1] = uv[1][1] = uv[2][1] = 0.0f;
				const int i_end = dl->parts;
				for (int i = 0; i < i_end; i++, idx += 3) {
					if (vbo_uv) {
						/* U spans the vertex index range; V stays 0. */
						uv[0][0] = idx[0] / x_max;
						uv[1][0] = idx[1] / x_max;
						uv[2][0] = idx[2] / x_max;
					}

					/* Corners 1/2 swapped to flip the winding. */
					displist_vertbuf_attr_set_tri_pos_nor_uv(
					        &pos_step, &nor_step, &uv_step,
					        verts[idx[0]], verts[idx[2]], verts[idx[1]],
					        &pnor, &pnor, &pnor,
					        uv[0], uv[2], uv[1]);
				}
			}
			else if (dl->type == DL_SURF) {
				uint quad[4];
				for (int a = 0; a < dl->parts; a++) {
					/* Non-cyclic V: the last row has no quad strip above it. */
					if ((dl->flag & DL_CYCL_V) == 0 && a == dl->parts - 1) {
						break;
					}

					/* Seed the first quad of the row; cyclic U wraps the seam
					 * quad (hence b starts at 0), otherwise start at 1. */
					int b;
					if (dl->flag & DL_CYCL_U) {
						quad[0] = dl->nr * a;
						quad[3] = quad[0] + dl->nr - 1;
						quad[1] = quad[0] + dl->nr;
						quad[2] = quad[3] + dl->nr;
						b = 0;
					}
					else {
						quad[3] = dl->nr * a;
						quad[0] = quad[3] + 1;
						quad[2] = quad[3] + dl->nr;
						quad[1] = quad[0] + dl->nr;
						b = 1;
					}
					/* Cyclic V: the last row wraps back to row 0. */
					if ((dl->flag & DL_CYCL_V) && a == dl->parts - 1) {
						quad[1] -= dl->parts * dl->nr;
						quad[2] -= dl->parts * dl->nr;
					}

					for (; b < dl->nr; b++) {
						if (vbo_uv) {
							surf_uv_quad(dl, quad, uv);
						}

						GPUPackedNormal pnors_quad[4];
						if (is_smooth) {
							/* Per-vertex normals from the grid. */
							for (int j = 0; j < 4; j++) {
								pnors_quad[j] = GPU_normal_convert_i10_v3(nors[quad[j]]);
							}
						}
						else {
							/* Flat shading: one face normal for all corners. */
							float nor_flat[3];
							normal_quad_v3(nor_flat, verts[quad[0]], verts[quad[1]], verts[quad[2]], verts[quad[3]]);
							pnors_quad[0] = GPU_normal_convert_i10_v3(nor_flat);
							pnors_quad[1] = pnors_quad[0];
							pnors_quad[2] = pnors_quad[0];
							pnors_quad[3] = pnors_quad[0];
						}

						/* Quad split into two triangles (2,0,1) and (0,2,3). */
						displist_vertbuf_attr_set_tri_pos_nor_uv(
						        &pos_step, &nor_step, &uv_step,
						        verts[quad[2]], verts[quad[0]], verts[quad[1]],
						        &pnors_quad[2], &pnors_quad[0], &pnors_quad[1],
						        uv[2], uv[0], uv[1]);

						displist_vertbuf_attr_set_tri_pos_nor_uv(
						        &pos_step, &nor_step, &uv_step,
						        verts[quad[0]], verts[quad[2]], verts[quad[3]],
						        &pnors_quad[0], &pnors_quad[2], &pnors_quad[3],
						        uv[0], uv[2], uv[3]);

						/* Slide the quad one step along the row. */
						quad[2] = quad[1];
						quad[1]++;
						quad[3] = quad[0];
						quad[0]++;
					}
				}
			}
			else {
				BLI_assert(dl->type == DL_INDEX4);
				/* Fixed unit-square UVs for DL_INDEX4 faces. */
				uv[0][0] = uv[0][1] = uv[1][0] = uv[3][1] = 0.0f;
				uv[1][1] = uv[2][0] = uv[2][1] = uv[3][0] = 1.0f;

				const int i_end = dl->parts;
				for (int i = 0; i < i_end; i++, idx += 4) {
					/* idx[2] == idx[3] marks a triangle stored as a quad. */
					const bool is_tri = idx[2] != idx[3];

					GPUPackedNormal pnors_idx[4];
					if (is_smooth) {
						int idx_len = is_tri ? 3 : 4;
						for (int j = 0; j < idx_len; j++) {
							pnors_idx[j] = GPU_normal_convert_i10_v3(nors[idx[j]]);
						}
					}
					else {
						/* Flat shading: one face normal for all corners. */
						float nor_flat[3];
						if (is_tri) {
							normal_tri_v3(nor_flat, verts[idx[0]], verts[idx[1]], verts[idx[2]]);
						}
						else {
							normal_quad_v3(nor_flat, verts[idx[0]], verts[idx[1]], verts[idx[2]], verts[idx[3]]);
						}
						pnors_idx[0] = GPU_normal_convert_i10_v3(nor_flat);
						pnors_idx[1] = pnors_idx[0];
						pnors_idx[2] = pnors_idx[0];
						pnors_idx[3] = pnors_idx[0];
					}

					displist_vertbuf_attr_set_tri_pos_nor_uv(
					        &pos_step, &nor_step, &uv_step,
					        verts[idx[0]], verts[idx[2]], verts[idx[1]],
					        &pnors_idx[0], &pnors_idx[2], &pnors_idx[1],
					        uv[0], uv[2], uv[1]);

					if (idx[2] != idx[3]) {
						/* Second triangle of the quad. */
						displist_vertbuf_attr_set_tri_pos_nor_uv(
						        &pos_step, &nor_step, &uv_step,
						        verts[idx[2]], verts[idx[0]], verts[idx[3]],
						        &pnors_idx[2], &pnors_idx[0], &pnors_idx[3],
						        uv[2], uv[0], uv[3]);
					}
				}
			}
		}
	}
	/* Resize and finish. */
	if (pos_step.size != 0) {
		int vbo_len_used = GPU_vertbuf_raw_used(&pos_step);
		if (vbo_len_used < vbo_len_capacity) {
			GPU_vertbuf_data_resize(vbo_pos_nor, vbo_len_used);
		}
	}
	if (uv_step.size != 0) {
		int vbo_len_used = GPU_vertbuf_raw_used(&uv_step);
		if (vbo_len_used < vbo_len_capacity) {
			GPU_vertbuf_data_resize(vbo_uv, vbo_len_used);
		}
	}
}
565
/* Edge detection / adjacency. */
567 #define NO_EDGE INT_MAX
/**
 * Register edge (v2, v3) of a triangle whose opposite vertex is \a v1.
 * First sighting of an edge stores the opposite vertex (winding encoded in
 * the sign bit); second sighting emits a lines-adjacency primitive pairing
 * both opposite vertices. Non-matching winding marks the mesh non-manifold.
 */
static void set_edge_adjacency_lines_indices(EdgeHash *eh, GPUIndexBufBuilder *elb, bool *r_is_manifold, uint v1, uint v2, uint v3)
{
	bool inv_indices = (v2 > v3);
	void **pval;
	bool value_is_init = BLI_edgehash_ensure_p(eh, v2, v3, &pval);
	int v_data = POINTER_AS_INT(*pval);
	if (!value_is_init || v_data == NO_EDGE) {
		/* Save the winding order inside the sign bit. Because the
		 * edgehash sort the keys and we need to compare winding later. */
		int value = (int)v1 + 1; /* 0 has no sign, so store index + 1. */
		*pval = POINTER_FROM_INT((inv_indices) ? -value : value);
	}
	else {
		/* HACK Tag as not used. Prevent overhead of BLI_edgehash_remove. */
		*pval = POINTER_FROM_INT(NO_EDGE);
		bool inv_opposite = (v_data < 0);
		uint v_opposite = (uint)abs(v_data) - 1; /* Undo the +1 shift. */

		if (inv_opposite == inv_indices) {
			/* Don't share edge if triangles have non matching winding. */
			GPU_indexbuf_add_line_adj_verts(elb, v1, v2, v3, v1);
			GPU_indexbuf_add_line_adj_verts(elb, v_opposite, v2, v3, v_opposite);
			*r_is_manifold = false;
		}
		else {
			GPU_indexbuf_add_line_adj_verts(elb, v1, v2, v3, v_opposite);
		}
	}
}
597
598 static void set_edges_adjacency_lines_indices(void *thunk, uint v1, uint v2, uint v3)
599 {
600         void **packed = (void**)thunk;
601         GPUIndexBufBuilder *elb = (GPUIndexBufBuilder*)packed[0];
602         EdgeHash *eh = (EdgeHash*)packed[1];
603         bool *r_is_manifold = (bool*)packed[2];
604
605         set_edge_adjacency_lines_indices(eh, elb, r_is_manifold, v1, v2, v3);
606         set_edge_adjacency_lines_indices(eh, elb, r_is_manifold, v2, v3, v1);
607         set_edge_adjacency_lines_indices(eh, elb, r_is_manifold, v3, v1, v2);
608 }
609
/**
 * Build a GPU_PRIM_LINES_ADJ index buffer for edge detection shaders.
 * Shared edges pair the opposite vertices of both adjacent triangles;
 * boundary (non-manifold) edges are emitted afterwards with their single
 * opposite vertex duplicated.
 *
 * \param r_is_manifold: set to false if any boundary or badly-wound edge is found.
 */
void DRW_displist_indexbuf_create_edges_adjacency_lines(struct ListBase *lb, struct GPUIndexBuf *ibo, bool *r_is_manifold)
{
	const int tri_len = curve_render_surface_tri_len_get(lb);
	const int vert_len = curve_render_surface_vert_len_get(lb);

	*r_is_manifold = true;

	/* Allocate max but only used indices are sent to GPU. */
	GPUIndexBufBuilder elb;
	GPU_indexbuf_init(&elb, GPU_PRIM_LINES_ADJ, tri_len * 3, vert_len);

	EdgeHash *eh = BLI_edgehash_new_ex(__func__, tri_len * 3);

	/* Pack values to pass to `set_edges_adjacency_lines_indices` function. */
	void *thunk[3] = {&elb, eh, r_is_manifold};
	int v_idx = 0;
	for (const DispList *dl = lb->first; dl; dl = dl->next) {
		displist_indexbufbuilder_set(
		        (SetTriIndicesFn *)set_edges_adjacency_lines_indices,
		        (SetTriIndicesFn *)set_edges_adjacency_lines_indices,
		        thunk, dl, v_idx);
		v_idx += dl_vert_len(dl);
	}

	/* Create edges for remaining non-manifold edges (seen only once;
	 * entries tagged NO_EDGE were already paired and emitted). */
	EdgeHashIterator *ehi;
	for (ehi = BLI_edgehashIterator_new(eh);
	     BLI_edgehashIterator_isDone(ehi) == false;
	     BLI_edgehashIterator_step(ehi))
	{
		uint v1, v2;
		int v_data = POINTER_AS_INT(BLI_edgehashIterator_getValue(ehi));
		if (v_data == NO_EDGE) {
			continue;
		}
		BLI_edgehashIterator_getKey(ehi, &v1, &v2);
		uint v0 = (uint)abs(v_data) - 1; /* Undo the +1 index shift. */
		if (v_data < 0) { /* inv_opposite */
			SWAP(uint, v1, v2);
		}
		GPU_indexbuf_add_line_adj_verts(&elb, v0, v1, v2, v0);
		*r_is_manifold = false;
	}
	BLI_edgehashIterator_free(ehi);
	BLI_edgehash_free(eh, NULL);

	GPU_indexbuf_build_in_place(&elb, ibo);
}
658 #undef NO_EDGE