Cleanup: variable name
[blender.git] / source / blender / draw / intern / draw_cache_impl_curve.c
1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * The Original Code is Copyright (C) 2017 by Blender Foundation.
19  * All rights reserved.
20  *
21  * ***** END GPL LICENSE BLOCK *****
22  */
23
24 /** \file draw_cache_impl_curve.c
25  *  \ingroup draw
26  *
27  * \brief Curve API for render engines
28  */
29
30 #include "MEM_guardedalloc.h"
31
32 #include "BLI_utildefines.h"
33 #include "BLI_math_vector.h"
34
35 #include "DNA_curve_types.h"
36
37 #include "BKE_curve.h"
38 #include "BKE_displist.h"
39 #include "BKE_font.h"
40
41 #include "GPU_batch.h"
42 #include "GPU_texture.h"
43 #include "GPU_material.h"
44
45 #include "UI_resources.h"
46
47 #include "DRW_render.h"
48
49 #include "draw_cache_impl.h"  /* own include */
50
/* Flag bits packed into the per-vertex `data` attribute (see `*_vflag_get`).
 * Parenthesized so the macros expand safely inside larger expressions. */
#define SELECT            1
#define ACTIVE_NURB       (1 << 2)
#define EVEN_U_BIT        (1 << 3) /* Alternate this bit for every U vert. */
54
55 /* Used as values of `color_id` in `edit_curve_overlay_handle_geom.glsl` */
enum {
        /* First free color id after the handle-color theme range. */
        COLOR_NURB_ULINE_ID = TH_HANDLE_AUTOCLAMP - TH_HANDLE_FREE + 2,

        /* Total number of handle color ids. */
        TOT_HANDLE_COL,
};
61
62 /**
63  * TODO
64  * - Ensure `CurveCache`, `SEQUENCER_DAG_WORKAROUND`.
65  * - Check number of verts/edges to see if cache is valid.
66  * - Check if 'overlay.edges' can use single attribute per edge, not 2 (for selection drawing).
67  */
68
69 static void curve_batch_cache_clear(Curve *cu);
70
71 /* ---------------------------------------------------------------------- */
72 /* Curve Interface, direct access to basic data. */
73
74 static void curve_render_overlay_verts_edges_len_get(
75         ListBase *lb, int *r_vert_len, int *r_edge_len)
76 {
77         BLI_assert(r_vert_len || r_edge_len);
78         int vert_len = 0;
79         int edge_len = 0;
80         for (Nurb *nu = lb->first; nu; nu = nu->next) {
81                 if (nu->bezt) {
82                         vert_len += nu->pntsu * 3;
83                         /* 2x handles per point*/
84                         edge_len += 2 * nu->pntsu;
85                 }
86                 else if (nu->bp) {
87                         vert_len += nu->pntsu * nu->pntsv;
88                         /* segments between points */
89                         edge_len += (nu->pntsu - 1) * nu->pntsv;
90                         edge_len += (nu->pntsv - 1) * nu->pntsu;
91                 }
92         }
93         if (r_vert_len) {
94                 *r_vert_len = vert_len;
95         }
96         if (r_edge_len) {
97                 *r_edge_len = edge_len;
98         }
99 }
100
101 static void curve_render_wire_verts_edges_len_get(
102         const CurveCache *ob_curve_cache,
103         int *r_curve_len, int *r_vert_len, int *r_edge_len)
104 {
105         BLI_assert(r_vert_len || r_edge_len);
106         int vert_len = 0;
107         int edge_len = 0;
108         int curve_len = 0;
109         for (const BevList *bl = ob_curve_cache->bev.first; bl; bl = bl->next) {
110                 if (bl->nr > 0) {
111                         const bool is_cyclic = bl->poly != -1;
112                         edge_len += (is_cyclic) ? bl->nr : bl->nr - 1;
113                         vert_len += bl->nr;
114                         curve_len += 1;
115                 }
116         }
117         for (const DispList *dl = ob_curve_cache->disp.first; dl; dl = dl->next) {
118                 if (ELEM(dl->type, DL_SEGM, DL_POLY)) {
119                         BLI_assert(dl->parts == 1);
120                         const bool is_cyclic = dl->type == DL_POLY;
121                         edge_len += (is_cyclic) ? dl->nr : dl->nr - 1;
122                         vert_len += dl->nr;
123                         curve_len += 1;
124                 }
125         }
126         if (r_vert_len) {
127                 *r_vert_len = vert_len;
128         }
129         if (r_edge_len) {
130                 *r_edge_len = edge_len;
131         }
132         if (r_curve_len) {
133                 *r_curve_len = curve_len;
134         }
135 }
136
137 static int curve_render_normal_len_get(const ListBase *lb, const CurveCache *ob_curve_cache)
138 {
139         int normal_len = 0;
140         const BevList *bl;
141         const Nurb *nu;
142         for (bl = ob_curve_cache->bev.first, nu = lb->first; nu && bl; bl = bl->next, nu = nu->next) {
143                 int nr = bl->nr;
144                 int skip = nu->resolu / 16;
145 #if 0
146                 while (nr-- > 0) { /* accounts for empty bevel lists */
147                         normal_len += 1;
148                         nr -= skip;
149                 }
150 #else
151                 /* Same as loop above */
152                 normal_len += (nr / (skip + 1)) + ((nr % (skip + 1)) != 0);
153 #endif
154         }
155         return normal_len;
156 }
157
158 /* ---------------------------------------------------------------------- */
159 /* Curve Interface, indirect, partially cached access to complex data. */
160
/* Transient snapshot of the curve data/counts needed to fill GPU buffers.
 * Created by curve_render_data_create(), freed by curve_render_data_free(). */
typedef struct CurveRenderData {
        /* Bitmask of CU_DATATYPE_* values this struct was filled for. */
        int types;

        /* Edit-mode control point / handle counts. */
        struct {
                int vert_len;
                int edge_len;
        } overlay;

        /* Evaluated center-line counts. */
        struct {
                int curve_len;
                int vert_len;
                int edge_len;
        } wire;

        /* edit mode normal's */
        struct {
                /* 'edge_len == len * 2'
                 * 'vert_len == len * 3' */
                int len;
        } normal;

        struct {
                EditFont *edit_font;
        } text;

        /* borrow from 'Object' (not owned; may be NULL unless CU_DATATYPE_WIRE) */
        CurveCache *ob_curve_cache;

        /* borrow from 'Curve' (not owned) */
        ListBase *nurbs;

        /* edit, index in nurb list */
        int actnu;
        /* edit, index in active nurb (BPoint or BezTriple) */
        int actvert;
} CurveRenderData;
197
/* Bits for CurveRenderData.types: which kinds of data a build pass needs. */
enum {
        /* Wire center-line */
        CU_DATATYPE_WIRE        = 1 << 0,
        /* Edit-mode verts and optionally handles */
        CU_DATATYPE_OVERLAY     = 1 << 1,
        /* Edit-mode normals */
        CU_DATATYPE_NORMAL      = 1 << 2,
        /* Geometry */
        CU_DATATYPE_SURFACE     = 1 << 3,
        /* Text */
        CU_DATATYPE_TEXT_SELECT = 1 << 4,
};
210
211 /*
212  * ob_curve_cache can be NULL, only needed for CU_DATATYPE_WIRE
213  */
214 static CurveRenderData *curve_render_data_create(Curve *cu, CurveCache *ob_curve_cache, const int types)
215 {
216         CurveRenderData *rdata = MEM_callocN(sizeof(*rdata), __func__);
217         rdata->types = types;
218         ListBase *nurbs;
219
220         rdata->actnu = cu->actnu;
221         rdata->actvert = cu->actvert;
222
223         rdata->ob_curve_cache = ob_curve_cache;
224
225         if (types & CU_DATATYPE_WIRE) {
226                 curve_render_wire_verts_edges_len_get(
227                         rdata->ob_curve_cache,
228                         &rdata->wire.curve_len, &rdata->wire.vert_len, &rdata->wire.edge_len);
229         }
230
231         if (cu->editnurb) {
232                 EditNurb *editnurb = cu->editnurb;
233                 nurbs = &editnurb->nurbs;
234
235                 if (types & CU_DATATYPE_OVERLAY) {
236                         curve_render_overlay_verts_edges_len_get(
237                                 nurbs,
238                                 &rdata->overlay.vert_len,
239                                 &rdata->overlay.edge_len);
240
241                         rdata->actnu = cu->actnu;
242                         rdata->actvert = cu->actvert;
243                 }
244                 if (types & CU_DATATYPE_NORMAL) {
245                         rdata->normal.len = curve_render_normal_len_get(nurbs, rdata->ob_curve_cache);
246                 }
247         }
248         else {
249                 nurbs = &cu->nurb;
250         }
251
252         rdata->nurbs = nurbs;
253
254         rdata->text.edit_font = cu->editfont;
255
256         return rdata;
257 }
258
259 static void curve_render_data_free(CurveRenderData *rdata)
260 {
261 #if 0
262         if (rdata->loose_verts) {
263                 MEM_freeN(rdata->loose_verts);
264         }
265 #endif
266         MEM_freeN(rdata);
267 }
268
/* Number of edit-mode overlay verts; requires CU_DATATYPE_OVERLAY. */
static int curve_render_data_overlay_verts_len_get(const CurveRenderData *rdata)
{
        BLI_assert(rdata->types & CU_DATATYPE_OVERLAY);
        return rdata->overlay.vert_len;
}
274
/* Number of edit-mode overlay edges; requires CU_DATATYPE_OVERLAY. */
static int curve_render_data_overlay_edges_len_get(const CurveRenderData *rdata)
{
        BLI_assert(rdata->types & CU_DATATYPE_OVERLAY);
        return rdata->overlay.edge_len;
}
280
/* Number of center-line wire verts; requires CU_DATATYPE_WIRE. */
static int curve_render_data_wire_verts_len_get(const CurveRenderData *rdata)
{
        BLI_assert(rdata->types & CU_DATATYPE_WIRE);
        return rdata->wire.vert_len;
}
286
/* Number of center-line wire edges; requires CU_DATATYPE_WIRE. */
static int curve_render_data_wire_edges_len_get(const CurveRenderData *rdata)
{
        BLI_assert(rdata->types & CU_DATATYPE_WIRE);
        return rdata->wire.edge_len;
}
292
/* Number of wire strips (one per bevel/display list); requires CU_DATATYPE_WIRE. */
static int curve_render_data_wire_curve_len_get(const CurveRenderData *rdata)
{
        BLI_assert(rdata->types & CU_DATATYPE_WIRE);
        return rdata->wire.curve_len;
}
298
/* Number of edit-mode normals to draw; requires CU_DATATYPE_NORMAL. */
static int curve_render_data_normal_len_get(const CurveRenderData *rdata)
{
        BLI_assert(rdata->types & CU_DATATYPE_NORMAL);
        return rdata->normal.len;
}
304
305 static void curve_cd_calc_used_gpu_layers(int *cd_layers, struct GPUMaterial **gpumat_array, int gpumat_array_len)
306 {
307         GPUVertexAttribs gattribs = {{{0}}};
308         for (int i = 0; i < gpumat_array_len; i++) {
309                 struct GPUMaterial *gpumat = gpumat_array[i];
310                 if (gpumat == NULL) {
311                         continue;
312                 }
313                 GPU_material_vertex_attributes(gpumat, &gattribs);
314                 for (int j = 0; j < gattribs.totlayer; j++) {
315                         const char *name = gattribs.layer[j].name;
316                         int type = gattribs.layer[j].type;
317
318                         /* Curves cannot have named layers.
319                          * Note: We could relax this assumption later. */
320                         if (name[0] != '\0') {
321                                 continue;
322                         }
323
324                         if (type == CD_AUTO_FROM_NAME) {
325                                 type = CD_MTFACE;
326                         }
327
328                         switch (type) {
329                                 case CD_MTFACE:
330                                         *cd_layers |= CD_MLOOPUV;
331                                         break;
332                                 case CD_TANGENT:
333                                         /* Currently unsupported */
334                                         // *cd_layers |= CD_TANGENT;
335                                         break;
336                                 case CD_MCOL:
337                                         /* Curve object don't have Color data. */
338                                         break;
339                                 case CD_ORCO:
340                                         *cd_layers |= CD_ORCO;
341                                         break;
342                         }
343                 }
344         }
345 }
346
347 /* ---------------------------------------------------------------------- */
348 /* Curve GPUBatch Cache */
349
/* Per-Curve cache of GPU vertex/index buffers and batches.
 * Lives on Curve.batch_cache; rebuilt when curve_batch_cache_valid() fails. */
typedef struct CurveBatchCache {
        /* Non-tessellated buffers, one entry per evaluated point. */
        struct {
                GPUVertBuf *pos_nor;
                GPUVertBuf *curves_pos;
        } ordered;

        /* Tessellated (per-triangle-corner) buffers. */
        struct {
                GPUVertBuf *pos_nor;
                GPUVertBuf *uv;

                GPUVertBuf *wireframe_data;
        } tess;

        struct {
                /* Curve points. Aligned with ordered.pos_nor */
                GPUVertBuf *curves_nor;
                GPUVertBuf *curves_weight; /* TODO. */
                /* Edit points (beztriples and bpoints) */
                GPUVertBuf *pos;
                GPUVertBuf *data;
        } edit;

        struct {
                GPUIndexBuf *surfaces_tris;
                GPUIndexBuf *curves_lines;
                /* Edit mode */
                GPUIndexBuf *edit_verts_points; /* Only control points. Not handles. */
                GPUIndexBuf *edit_lines;
        } ibo;

        struct {
                GPUBatch *surfaces;
                GPUBatch *curves;
                /* control handles and vertices */
                GPUBatch *edit_edges;
                GPUBatch *edit_verts;
                GPUBatch *edit_handles_verts;
                GPUBatch *edit_normals;
                /* Triangles for object mode wireframe. */
                GPUBatch *wire_triangles;
        } batch;

        /* Per-material surface index buffers/batches; arrays of mat_len entries. */
        GPUIndexBuf **surf_per_mat_tris;
        GPUBatch **surf_per_mat;
        int mat_len;
        /* Bitmasks of custom-data layers currently in the cache vs. requested. */
        int cd_used, cd_needed;

        /* settings to determine if cache is invalid */
        bool is_dirty;
        bool is_editmode;
} CurveBatchCache;
401
402 /* GPUBatch cache management. */
403
404 static bool curve_batch_cache_valid(Curve *cu)
405 {
406         CurveBatchCache *cache = cu->batch_cache;
407
408         if (cache == NULL) {
409                 return false;
410         }
411
412         if (cache->mat_len != max_ii(1, cu->totcol)) {
413                 return false;
414         }
415
416         if (cache->is_dirty) {
417                 return false;
418         }
419
420         if (cache->is_editmode != ((cu->editnurb != NULL) || (cu->editfont != NULL))) {
421                 return false;
422         }
423
424         if (cache->is_editmode) {
425                 if (cu->editfont) {
426                         /* TODO */
427                 }
428         }
429
430         return true;
431 }
432
433 static void curve_batch_cache_init(Curve *cu)
434 {
435         CurveBatchCache *cache = cu->batch_cache;
436
437         if (!cache) {
438                 cache = cu->batch_cache = MEM_callocN(sizeof(*cache), __func__);
439         }
440         else {
441                 memset(cache, 0, sizeof(*cache));
442         }
443
444 #if 0
445         ListBase *nurbs;
446         if (cu->editnurb) {
447                 EditNurb *editnurb = cu->editnurb;
448                 nurbs = &editnurb->nurbs;
449         }
450         else {
451                 nurbs = &cu->nurb;
452         }
453 #endif
454
455         cache->cd_used = 0;
456         cache->mat_len = max_ii(1, cu->totcol);
457         cache->surf_per_mat_tris = MEM_mallocN(sizeof(*cache->surf_per_mat_tris) * cache->mat_len, __func__);
458         cache->surf_per_mat = MEM_mallocN(sizeof(*cache->surf_per_mat) * cache->mat_len, __func__);
459
460         /* TODO Might be wiser to alloc in one chunck. */
461         for (int i = 0; i < cache->mat_len; ++i) {
462                 cache->surf_per_mat_tris[i] = MEM_callocN(sizeof(GPUIndexBuf), "GPUIndexBuf");
463                 cache->surf_per_mat[i] = MEM_callocN(sizeof(GPUBatch), "GPUBatch");
464         }
465
466         cache->is_editmode = (cu->editnurb != NULL) || (cu->editfont != NULL);
467
468         cache->is_dirty = false;
469 }
470
471 static CurveBatchCache *curve_batch_cache_get(Curve *cu)
472 {
473         if (!curve_batch_cache_valid(cu)) {
474                 curve_batch_cache_clear(cu);
475                 curve_batch_cache_init(cu);
476         }
477         return cu->batch_cache;
478 }
479
480 void DRW_curve_batch_cache_dirty_tag(Curve *cu, int mode)
481 {
482         CurveBatchCache *cache = cu->batch_cache;
483         if (cache == NULL) {
484                 return;
485         }
486         switch (mode) {
487                 case BKE_CURVE_BATCH_DIRTY_ALL:
488                         cache->is_dirty = true;
489                         break;
490                 case BKE_CURVE_BATCH_DIRTY_SELECT:
491                         GPU_VERTBUF_DISCARD_SAFE(cache->edit.data);
492
493                         GPU_BATCH_DISCARD_SAFE(cache->batch.edit_edges);
494                         GPU_BATCH_DISCARD_SAFE(cache->batch.edit_verts);
495                         GPU_BATCH_DISCARD_SAFE(cache->batch.edit_handles_verts);
496                         break;
497                 default:
498                         BLI_assert(0);
499         }
500 }
501
/* Discard every GPU resource owned by the cache (the cache struct itself
 * is kept; see DRW_curve_batch_cache_free for full teardown). */
static void curve_batch_cache_clear(Curve *cu)
{
        CurveBatchCache *cache = cu->batch_cache;
        if (!cache) {
                return;
        }

        /* The loops below treat each sub-struct as a flat array of pointers.
         * NOTE(review): this relies on every member of `ordered`/`tess`/`edit`/
         * `ibo`/`batch` being a pointer with no padding — keep the structs
         * pointer-only if members are added. */
        for (int i = 0; i < sizeof(cache->ordered) / sizeof(void *); ++i) {
                GPUVertBuf **vbo = (GPUVertBuf **)&cache->ordered;
                GPU_VERTBUF_DISCARD_SAFE(vbo[i]);
        }
        for (int i = 0; i < sizeof(cache->tess) / sizeof(void *); ++i) {
                GPUVertBuf **vbo = (GPUVertBuf **)&cache->tess;
                GPU_VERTBUF_DISCARD_SAFE(vbo[i]);
        }
        for (int i = 0; i < sizeof(cache->edit) / sizeof(void *); ++i) {
                GPUVertBuf **vbo = (GPUVertBuf **)&cache->edit;
                GPU_VERTBUF_DISCARD_SAFE(vbo[i]);
        }
        for (int i = 0; i < sizeof(cache->ibo) / sizeof(void *); ++i) {
                GPUIndexBuf **ibo = (GPUIndexBuf **)&cache->ibo;
                GPU_INDEXBUF_DISCARD_SAFE(ibo[i]);
        }
        for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); ++i) {
                GPUBatch **batch = (GPUBatch **)&cache->batch;
                GPU_BATCH_DISCARD_SAFE(batch[i]);
        }

        /* Per-material arrays are allocated per-entry, then the arrays themselves. */
        for (int i = 0; i < cache->mat_len; ++i) {
                GPU_INDEXBUF_DISCARD_SAFE(cache->surf_per_mat_tris[i]);
                GPU_BATCH_DISCARD_SAFE(cache->surf_per_mat[i]);
        }
        MEM_SAFE_FREE(cache->surf_per_mat_tris);
        MEM_SAFE_FREE(cache->surf_per_mat);
        cache->mat_len = 0;
        cache->cd_used = 0;
}
539
/* Public: free all cached GPU data and the cache struct for \a cu. */
void DRW_curve_batch_cache_free(Curve *cu)
{
        curve_batch_cache_clear(cu);
        MEM_SAFE_FREE(cu->batch_cache);
}
545
546 /* -------------------------------------------------------------------- */
547
548 /** \name Private Curve Cache API
549  * \{ */
550
551 /* GPUBatch cache usage. */
552 static void curve_create_curves_pos(CurveRenderData *rdata, GPUVertBuf *vbo_curves_pos)
553 {
554         BLI_assert(rdata->ob_curve_cache != NULL);
555
556         static GPUVertFormat format = { 0 };
557         static struct { uint pos; } attr_id;
558         if (format.attr_len == 0) {
559                 attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
560         }
561
562         const int vert_len = curve_render_data_wire_verts_len_get(rdata);
563         GPU_vertbuf_init_with_format(vbo_curves_pos, &format);
564         GPU_vertbuf_data_alloc(vbo_curves_pos, vert_len);
565
566         int v_idx = 0;
567         for (const BevList *bl = rdata->ob_curve_cache->bev.first; bl; bl = bl->next) {
568                 if (bl->nr <= 0) {
569                         continue;
570                 }
571                 const int i_end = v_idx + bl->nr;
572                 for (const BevPoint *bevp = bl->bevpoints; v_idx < i_end; v_idx++, bevp++) {
573                         GPU_vertbuf_attr_set(vbo_curves_pos, attr_id.pos, v_idx, bevp->vec);
574                 }
575         }
576         for (const DispList *dl = rdata->ob_curve_cache->disp.first; dl; dl = dl->next) {
577                 if (ELEM(dl->type, DL_SEGM, DL_POLY)) {
578                         for (int i = 0; i < dl->nr; v_idx++, i++) {
579                                 GPU_vertbuf_attr_set(vbo_curves_pos, attr_id.pos, v_idx, &((float(*)[3])dl->verts)[i]);
580                         }
581                 }
582         }
583         BLI_assert(v_idx == vert_len);
584 }
585
/* Build the line-strip index buffer for the center-line wire.
 * Vertex indices follow the order written by curve_create_curves_pos()
 * (bevel lists first, then display lists). */
static void curve_create_curves_lines(CurveRenderData *rdata, GPUIndexBuf *ibo_curve_lines)
{
        BLI_assert(rdata->ob_curve_cache != NULL);

        const int vert_len = curve_render_data_wire_verts_len_get(rdata);
        const int edge_len = curve_render_data_wire_edges_len_get(rdata);
        const int curve_len = curve_render_data_wire_curve_len_get(rdata);
        /* Count the last vertex of each strip and the primitive restart. */
        const int index_len = edge_len + curve_len * 2;

        GPUIndexBufBuilder elb;
        GPU_indexbuf_init_ex(&elb, GPU_PRIM_LINE_STRIP, index_len, vert_len, true);

        int v_idx = 0;
        for (const BevList *bl = rdata->ob_curve_cache->bev.first; bl; bl = bl->next) {
                if (bl->nr <= 0) {
                        continue;
                }
                const bool is_cyclic = bl->poly != -1;
                /* Cyclic strips start from the last point so the closing edge is drawn. */
                if (is_cyclic) {
                        GPU_indexbuf_add_generic_vert(&elb, v_idx + (bl->nr - 1));
                }
                for (int i = 0; i < bl->nr; i++) {
                        GPU_indexbuf_add_generic_vert(&elb, v_idx + i);
                }
                /* Terminate this strip before the next one starts. */
                GPU_indexbuf_add_primitive_restart(&elb);
                v_idx += bl->nr;
        }
        for (const DispList *dl = rdata->ob_curve_cache->disp.first; dl; dl = dl->next) {
                if (ELEM(dl->type, DL_SEGM, DL_POLY)) {
                        const bool is_cyclic = dl->type == DL_POLY;
                        if (is_cyclic) {
                                GPU_indexbuf_add_generic_vert(&elb, v_idx + (dl->nr - 1));
                        }
                        for (int i = 0; i < dl->nr; i++) {
                                GPU_indexbuf_add_generic_vert(&elb, v_idx + i);
                        }
                        GPU_indexbuf_add_primitive_restart(&elb);
                        v_idx += dl->nr;
                }
        }
        GPU_indexbuf_build_in_place(&elb, ibo_curve_lines);
}
629
/* Fill the edit-mode normals VBO: two vertices per displayed normal.
 * Only the first of each pair carries rad/nor/tan attributes; both share
 * the same position (presumably the shader offsets one end — confirm in
 * the matching GLSL). */
static void curve_create_edit_curves_nor(CurveRenderData *rdata, GPUVertBuf *vbo_curves_nor)
{
        static GPUVertFormat format = { 0 };
        static struct { uint pos, nor, tan, rad; } attr_id;
        if (format.attr_len == 0) {
                /* initialize vertex formats */
                attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
                attr_id.rad = GPU_vertformat_attr_add(&format, "rad", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
                attr_id.nor = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I10, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
                attr_id.tan = GPU_vertformat_attr_add(&format, "tan", GPU_COMP_I10, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
        }

        /* Two verts per normal; must match curve_render_normal_len_get(). */
        int verts_len_capacity = curve_render_data_normal_len_get(rdata) * 2;
        int vbo_len_used = 0;

        GPU_vertbuf_init_with_format(vbo_curves_nor, &format);
        GPU_vertbuf_data_alloc(vbo_curves_nor, verts_len_capacity);

        const BevList *bl;
        const Nurb *nu;

        /* Bevel lists and nurbs run in lock-step: one bevel list per nurb. */
        for (bl = rdata->ob_curve_cache->bev.first, nu = rdata->nurbs->first;
             nu && bl;
             bl = bl->next, nu = nu->next)
        {
                const BevPoint *bevp = bl->bevpoints;
                int nr = bl->nr;
                /* Sub-sample: skip `skip` points between each displayed normal. */
                int skip = nu->resolu / 16;

                while (nr-- > 0) { /* accounts for empty bevel lists */
                        /* Normal = point's orientation quaternion applied to the X axis. */
                        float nor[3] = {1.0f, 0.0f, 0.0f};
                        mul_qt_v3(bevp->quat, nor);

                        GPUPackedNormal pnor = GPU_normal_convert_i10_v3(nor);
                        GPUPackedNormal ptan = GPU_normal_convert_i10_v3(bevp->dir);

                        /* Only set attribs for one vertex. */
                        GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.pos, vbo_len_used, bevp->vec);
                        GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.rad, vbo_len_used, &bevp->radius);
                        GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.nor, vbo_len_used, &pnor);
                        GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.tan, vbo_len_used, &ptan);
                        vbo_len_used++;

                        /* Skip the other vertex (it does not need to be offseted). */
                        GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.pos, vbo_len_used, bevp->vec);
                        vbo_len_used++;

                        /* Advance past the sub-sampled points (nr-- above plus skip here
                         * consumes skip + 1 points per iteration). */
                        bevp += skip + 1;
                        nr -= skip;
                }
        }
        BLI_assert(vbo_len_used == verts_len_capacity);
}
683
684 static char beztriple_vflag_get(CurveRenderData *rdata, char flag, char col_id, int v_idx, int nu_id)
685 {
686         char vflag = 0;
687         SET_FLAG_FROM_TEST(vflag, (flag & SELECT), VFLAG_VERTEX_SELECTED);
688         SET_FLAG_FROM_TEST(vflag, (v_idx == rdata->actvert), VFLAG_VERTEX_ACTIVE);
689         SET_FLAG_FROM_TEST(vflag, (nu_id == rdata->actnu), ACTIVE_NURB);
690         /* handle color id */
691         vflag |= col_id << 4; /* << 4 because of EVEN_U_BIT */
692         return vflag;
693 }
694
695 static char bpoint_vflag_get(CurveRenderData *rdata, char flag, int v_idx, int nu_id, int u)
696 {
697         char vflag = 0;
698         SET_FLAG_FROM_TEST(vflag, (flag & SELECT), VFLAG_VERTEX_SELECTED);
699         SET_FLAG_FROM_TEST(vflag, (v_idx == rdata->actvert), VFLAG_VERTEX_ACTIVE);
700         SET_FLAG_FROM_TEST(vflag, (nu_id == rdata->actnu), ACTIVE_NURB);
701         SET_FLAG_FROM_TEST(vflag, ((u % 2) == 0), EVEN_U_BIT);
702         vflag |= COLOR_NURB_ULINE_ID << 4; /* << 4 because of EVEN_U_BIT */
703         return vflag;
704 }
705
/* Fill edit-mode position/data VBOs and the point/line IBOs for control
 * points and handles. Any of the four buffers may be NULL; only the ones
 * newly assigned (DRW_TEST_ASSIGN_*) are built. */
static void curve_create_edit_data_and_handles(
        CurveRenderData *rdata,
        GPUVertBuf *vbo_pos, GPUVertBuf *vbo_data, GPUIndexBuf *ibo_edit_verts_points, GPUIndexBuf *ibo_edit_lines)
{
        static GPUVertFormat format_pos = { 0 };
        static GPUVertFormat format_data = { 0 };
        static struct { uint pos, data; } attr_id;
        if (format_pos.attr_len == 0) {
                /* initialize vertex formats (both formats are set up together,
                 * guarded by the first one's attr_len) */
                attr_id.pos = GPU_vertformat_attr_add(&format_pos, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
                attr_id.data = GPU_vertformat_attr_add(&format_data, "data", GPU_COMP_U8, 1, GPU_FETCH_INT);
        }

        int verts_len_capacity = curve_render_data_overlay_verts_len_get(rdata);
        /* 2 indices per overlay edge. */
        int edges_len_capacity = curve_render_data_overlay_edges_len_get(rdata) * 2;
        int vbo_len_used = 0;

        if (DRW_TEST_ASSIGN_VBO(vbo_pos)) {
                GPU_vertbuf_init_with_format(vbo_pos, &format_pos);
                GPU_vertbuf_data_alloc(vbo_pos, verts_len_capacity);
        }
        if (DRW_TEST_ASSIGN_VBO(vbo_data)) {
                GPU_vertbuf_init_with_format(vbo_data, &format_data);
                GPU_vertbuf_data_alloc(vbo_data, verts_len_capacity);
        }

        GPUIndexBufBuilder elb_verts, *elbp_verts = NULL;
        GPUIndexBufBuilder elb_lines, *elbp_lines = NULL;
        if (DRW_TEST_ASSIGN_IBO(ibo_edit_verts_points)) {
                elbp_verts = &elb_verts;
                GPU_indexbuf_init(elbp_verts, GPU_PRIM_POINTS, verts_len_capacity, verts_len_capacity);
        }
        if (DRW_TEST_ASSIGN_IBO(ibo_edit_lines)) {
                elbp_lines = &elb_lines;
                GPU_indexbuf_init(elbp_lines, GPU_PRIM_LINES, edges_len_capacity, verts_len_capacity);
        }

        int v_idx = 0, nu_id = 0;
        for (Nurb *nu = rdata->nurbs->first; nu; nu = nu->next, nu_id++) {
                const BezTriple *bezt = nu->bezt;
                const BPoint *bp = nu->bp;
                /* NOTE(review): only the FIRST triple's hide flag is checked, so a
                 * whole bezier spline is skipped at once — confirm this is intended. */
                if (bezt && bezt->hide == false) {
                        for (int a = 0; a < nu->pntsu; a++, bezt++) {
                                /* 3 verts per triple: handle 1, control point, handle 2. */
                                if (elbp_verts) {
                                        /* Only the middle (control) point is a selectable vert. */
                                        GPU_indexbuf_add_point_vert(elbp_verts, vbo_len_used + 1);
                                }
                                if (elbp_lines) {
                                        /* One edge from the control point to each handle. */
                                        GPU_indexbuf_add_line_verts(elbp_lines, vbo_len_used + 1, vbo_len_used + 0);
                                        GPU_indexbuf_add_line_verts(elbp_lines, vbo_len_used + 1, vbo_len_used + 2);
                                }
                                if (vbo_data) {
                                        /* NOTE(review): the middle entry reuses h1 as its color id —
                                         * matches long-standing behavior, but worth confirming. */
                                        char vflag[3] = {
                                                beztriple_vflag_get(rdata, bezt->f1, bezt->h1, v_idx, nu_id),
                                                beztriple_vflag_get(rdata, bezt->f2, bezt->h1, v_idx, nu_id),
                                                beztriple_vflag_get(rdata, bezt->f3, bezt->h2, v_idx, nu_id)
                                        };
                                        for (int j = 0; j < 3; j++) {
                                                GPU_vertbuf_attr_set(vbo_data, attr_id.data, vbo_len_used + j, &vflag[j]);
                                        }
                                }
                                if (vbo_pos) {
                                        for (int j = 0; j < 3; j++) {
                                                GPU_vertbuf_attr_set(vbo_pos, attr_id.pos, vbo_len_used + j, bezt->vec[j]);
                                        }
                                }
                                vbo_len_used += 3;
                                v_idx += 1;
                        }
                }
                else if (bp) {
                        int pt_len = nu->pntsu * nu->pntsv;
                        for (int a = 0; a < pt_len; a++, bp++) {
                                /* Grid coordinates of this point within the nurb. */
                                int u = (a % nu->pntsu);
                                int v = (a / nu->pntsu);
                                /* Use indexed rendering for bezier.
                                 * Specify all points and use indices to hide/show. */
                                if (elbp_verts && bp->hide == false) {
                                        GPU_indexbuf_add_point_vert(elbp_verts, vbo_len_used);
                                }
                                if (elbp_lines && bp->hide == false) {
                                        /* Link to the neighboring points along U and V, if visible. */
                                        const BPoint *bp_next_u = (u < (nu->pntsu - 1)) ? &nu->bp[a + 1] : NULL;
                                        const BPoint *bp_next_v = (v < (nu->pntsv - 1)) ? &nu->bp[a + nu->pntsu] : NULL;
                                        if (bp_next_u && (bp_next_u->hide == false)) {
                                                GPU_indexbuf_add_line_verts(elbp_lines, vbo_len_used, vbo_len_used + 1);
                                        }
                                        if (bp_next_v && (bp_next_v->hide == false)) {
                                                GPU_indexbuf_add_line_verts(elbp_lines, vbo_len_used, vbo_len_used + nu->pntsu);
                                        }
                                }
                                if (vbo_data) {
                                        char vflag = bpoint_vflag_get(rdata, bp->f1, v_idx, nu_id, u);
                                        GPU_vertbuf_attr_set(vbo_data, attr_id.data, vbo_len_used, &vflag);
                                }
                                if (vbo_pos) {
                                        GPU_vertbuf_attr_set(vbo_pos, attr_id.pos, vbo_len_used, bp->vec);
                                }
                                vbo_len_used += 1;
                                v_idx += 1;
                        }
                }
        }

        /* Resize & Finish */
        if (elbp_verts != NULL) {
                GPU_indexbuf_build_in_place(elbp_verts, ibo_edit_verts_points);
        }
        if (elbp_lines != NULL) {
                GPU_indexbuf_build_in_place(elbp_lines, ibo_edit_lines);
        }
        /* Hidden splines were skipped entirely; shrink VBOs to what was written. */
        if (vbo_len_used != verts_len_capacity) {
                if (vbo_pos != NULL) {
                        GPU_vertbuf_data_resize(vbo_pos, vbo_len_used);
                }
                if (vbo_data != NULL) {
                        GPU_vertbuf_data_resize(vbo_data, vbo_len_used);
                }
        }
}
824
825 /** \} */
826
827 /* -------------------------------------------------------------------- */
828
829 /** \name Public Object/Curve API
830  * \{ */
831
832 GPUBatch *DRW_curve_batch_cache_get_wire_edge(Curve *cu)
833 {
834         CurveBatchCache *cache = curve_batch_cache_get(cu);
835         return DRW_batch_request(&cache->batch.curves);
836 }
837
838 GPUBatch *DRW_curve_batch_cache_get_normal_edge(Curve *cu)
839 {
840         CurveBatchCache *cache = curve_batch_cache_get(cu);
841         return DRW_batch_request(&cache->batch.edit_normals);
842 }
843
844 GPUBatch *DRW_curve_batch_cache_get_edit_edges(Curve *cu)
845 {
846         CurveBatchCache *cache = curve_batch_cache_get(cu);
847         return DRW_batch_request(&cache->batch.edit_edges);
848 }
849
850 GPUBatch *DRW_curve_batch_cache_get_edit_verts(Curve *cu, bool handles)
851 {
852         CurveBatchCache *cache = curve_batch_cache_get(cu);
853         if (handles) {
854                 return DRW_batch_request(&cache->batch.edit_handles_verts);
855         }
856         else {
857                 return DRW_batch_request(&cache->batch.edit_verts);
858         }
859 }
860
861 GPUBatch *DRW_curve_batch_cache_get_triangles_with_normals(struct Curve *cu)
862 {
863         CurveBatchCache *cache = curve_batch_cache_get(cu);
864         return DRW_batch_request(&cache->batch.surfaces);
865 }
866
867 GPUBatch **DRW_curve_batch_cache_get_surface_shaded(
868         struct Curve *cu,
869         struct GPUMaterial **gpumat_array, uint gpumat_array_len)
870 {
871         CurveBatchCache *cache = curve_batch_cache_get(cu);
872
873         BLI_assert(gpumat_array_len == cache->mat_len);
874
875         curve_cd_calc_used_gpu_layers(&cache->cd_needed, gpumat_array, gpumat_array_len);
876
877         for (int i = 0; i < cache->mat_len; ++i) {
878                 DRW_batch_request(&cache->surf_per_mat[i]);
879         }
880         return cache->surf_per_mat;
881 }
882
883 GPUBatch *DRW_curve_batch_cache_get_wireframes_face(Curve *cu)
884 {
885         CurveBatchCache *cache = curve_batch_cache_get(cu);
886         return DRW_batch_request(&cache->batch.wire_triangles);
887 }
888
889 /** \} */
890
891 /* -------------------------------------------------------------------- */
892 /** \name Grouped batch generation
893  * \{ */
894
/**
 * Build every GPU batch that was requested for this curve/surface/font object.
 *
 * Works in three ordered phases:
 * 1. Translate batch requests into VBO/IBO requests (clearing batches whose
 *    custom-data layers are out of date so they get rebuilt).
 * 2. Accumulate the `CU_DATATYPE_*` flags required by the requested buffers.
 * 3. Create the CurveRenderData once with the combined flags and fill every
 *    requested buffer in a single pass.
 */
void DRW_curve_batch_cache_create_requested(Object *ob)
{
	BLI_assert(ELEM(ob->type, OB_CURVE, OB_SURF, OB_FONT));

	Curve *cu = ob->data;
	CurveBatchCache *cache = curve_batch_cache_get(cu);

	/* Verify that all surface batches have needed attrib layers. */
	/* TODO(fclem): We could be a bit smarter here and only do it per material. */
	for (int i = 0; i < cache->mat_len; ++i) {
		if ((cache->cd_used & cache->cd_needed) != cache->cd_needed) {
			/* We can't discard batches at this point as they have been
			 * referenced for drawing. Just clear them in place. */
			GPU_batch_clear(cache->surf_per_mat[i]);
			memset(cache->surf_per_mat[i], 0, sizeof(*cache->surf_per_mat[i]));
		}
	}
	/* Mark the newly required layers as used so the batches cleared above
	 * are rebuilt with them below. */
	if ((cache->cd_used & cache->cd_needed) != cache->cd_needed) {
		cache->cd_used |= cache->cd_needed;
		cache->cd_needed = 0;
	}

	/* Init batches and request VBOs & IBOs */
	if (DRW_batch_requested(cache->batch.surfaces, GPU_PRIM_TRIS)) {
		DRW_ibo_request(cache->batch.surfaces, &cache->ibo.surfaces_tris);
		DRW_vbo_request(cache->batch.surfaces, &cache->ordered.pos_nor);
	}
	if (DRW_batch_requested(cache->batch.curves, GPU_PRIM_LINE_STRIP)) {
		DRW_ibo_request(cache->batch.curves, &cache->ibo.curves_lines);
		DRW_vbo_request(cache->batch.curves, &cache->ordered.curves_pos);
	}
	if (DRW_batch_requested(cache->batch.wire_triangles, GPU_PRIM_TRIS)) {
		DRW_vbo_request(cache->batch.wire_triangles, &cache->tess.pos_nor);
		DRW_vbo_request(cache->batch.wire_triangles, &cache->tess.wireframe_data);
	}

	/* Edit mode */
	if (DRW_batch_requested(cache->batch.edit_edges, GPU_PRIM_LINES)) {
		DRW_ibo_request(cache->batch.edit_edges, &cache->ibo.edit_lines);
		DRW_vbo_request(cache->batch.edit_edges, &cache->edit.pos);
		DRW_vbo_request(cache->batch.edit_edges, &cache->edit.data);
	}
	if (DRW_batch_requested(cache->batch.edit_verts, GPU_PRIM_POINTS)) {
		DRW_ibo_request(cache->batch.edit_verts, &cache->ibo.edit_verts_points);
		DRW_vbo_request(cache->batch.edit_verts, &cache->edit.pos);
		DRW_vbo_request(cache->batch.edit_verts, &cache->edit.data);
	}
	if (DRW_batch_requested(cache->batch.edit_handles_verts, GPU_PRIM_POINTS)) {
		DRW_vbo_request(cache->batch.edit_handles_verts, &cache->edit.pos);
		DRW_vbo_request(cache->batch.edit_handles_verts, &cache->edit.data);
	}
	if (DRW_batch_requested(cache->batch.edit_normals, GPU_PRIM_LINES)) {
		DRW_vbo_request(cache->batch.edit_normals, &cache->edit.curves_nor);
	}
	/* Per-material surface batches: indices are only needed to split the
	 * shared vertex buffer when there is more than one material. */
	for (int i = 0; i < cache->mat_len; ++i) {
		if (DRW_batch_requested(cache->surf_per_mat[i], GPU_PRIM_TRIS)) {
			if (cache->mat_len > 1) {
				DRW_ibo_request(cache->surf_per_mat[i], &cache->surf_per_mat_tris[i]);
			}
			if (cache->cd_used & CD_MLOOPUV) {
				DRW_vbo_request(cache->surf_per_mat[i], &cache->tess.uv);
			}
			DRW_vbo_request(cache->surf_per_mat[i], &cache->tess.pos_nor);
		}
	}

	/* Generate CurveRenderData flags: each requested buffer contributes the
	 * CU_DATATYPE_* flag for the data it needs evaluated. */
	int mr_flag = 0;
	DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.pos_nor, CU_DATATYPE_SURFACE);
	DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.curves_pos, CU_DATATYPE_WIRE);
	DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->tess.pos_nor, CU_DATATYPE_SURFACE);
	DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->tess.uv, CU_DATATYPE_SURFACE);
	DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->tess.wireframe_data, CU_DATATYPE_SURFACE);
	DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.surfaces_tris, CU_DATATYPE_SURFACE);
	DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.curves_lines, CU_DATATYPE_WIRE);

	DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->edit.pos, CU_DATATYPE_OVERLAY);
	DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->edit.data, CU_DATATYPE_OVERLAY);
	DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->edit.curves_nor, CU_DATATYPE_NORMAL);
	DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->edit.curves_weight, CU_DATATYPE_OVERLAY);
	DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.edit_verts_points, CU_DATATYPE_OVERLAY);
	DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.edit_lines, CU_DATATYPE_OVERLAY);

	for (int i = 0; i < cache->mat_len; ++i) {
		DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->surf_per_mat_tris[i], CU_DATATYPE_SURFACE);
	}

	CurveRenderData *rdata = curve_render_data_create(cu, ob->runtime.curve_cache, mr_flag);

	/* DispLists: the evaluated geometry the surface buffers are built from. */
	ListBase *lb = &rdata->ob_curve_cache->disp;

	/* Generate VBOs */
	if (DRW_vbo_requested(cache->ordered.pos_nor)) {
		DRW_displist_vertbuf_create_pos_and_nor(lb, cache->ordered.pos_nor);
	}
	if (DRW_vbo_requested(cache->ordered.curves_pos)) {
		curve_create_curves_pos(rdata, cache->ordered.curves_pos);
	}

	/* pos_nor and uv are built together in one displist pass. */
	if (DRW_vbo_requested(cache->tess.pos_nor) ||
	    DRW_vbo_requested(cache->tess.uv))
	{
		DRW_displist_vertbuf_create_pos_and_nor_and_uv_tess(lb, cache->tess.pos_nor, cache->tess.uv);
	}
	if (DRW_vbo_requested(cache->tess.wireframe_data)) {
		DRW_displist_vertbuf_create_wireframe_data_tess(lb, cache->tess.wireframe_data);
	}

	/* Per-material IBOs are all built in one pass; checking [0] suffices. */
	if (DRW_ibo_requested(cache->surf_per_mat_tris[0])) {
		DRW_displist_indexbuf_create_triangles_tess_split_by_material(lb, cache->surf_per_mat_tris, cache->mat_len);
	}

	if (DRW_ibo_requested(cache->ibo.curves_lines)) {
		curve_create_curves_lines(rdata, cache->ibo.curves_lines);
	}
	if (DRW_ibo_requested(cache->ibo.surfaces_tris)) {
		DRW_displist_indexbuf_create_triangles_in_order(lb, cache->ibo.surfaces_tris);
	}

	/* Edit-mode position/flag VBOs and point/line IBOs share one pass. */
	if (DRW_vbo_requested(cache->edit.pos) ||
	    DRW_vbo_requested(cache->edit.data) ||
	    DRW_ibo_requested(cache->ibo.edit_verts_points) ||
	    DRW_ibo_requested(cache->ibo.edit_lines))
	{
		curve_create_edit_data_and_handles(rdata, cache->edit.pos, cache->edit.data,
		                                          cache->ibo.edit_verts_points, cache->ibo.edit_lines);
	}
	if (DRW_vbo_requested(cache->edit.curves_nor)) {
		curve_create_edit_curves_nor(rdata, cache->edit.curves_nor);
	}

	curve_render_data_free(rdata);

#ifdef DEBUG
	/* Make sure all requested batches have been setup. */
	for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); ++i) {
		BLI_assert(!DRW_batch_requested(((GPUBatch **)&cache->batch)[i], 0));
	}
#endif
}
1036
1037 /** \} */