Displist: Reuse tessellated pos and nor for wireframes
[blender.git] source/blender/draw/intern/draw_cache_impl_curve.c
1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * The Original Code is Copyright (C) 2017 by Blender Foundation.
19  * All rights reserved.
20  *
21  * ***** END GPL LICENSE BLOCK *****
22  */
23
24 /** \file draw_cache_impl_curve.c
25  *  \ingroup draw
26  *
27  * \brief Curve API for render engines
28  */
29
30 #include "MEM_guardedalloc.h"
31
32 #include "BLI_utildefines.h"
33 #include "BLI_math_vector.h"
34
35 #include "DNA_curve_types.h"
36
37 #include "BKE_curve.h"
38
39 #include "BKE_font.h"
40
41 #include "GPU_batch.h"
42 #include "GPU_texture.h"
43 #include "GPU_material.h"
44
45 #include "UI_resources.h"
46
47 #include "DRW_render.h"
48
49 #include "draw_cache_impl.h"  /* own include */
50
51 #define SELECT            1
52 #define ACTIVE_NURB       1 << 2
53 #define EVEN_U_BIT        1 << 3 /* Alternate this bit for every U vert. */
54
55 /* Used as values of `color_id` in `edit_curve_overlay_handle_geom.glsl` */
56 enum {
57         COLOR_NURB_ULINE_ID = TH_HANDLE_AUTOCLAMP - TH_HANDLE_FREE + 2,
58
59         TOT_HANDLE_COL,
60 };
61
62 /**
63  * TODO
64  * - Ensure `CurveCache`, `SEQUENCER_DAG_WORKAROUND`.
65  * - Check number of verts/edges to see if cache is valid.
66  * - Check if 'overlay.edges' can use single attribute per edge, not 2 (for selection drawing).
67  */
68
69 static void curve_batch_cache_clear(Curve *cu);
70
71 /* ---------------------------------------------------------------------- */
72 /* Curve Interface, direct access to basic data. */
73
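/* Count the edit-mode overlay elements:
 * - Bezier points add 3 verts each (knot + 2 handles) and 2 handle edges.
 * - Poly/NURBS points add 1 vert per BPoint, plus the U and V grid segments between them. */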
74 static void curve_render_overlay_verts_edges_len_get(
75         ListBase *lb, int *r_vert_len, int *r_edge_len)
76 {
77         BLI_assert(r_vert_len || r_edge_len);
78         int vert_len = 0;
79         int edge_len = 0;
80         for (Nurb *nu = lb->first; nu; nu = nu->next) {
81                 if (nu->bezt) {
82                         vert_len += nu->pntsu * 3;
83                         /* 2 handle edges per point. */
84                         edge_len += 2 * nu->pntsu;
85                 }
86                 else if (nu->bp) {
87                         vert_len += nu->pntsu * nu->pntsv;
88                         /* segments between points */
89                         edge_len += (nu->pntsu - 1) * nu->pntsv;
90                         edge_len += (nu->pntsv - 1) * nu->pntsu;
91                 }
92         }
93         if (r_vert_len) {
94                 *r_vert_len = vert_len;
95         }
96         if (r_edge_len) {
97                 *r_edge_len = edge_len;
98         }
99 }
100
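/* Count the wire (center-line) elements from the tessellated bevel lists:
 * one vert per BevPoint and one edge per segment; cyclic curves keep the closing edge. */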
101 static void curve_render_wire_verts_edges_len_get(
102         const CurveCache *ob_curve_cache,
103         int *r_curve_len, int *r_vert_len, int *r_edge_len)
104 {
105         BLI_assert(r_vert_len || r_edge_len);
106         int vert_len = 0;
107         int edge_len = 0;
108         *r_curve_len = 0;
109         for (const BevList *bl = ob_curve_cache->bev.first; bl; bl = bl->next) {
110                 if (bl->nr > 0) {
111                         const bool is_cyclic = bl->poly != -1;
112                         /* Curve */
113                         *r_curve_len += 1;
114
115                         /* verts */
116                         vert_len += bl->nr;
117
118                         /* edges */
119                         edge_len += bl->nr;
120                         if (!is_cyclic) {
121                                 edge_len -= 1;
122                         }
123                 }
124         }
125         if (r_vert_len) {
126                 *r_vert_len = vert_len;
127         }
128         if (r_edge_len) {
129                 *r_edge_len = edge_len;
130         }
131 }
132
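/* Number of normals shown in edit mode. A normal is displayed every 'skip + 1'
 * tessellated points, with 'skip = resolu / 16', so each bevel list contributes
 * ceil(nr / (skip + 1)). E.g. resolu = 36 gives skip = 2: one normal every 3rd point. */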
133 static int curve_render_normal_len_get(const ListBase *lb, const CurveCache *ob_curve_cache)
134 {
135         int normal_len = 0;
136         const BevList *bl;
137         const Nurb *nu;
138         for (bl = ob_curve_cache->bev.first, nu = lb->first; nu && bl; bl = bl->next, nu = nu->next) {
139                 int nr = bl->nr;
140                 int skip = nu->resolu / 16;
141 #if 0
142                 while (nr-- > 0) { /* accounts for empty bevel lists */
143                         normal_len += 1;
144                         nr -= skip;
145                 }
146 #else
147                 /* Same as loop above */
148                 normal_len += (nr / (skip + 1)) + ((nr % (skip + 1)) != 0);
149 #endif
150         }
151         return normal_len;
152 }
153
154 /* ---------------------------------------------------------------------- */
155 /* Curve Interface, indirect, partially cached access to complex data. */
156
157 typedef struct CurveRenderData {
158         int types;
159
160         struct {
161                 int vert_len;
162                 int edge_len;
163         } overlay;
164
165         struct {
166                 int curve_len;
167                 int vert_len;
168                 int edge_len;
169         } wire;
170
171         /* Edit-mode normals. */
172         struct {
173                 /* 'edge_len == len * 2'
174                  * 'vert_len == len * 3' */
175                 int len;
176         } normal;
177
178         struct {
179                 EditFont *edit_font;
180         } text;
181
182         /* borrow from 'Object' */
183         CurveCache *ob_curve_cache;
184
185         /* borrow from 'Curve' */
186         ListBase *nurbs;
187
188         /* edit, index in nurb list */
189         int actnu;
190         /* edit, index in active nurb (BPoint or BezTriple) */
191         int actvert;
192 } CurveRenderData;
193
194 enum {
195         /* Wire center-line */
196         CU_DATATYPE_WIRE        = 1 << 0,
197         /* Edit-mode verts and optionally handles */
198         CU_DATATYPE_OVERLAY     = 1 << 1,
199         /* Edit-mode normals */
200         CU_DATATYPE_NORMAL      = 1 << 2,
201         /* Geometry */
202         CU_DATATYPE_SURFACE     = 1 << 3,
203         /* Text */
204         CU_DATATYPE_TEXT_SELECT = 1 << 4,
205 };
206
207 /*
208  * 'ob_curve_cache' can be NULL; it is only needed for CU_DATATYPE_WIRE.
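 *
 * Typical usage (a minimal sketch of what DRW_curve_batch_cache_create_requested does below):
 *
 *   CurveRenderData *rdata = curve_render_data_create(cu, ob_curve_cache, CU_DATATYPE_WIRE);
 *   const int edge_len = curve_render_data_wire_edges_len_get(rdata);
 *   ...
 *   curve_render_data_free(rdata);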
209  */
210 static CurveRenderData *curve_render_data_create(Curve *cu, CurveCache *ob_curve_cache, const int types)
211 {
212         CurveRenderData *rdata = MEM_callocN(sizeof(*rdata), __func__);
213         rdata->types = types;
214         ListBase *nurbs;
215
216         rdata->actnu = cu->actnu;
217         rdata->actvert = cu->actvert;
218
219         rdata->ob_curve_cache = ob_curve_cache;
220
221         if (types & CU_DATATYPE_WIRE) {
222                 curve_render_wire_verts_edges_len_get(
223                         rdata->ob_curve_cache,
224                         &rdata->wire.curve_len, &rdata->wire.vert_len, &rdata->wire.edge_len);
225         }
226
227         if (cu->editnurb) {
228                 EditNurb *editnurb = cu->editnurb;
229                 nurbs = &editnurb->nurbs;
230
231                 if (types & CU_DATATYPE_OVERLAY) {
232                         curve_render_overlay_verts_edges_len_get(
233                                 nurbs,
234                                 &rdata->overlay.vert_len,
235                                 &rdata->overlay.edge_len);
236
237                         rdata->actnu = cu->actnu;
238                         rdata->actvert = cu->actvert;
239                 }
240                 if (types & CU_DATATYPE_NORMAL) {
241                         rdata->normal.len = curve_render_normal_len_get(nurbs, rdata->ob_curve_cache);
242                 }
243         }
244         else {
245                 nurbs = &cu->nurb;
246         }
247
248         rdata->nurbs = nurbs;
249
250         rdata->text.edit_font = cu->editfont;
251
252         return rdata;
253 }
254
255 static void curve_render_data_free(CurveRenderData *rdata)
256 {
257 #if 0
258         if (rdata->loose_verts) {
259                 MEM_freeN(rdata->loose_verts);
260         }
261 #endif
262         MEM_freeN(rdata);
263 }
264
265 static int curve_render_data_overlay_verts_len_get(const CurveRenderData *rdata)
266 {
267         BLI_assert(rdata->types & CU_DATATYPE_OVERLAY);
268         return rdata->overlay.vert_len;
269 }
270
271 static int curve_render_data_overlay_edges_len_get(const CurveRenderData *rdata)
272 {
273         BLI_assert(rdata->types & CU_DATATYPE_OVERLAY);
274         return rdata->overlay.edge_len;
275 }
276
277 static int curve_render_data_wire_verts_len_get(const CurveRenderData *rdata)
278 {
279         BLI_assert(rdata->types & CU_DATATYPE_WIRE);
280         return rdata->wire.vert_len;
281 }
282
283 static int curve_render_data_wire_edges_len_get(const CurveRenderData *rdata)
284 {
285         BLI_assert(rdata->types & CU_DATATYPE_WIRE);
286         return rdata->wire.edge_len;
287 }
288
289 static int curve_render_data_wire_curve_len_get(const CurveRenderData *rdata)
290 {
291         BLI_assert(rdata->types & CU_DATATYPE_WIRE);
292         return rdata->wire.curve_len;
293 }
294
295 static int curve_render_data_normal_len_get(const CurveRenderData *rdata)
296 {
297         BLI_assert(rdata->types & CU_DATATYPE_NORMAL);
298         return rdata->normal.len;
299 }
300
301 static void curve_cd_calc_used_gpu_layers(int *cd_layers, struct GPUMaterial **gpumat_array, int gpumat_array_len)
302 {
303         GPUVertexAttribs gattribs = {{{0}}};
304         for (int i = 0; i < gpumat_array_len; i++) {
305                 struct GPUMaterial *gpumat = gpumat_array[i];
306                 if (gpumat == NULL) {
307                         continue;
308                 }
309                 GPU_material_vertex_attributes(gpumat, &gattribs);
310                 for (int j = 0; j < gattribs.totlayer; j++) {
311                         const char *name = gattribs.layer[j].name;
312                         int type = gattribs.layer[j].type;
313
314                         /* Curves cannot have named layers.
315                          * Note: We could relax this assumption later. */
316                         if (name[0] != '\0') {
317                                 continue;
318                         }
319
320                         if (type == CD_AUTO_FROM_NAME) {
321                                 type = CD_MTFACE;
322                         }
323
324                         switch (type) {
325                                 case CD_MTFACE:
326                                         *cd_layers |= CD_MLOOPUV;
327                                         break;
328                                 case CD_TANGENT:
329                                         /* Currently unsupported */
330                                         // *cd_layers |= CD_TANGENT;
331                                         break;
332                                 case CD_MCOL:
333                                         /* Curve objects don't have color data. */
334                                         break;
335                                 case CD_ORCO:
336                                         *cd_layers |= CD_ORCO;
337                                         break;
338                         }
339                 }
340         }
341 }
342
343 /* ---------------------------------------------------------------------- */
344 /* Curve GPUBatch Cache */
345
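/* GPU data cached per Curve. 'ordered' VBOs follow the DispList vertex order and are drawn
 * through index buffers, 'tess' VBOs hold tessellated per-corner data, and 'edit' holds
 * edit-mode control points and curve normals. */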
346 typedef struct CurveBatchCache {
347         struct {
348                 /* Split by normals if necessary. */
349                 GPUVertBuf *pos_nor;
350                 GPUVertBuf *curves_pos;
351         } ordered;
352
353         struct {
354                 GPUVertBuf *pos_nor;
355                 GPUVertBuf *uv;
356
357                 GPUVertBuf *wireframe_data;
358         } tess;
359
360         struct {
361                 /* Curve points. Aligned with ordered.pos_nor */
362                 GPUVertBuf *curves_nor;
363                 GPUVertBuf *curves_weight; /* TODO. */
364                 /* Edit points (beztriples and bpoints) */
365                 GPUVertBuf *pos;
366                 GPUVertBuf *data;
367         } edit;
368
369         struct {
370                 GPUIndexBuf *surfaces_tris;
371                 GPUIndexBuf *curves_lines;
372                 /* Edit mode */
373                 GPUIndexBuf *edit_verts_points; /* Only control points. Not handles. */
374                 GPUIndexBuf *edit_lines;
375         } ibo;
376
377         struct {
378                 GPUBatch *surfaces;
379                 GPUBatch *curves;
380                 /* control handles and vertices */
381                 GPUBatch *edit_edges;
382                 GPUBatch *edit_verts;
383                 GPUBatch *edit_handles_verts;
384                 GPUBatch *edit_normals;
385                 /* Triangles for object mode wireframe. */
386                 GPUBatch *wire_triangles;
387         } batch;
388
389         GPUIndexBuf **surf_per_mat_tris;
390         GPUBatch **surf_per_mat;
391         int mat_len;
392         int cd_used, cd_needed;
393
394         /* settings to determine if cache is invalid */
395         bool is_dirty;
396         bool is_editmode;
397 } CurveBatchCache;
398
399 /* GPUBatch cache management. */
400
401 static bool curve_batch_cache_valid(Curve *cu)
402 {
403         CurveBatchCache *cache = cu->batch_cache;
404
405         if (cache == NULL) {
406                 return false;
407         }
408
409         if (cache->mat_len != max_ii(1, cu->totcol)) {
410                 return false;
411         }
412
413         if (cache->is_dirty) {
414                 return false;
415         }
416
417         if (cache->is_editmode != ((cu->editnurb != NULL) || (cu->editfont != NULL))) {
418                 return false;
419         }
420
421         if (cache->is_editmode) {
422                 if (cu->editfont) {
423                         /* TODO */
424                 }
425         }
426
427         return true;
428 }
429
430 static void curve_batch_cache_init(Curve *cu)
431 {
432         CurveBatchCache *cache = cu->batch_cache;
433
434         if (!cache) {
435                 cache = cu->batch_cache = MEM_callocN(sizeof(*cache), __func__);
436         }
437         else {
438                 memset(cache, 0, sizeof(*cache));
439         }
440
441 #if 0
442         ListBase *nurbs;
443         if (cu->editnurb) {
444                 EditNurb *editnurb = cu->editnurb;
445                 nurbs = &editnurb->nurbs;
446         }
447         else {
448                 nurbs = &cu->nurb;
449         }
450 #endif
451
452         cache->cd_used = 0;
453         cache->mat_len = max_ii(1, cu->totcol);
454         cache->surf_per_mat_tris = MEM_mallocN(sizeof(*cache->surf_per_mat_tris) * cache->mat_len, __func__);
455         cache->surf_per_mat = MEM_mallocN(sizeof(*cache->surf_per_mat) * cache->mat_len, __func__);
456
457         /* TODO: Might be wiser to alloc in one chunk. */
458         for (int i = 0; i < cache->mat_len; ++i) {
459                 cache->surf_per_mat_tris[i] = MEM_callocN(sizeof(GPUIndexBuf), "GPUIndexBuf");
460                 cache->surf_per_mat[i] = MEM_callocN(sizeof(GPUBatch), "GPUBatch");
461         }
462
463         cache->is_editmode = (cu->editnurb != NULL) || (cu->editfont != NULL);
464
465         cache->is_dirty = false;
466 }
467
468 static CurveBatchCache *curve_batch_cache_get(Curve *cu)
469 {
470         if (!curve_batch_cache_valid(cu)) {
471                 curve_batch_cache_clear(cu);
472                 curve_batch_cache_init(cu);
473         }
474         return cu->batch_cache;
475 }
476
477 void DRW_curve_batch_cache_dirty_tag(Curve *cu, int mode)
478 {
479         CurveBatchCache *cache = cu->batch_cache;
480         if (cache == NULL) {
481                 return;
482         }
483         switch (mode) {
484                 case BKE_CURVE_BATCH_DIRTY_ALL:
485                         cache->is_dirty = true;
486                         break;
487                 case BKE_CURVE_BATCH_DIRTY_SELECT:
488                         GPU_VERTBUF_DISCARD_SAFE(cache->edit.data);
489
490                         GPU_BATCH_DISCARD_SAFE(cache->batch.edit_edges);
491                         GPU_BATCH_DISCARD_SAFE(cache->batch.edit_verts);
492                         GPU_BATCH_DISCARD_SAFE(cache->batch.edit_handles_verts);
493                         break;
494                 default:
495                         BLI_assert(0);
496         }
497 }
498
499 static void curve_batch_cache_clear(Curve *cu)
500 {
501         CurveBatchCache *cache = cu->batch_cache;
502         if (!cache) {
503                 return;
504         }
505
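        /* The sub-structs below only contain pointers, so each one can be walked as a flat
         * array of GPUVertBuf / GPUIndexBuf / GPUBatch pointers and discarded in a loop. */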
506         for (int i = 0; i < sizeof(cache->ordered) / sizeof(void *); ++i) {
507                 GPUVertBuf **vbo = (GPUVertBuf **)&cache->ordered;
508                 GPU_VERTBUF_DISCARD_SAFE(vbo[i]);
509         }
510         for (int i = 0; i < sizeof(cache->tess) / sizeof(void *); ++i) {
511                 GPUVertBuf **vbo = (GPUVertBuf **)&cache->tess;
512                 GPU_VERTBUF_DISCARD_SAFE(vbo[i]);
513         }
514         for (int i = 0; i < sizeof(cache->edit) / sizeof(void *); ++i) {
515                 GPUVertBuf **vbo = (GPUVertBuf **)&cache->edit;
516                 GPU_VERTBUF_DISCARD_SAFE(vbo[i]);
517         }
518         for (int i = 0; i < sizeof(cache->ibo) / sizeof(void *); ++i) {
519                 GPUIndexBuf **ibo = (GPUIndexBuf **)&cache->ibo;
520                 GPU_INDEXBUF_DISCARD_SAFE(ibo[i]);
521         }
522         for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); ++i) {
523                 GPUBatch **batch = (GPUBatch **)&cache->batch;
524                 GPU_BATCH_DISCARD_SAFE(batch[i]);
525         }
526
527         for (int i = 0; i < cache->mat_len; ++i) {
528                 GPU_INDEXBUF_DISCARD_SAFE(cache->surf_per_mat_tris[i]);
529                 GPU_BATCH_DISCARD_SAFE(cache->surf_per_mat[i]);
530         }
531         MEM_SAFE_FREE(cache->surf_per_mat_tris);
532         MEM_SAFE_FREE(cache->surf_per_mat);
533         cache->mat_len = 0;
534         cache->cd_used = 0;
535 }
536
537 void DRW_curve_batch_cache_free(Curve *cu)
538 {
539         curve_batch_cache_clear(cu);
540         MEM_SAFE_FREE(cu->batch_cache);
541 }
542
543 /* -------------------------------------------------------------------- */
544
545 /** \name Private Curve Cache API
546  * \{ */
547
548 /* GPUBatch cache usage. */
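/* Fill 'vbo_curves_pos' with one position per tessellated BevPoint (the wire center-line). */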
549 static void curve_create_curves_pos(CurveRenderData *rdata, GPUVertBuf *vbo_curves_pos)
550 {
551         BLI_assert(rdata->ob_curve_cache != NULL);
552
553         static GPUVertFormat format = { 0 };
554         static struct { uint pos; } attr_id;
555         if (format.attr_len == 0) {
556                 attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
557         }
558
559         const int vert_len = curve_render_data_wire_verts_len_get(rdata);
560         GPU_vertbuf_init_with_format(vbo_curves_pos, &format);
561         GPU_vertbuf_data_alloc(vbo_curves_pos, vert_len);
562
563         int v_idx = 0;
564         for (const BevList *bl = rdata->ob_curve_cache->bev.first; bl; bl = bl->next) {
565                 if (bl->nr <= 0) {
566                         continue;
567                 }
568                 const int i_end = v_idx + bl->nr;
569                 for (const BevPoint *bevp = bl->bevpoints; v_idx < i_end; v_idx++, bevp++) {
570                         GPU_vertbuf_attr_set(vbo_curves_pos, attr_id.pos, v_idx, bevp->vec);
571                 }
572         }
573         BLI_assert(v_idx == vert_len);
574 }
575
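/* Build a LINE_STRIP index buffer over the wire positions. Separate curves are split with a
 * primitive restart index; cyclic curves emit their last point first to close the loop. */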
576 static void curve_create_curves_lines(CurveRenderData *rdata, GPUIndexBuf *ibo_curve_lines)
577 {
578         BLI_assert(rdata->ob_curve_cache != NULL);
579
580         const int vert_len = curve_render_data_wire_verts_len_get(rdata);
581         const int edge_len = curve_render_data_wire_edges_len_get(rdata);
582         const int curve_len = curve_render_data_wire_curve_len_get(rdata);
583         /* Count the last vertex of each strip and the primitive restart index. */
584         const int index_len = edge_len + curve_len * 2;
585
586         GPUIndexBufBuilder elb;
587         GPU_indexbuf_init_ex(&elb, GPU_PRIM_LINE_STRIP, index_len, vert_len, true);
588
589         int v_idx = 0;
590         for (const BevList *bl = rdata->ob_curve_cache->bev.first; bl; bl = bl->next) {
591                 if (bl->nr <= 0) {
592                         continue;
593                 }
594                 const bool is_cyclic = bl->poly != -1;
595                 if (is_cyclic) {
596                         GPU_indexbuf_add_generic_vert(&elb, v_idx + (bl->nr - 1));
597                 }
598                 for (int i = 0; i < bl->nr; i++) {
599                         GPU_indexbuf_add_generic_vert(&elb, v_idx + i);
600                 }
601                 GPU_indexbuf_add_primitive_restart(&elb);
602                 v_idx += bl->nr;
603         }
604
605         GPU_indexbuf_build_in_place(&elb, ibo_curve_lines);
606 }
607
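/* Edit-mode normal display: two vertices per shown normal, both placed at the bevel point.
 * Only the first vertex gets radius/normal/tangent attributes; the shader is then expected
 * to offset that vertex along the normal/tangent. */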
608 static void curve_create_edit_curves_nor(CurveRenderData *rdata, GPUVertBuf *vbo_curves_nor)
609 {
610         static GPUVertFormat format = { 0 };
611         static struct { uint pos, nor, tan, rad; } attr_id;
612         if (format.attr_len == 0) {
613                 /* initialize vertex formats */
614                 attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
615                 attr_id.rad = GPU_vertformat_attr_add(&format, "rad", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
616                 attr_id.nor = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I10, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
617                 attr_id.tan = GPU_vertformat_attr_add(&format, "tan", GPU_COMP_I10, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
618         }
619
620         int verts_len_capacity = curve_render_data_normal_len_get(rdata) * 2;
621         int vbo_len_used = 0;
622
623         GPU_vertbuf_init_with_format(vbo_curves_nor, &format);
624         GPU_vertbuf_data_alloc(vbo_curves_nor, verts_len_capacity);
625
626         const BevList *bl;
627         const Nurb *nu;
628
629         for (bl = rdata->ob_curve_cache->bev.first, nu = rdata->nurbs->first;
630              nu && bl;
631              bl = bl->next, nu = nu->next)
632         {
633                 const BevPoint *bevp = bl->bevpoints;
634                 int nr = bl->nr;
635                 int skip = nu->resolu / 16;
636
637                 while (nr-- > 0) { /* accounts for empty bevel lists */
638                         float nor[3] = {1.0f, 0.0f, 0.0f};
639                         mul_qt_v3(bevp->quat, nor);
640
641                         GPUPackedNormal pnor = GPU_normal_convert_i10_v3(nor);
642                         GPUPackedNormal ptan = GPU_normal_convert_i10_v3(bevp->dir);
643
644                         /* Only set attribs for one vertex. */
645                         GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.pos, vbo_len_used, bevp->vec);
646                         GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.rad, vbo_len_used, &bevp->radius);
647                         GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.nor, vbo_len_used, &pnor);
648                         GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.tan, vbo_len_used, &ptan);
649                         vbo_len_used++;
650
651                         /* The other vertex only gets a position (it does not need to be offset). */
652                         GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.pos, vbo_len_used, bevp->vec);
653                         vbo_len_used++;
654
655                         bevp += skip + 1;
656                         nr -= skip;
657                 }
658         }
659         BLI_assert(vbo_len_used == verts_len_capacity);
660 }
661
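/* Pack the per-vertex overlay flags into a single byte: selection/active bits in the low
 * bits (SELECT, ACTIVE_NURB, EVEN_U_BIT) and the handle color id in the high nibble. */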
662 static char beztriple_vflag_get(CurveRenderData *rdata, char flag, char col_id, int v_idx, int nu_id)
663 {
664         char vflag = 0;
665         SET_FLAG_FROM_TEST(vflag, (flag & SELECT), VFLAG_VERTEX_SELECTED);
666         SET_FLAG_FROM_TEST(vflag, (v_idx == rdata->actvert), VFLAG_VERTEX_ACTIVE);
667         SET_FLAG_FROM_TEST(vflag, (nu_id == rdata->actnu), ACTIVE_NURB);
668         /* handle color id */
669         vflag |= col_id << 4; /* << 4 because of EVEN_U_BIT */
670         return vflag;
671 }
672
673 static char bpoint_vflag_get(CurveRenderData *rdata, char flag, int v_idx, int nu_id, int u)
674 {
675         char vflag = 0;
676         SET_FLAG_FROM_TEST(vflag, (flag & SELECT), VFLAG_VERTEX_SELECTED);
677         SET_FLAG_FROM_TEST(vflag, (v_idx == rdata->actvert), VFLAG_VERTEX_ACTIVE);
678         SET_FLAG_FROM_TEST(vflag, (nu_id == rdata->actnu), ACTIVE_NURB);
679         SET_FLAG_FROM_TEST(vflag, ((u % 2) == 0), EVEN_U_BIT);
680         vflag |= COLOR_NURB_ULINE_ID << 4; /* << 4 because of EVEN_U_BIT */
681         return vflag;
682 }
683
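/* Fill the edit-mode position/data VBOs and the point/line index buffers in one pass.
 * Bezier points emit 3 consecutive verts (handle 1, knot, handle 2); BPoint grids emit one
 * vert per point and rely on the index buffers to hide hidden points. */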
684 static void curve_create_edit_data_and_handles(
685         CurveRenderData *rdata,
686         GPUVertBuf *vbo_pos, GPUVertBuf *vbo_data, GPUIndexBuf *ibo_edit_verts_points, GPUIndexBuf *ibo_edit_lines)
687 {
688         static GPUVertFormat format_pos = { 0 };
689         static GPUVertFormat format_data = { 0 };
690         static struct { uint pos, data; } attr_id;
691         if (format_pos.attr_len == 0) {
692                 /* initialize vertex formats */
693                 attr_id.pos = GPU_vertformat_attr_add(&format_pos, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
694                 attr_id.data = GPU_vertformat_attr_add(&format_data, "data", GPU_COMP_U8, 1, GPU_FETCH_INT);
695         }
696
697         int verts_len_capacity = curve_render_data_overlay_verts_len_get(rdata);
698         int edges_len_capacity = curve_render_data_overlay_edges_len_get(rdata) * 2;
699         int vbo_len_used = 0;
700
701         if (DRW_TEST_ASSIGN_VBO(vbo_pos)) {
702                 GPU_vertbuf_init_with_format(vbo_pos, &format_pos);
703                 GPU_vertbuf_data_alloc(vbo_pos, verts_len_capacity);
704         }
705         if (DRW_TEST_ASSIGN_VBO(vbo_data)) {
706                 GPU_vertbuf_init_with_format(vbo_data, &format_data);
707                 GPU_vertbuf_data_alloc(vbo_data, verts_len_capacity);
708         }
709
710         GPUIndexBufBuilder elb_verts, *elbp_verts = NULL;
711         GPUIndexBufBuilder elb_lines, *elbp_lines = NULL;
712         if (DRW_TEST_ASSIGN_IBO(ibo_edit_verts_points)) {
713                 elbp_verts = &elb_verts;
714                 GPU_indexbuf_init(elbp_verts, GPU_PRIM_POINTS, verts_len_capacity, verts_len_capacity);
715         }
716         if (DRW_TEST_ASSIGN_IBO(ibo_edit_lines)) {
717                 elbp_lines = &elb_lines;
718                 GPU_indexbuf_init(elbp_lines, GPU_PRIM_LINES, edges_len_capacity, verts_len_capacity);
719         }
720
721         int v_idx = 0, nu_id = 0;
722         for (Nurb *nu = rdata->nurbs->first; nu; nu = nu->next, nu_id++) {
723                 const BezTriple *bezt = nu->bezt;
724                 const BPoint *bp = nu->bp;
725                 if (bezt && bezt->hide == false) {
726                         for (int a = 0; a < nu->pntsu; a++, bezt++) {
727                                 if (elbp_verts) {
728                                         GPU_indexbuf_add_point_vert(elbp_verts, vbo_len_used + 1);
729                                 }
730                                 if (elbp_lines) {
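                                        /* Connect the knot (middle vertex) to each of its two handles. */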
731                                         GPU_indexbuf_add_line_verts(elbp_lines, vbo_len_used + 1, vbo_len_used + 0);
732                                         GPU_indexbuf_add_line_verts(elbp_lines, vbo_len_used + 1, vbo_len_used + 2);
733                                 }
734                                 if (vbo_data) {
735                                         char vflag[3] = {
736                                                 beztriple_vflag_get(rdata, bezt->f1, bezt->h1, v_idx, nu_id),
737                                                 beztriple_vflag_get(rdata, bezt->f2, bezt->h1, v_idx, nu_id),
738                                                 beztriple_vflag_get(rdata, bezt->f3, bezt->h2, v_idx, nu_id)
739                                         };
740                                         for (int j = 0; j < 3; j++) {
741                                                 GPU_vertbuf_attr_set(vbo_data, attr_id.data, vbo_len_used + j, &vflag[j]);
742                                         }
743                                 }
744                                 if (vbo_pos) {
745                                         for (int j = 0; j < 3; j++) {
746                                                 GPU_vertbuf_attr_set(vbo_pos, attr_id.pos, vbo_len_used + j, bezt->vec[j]);
747                                         }
748                                 }
749                                 vbo_len_used += 3;
750                                 v_idx += 1;
751                         }
752                 }
753                 else if (bp) {
754                         int pt_len = nu->pntsu * nu->pntsv;
755                         for (int a = 0; a < pt_len; a++, bp++) {
756                                 int u = (a % nu->pntsu);
757                                 int v = (a / nu->pntsu);
758                                 /* Use indexed rendering for bezier.
759                                  * Specify all points and use indices to hide/show. */
760                                 if (elbp_verts && bp->hide == false) {
761                                         GPU_indexbuf_add_point_vert(elbp_verts, vbo_len_used);
762                                 }
763                                 if (elbp_lines && bp->hide == false) {
764                                         const BPoint *bp_next_u = (u < (nu->pntsu - 1)) ? &nu->bp[a + 1] : NULL;
765                                         const BPoint *bp_next_v = (v < (nu->pntsv - 1)) ? &nu->bp[a + nu->pntsu] : NULL;
766                                         if (bp_next_u && (bp_next_u->hide == false)) {
767                                                 GPU_indexbuf_add_line_verts(elbp_lines, vbo_len_used, vbo_len_used + 1);
768                                         }
769                                         if (bp_next_v && (bp_next_v->hide == false)) {
770                                                 GPU_indexbuf_add_line_verts(elbp_lines, vbo_len_used, vbo_len_used + nu->pntsu);
771                                         }
772                                 }
773                                 if (vbo_data) {
774                                         char vflag = bpoint_vflag_get(rdata, bp->f1, v_idx, nu_id, u);
775                                         GPU_vertbuf_attr_set(vbo_data, attr_id.data, vbo_len_used, &vflag);
776                                 }
777                                 if (vbo_pos) {
778                                         GPU_vertbuf_attr_set(vbo_pos, attr_id.pos, vbo_len_used, bp->vec);
779                                 }
780                                 vbo_len_used += 1;
781                                 v_idx += 1;
782                         }
783                 }
784         }
785
786         /* Resize & Finish */
787         if (elbp_verts != NULL) {
788                 GPU_indexbuf_build_in_place(elbp_verts, ibo_edit_verts_points);
789         }
790         if (elbp_lines != NULL) {
791                 GPU_indexbuf_build_in_place(elbp_lines, ibo_edit_lines);
792         }
793         if (vbo_len_used != verts_len_capacity) {
794                 if (vbo_pos != NULL) {
795                         GPU_vertbuf_data_resize(vbo_pos, vbo_len_used);
796                 }
797                 if (vbo_data != NULL) {
798                         GPU_vertbuf_data_resize(vbo_data, vbo_len_used);
799                 }
800         }
801 }
802
803 /** \} */
804
805 /* -------------------------------------------------------------------- */
806
807 /** \name Public Object/Curve API
808  * \{ */
809
810 GPUBatch *DRW_curve_batch_cache_get_wire_edge(Curve *cu)
811 {
812         CurveBatchCache *cache = curve_batch_cache_get(cu);
813         return DRW_batch_request(&cache->batch.curves);
814 }
815
816 GPUBatch *DRW_curve_batch_cache_get_normal_edge(Curve *cu)
817 {
818         CurveBatchCache *cache = curve_batch_cache_get(cu);
819         return DRW_batch_request(&cache->batch.edit_normals);
820 }
821
822 GPUBatch *DRW_curve_batch_cache_get_edit_edges(Curve *cu)
823 {
824         CurveBatchCache *cache = curve_batch_cache_get(cu);
825         return DRW_batch_request(&cache->batch.edit_edges);
826 }
827
828 GPUBatch *DRW_curve_batch_cache_get_edit_verts(Curve *cu, bool handles)
829 {
830         CurveBatchCache *cache = curve_batch_cache_get(cu);
831         if (handles) {
832                 return DRW_batch_request(&cache->batch.edit_handles_verts);
833         }
834         else {
835                 return DRW_batch_request(&cache->batch.edit_verts);
836         }
837 }
838
839 GPUBatch *DRW_curve_batch_cache_get_triangles_with_normals(struct Curve *cu)
840 {
841         CurveBatchCache *cache = curve_batch_cache_get(cu);
842         return DRW_batch_request(&cache->batch.surfaces);
843 }
844
845 GPUBatch **DRW_curve_batch_cache_get_surface_shaded(
846         struct Curve *cu,
847         struct GPUMaterial **gpumat_array, uint gpumat_array_len)
848 {
849         CurveBatchCache *cache = curve_batch_cache_get(cu);
850
851         BLI_assert(gpumat_array_len == cache->mat_len);
852
853         curve_cd_calc_used_gpu_layers(&cache->cd_needed, gpumat_array, gpumat_array_len);
854
855         for (int i = 0; i < cache->mat_len; ++i) {
856                 DRW_batch_request(&cache->surf_per_mat[i]);
857         }
858         return cache->surf_per_mat;
859 }
860
861 GPUBatch *DRW_curve_batch_cache_get_wireframes_face(Curve *cu)
862 {
863         CurveBatchCache *cache = curve_batch_cache_get(cu);
864         return DRW_batch_request(&cache->batch.wire_triangles);
865 }
866
867 /** \} */
868
869 /* -------------------------------------------------------------------- */
870 /** \name Grouped batch generation
871  * \{ */
872
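/* Create all GPU data that was requested through the DRW_curve_batch_cache_get_* functions:
 * first turn batch requests into VBO/IBO requests, then fill only the requested buffers,
 * either from the evaluated DispList or from the Curve/BevList data. */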
873 void DRW_curve_batch_cache_create_requested(Object *ob)
874 {
875         BLI_assert(ELEM(ob->type, OB_CURVE, OB_SURF, OB_FONT));
876
877         Curve *me = (Curve *)ob->data;
878         CurveBatchCache *cache = curve_batch_cache_get(me);
879
880         /* Verify that all surface batches have needed attrib layers. */
881         /* TODO(fclem): We could be a bit smarter here and only do it per material. */
882         for (int i = 0; i < cache->mat_len; ++i) {
883                 if ((cache->cd_used & cache->cd_needed) != cache->cd_needed) {
884                         /* We can't discard batches at this point as they have been
885                          * referenced for drawing. Just clear them in place. */
886                         GPU_batch_clear(cache->surf_per_mat[i]);
887                         memset(cache->surf_per_mat[i], 0, sizeof(*cache->surf_per_mat[i]));
888                 }
889         }
890         cache->cd_used = cache->cd_needed;
891
892         /* Init batches and request VBOs & IBOs */
893         if (DRW_batch_requested(cache->batch.surfaces, GPU_PRIM_TRIS)) {
894                 DRW_ibo_request(cache->batch.surfaces, &cache->ibo.surfaces_tris);
895                 DRW_vbo_request(cache->batch.surfaces, &cache->ordered.pos_nor);
896         }
897         if (DRW_batch_requested(cache->batch.curves, GPU_PRIM_LINE_STRIP)) {
898                 DRW_ibo_request(cache->batch.curves, &cache->ibo.curves_lines);
899                 DRW_vbo_request(cache->batch.curves, &cache->ordered.curves_pos);
900         }
901         if (DRW_batch_requested(cache->batch.wire_triangles, GPU_PRIM_TRIS)) {
902                 DRW_vbo_request(cache->batch.wire_triangles, &cache->tess.pos_nor);
903                 DRW_vbo_request(cache->batch.wire_triangles, &cache->tess.wireframe_data);
904         }
905
906         /* Edit mode */
907         if (DRW_batch_requested(cache->batch.edit_edges, GPU_PRIM_LINES)) {
908                 DRW_ibo_request(cache->batch.edit_edges, &cache->ibo.edit_lines);
909                 DRW_vbo_request(cache->batch.edit_edges, &cache->edit.pos);
910                 DRW_vbo_request(cache->batch.edit_edges, &cache->edit.data);
911         }
912         if (DRW_batch_requested(cache->batch.edit_verts, GPU_PRIM_POINTS)) {
913                 DRW_ibo_request(cache->batch.edit_verts, &cache->ibo.edit_verts_points);
914                 DRW_vbo_request(cache->batch.edit_verts, &cache->edit.pos);
915                 DRW_vbo_request(cache->batch.edit_verts, &cache->edit.data);
916         }
917         if (DRW_batch_requested(cache->batch.edit_handles_verts, GPU_PRIM_POINTS)) {
918                 DRW_vbo_request(cache->batch.edit_handles_verts, &cache->edit.pos);
919                 DRW_vbo_request(cache->batch.edit_handles_verts, &cache->edit.data);
920         }
921         if (DRW_batch_requested(cache->batch.edit_normals, GPU_PRIM_LINES)) {
922                 DRW_vbo_request(cache->batch.edit_normals, &cache->edit.curves_nor);
923         }
924         for (int i = 0; i < cache->mat_len; ++i) {
925                 if (DRW_batch_requested(cache->surf_per_mat[i], GPU_PRIM_TRIS)) {
926                         if (cache->mat_len > 1) {
927                                 DRW_ibo_request(cache->surf_per_mat[i], &cache->surf_per_mat_tris[i]);
928                         }
929                         if (cache->cd_used & CD_MLOOPUV) {
930                                 DRW_vbo_request(cache->surf_per_mat[i], &cache->tess.uv);
931                         }
932                         DRW_vbo_request(cache->surf_per_mat[i], &cache->tess.pos_nor);
933                 }
934         }
935
936         /* Generate CurveRenderData flags. */
937         int mr_flag = 0;
938         DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.pos_nor, CU_DATATYPE_SURFACE);
939         DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.curves_pos, CU_DATATYPE_WIRE);
940         DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->tess.pos_nor, CU_DATATYPE_SURFACE);
941         DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->tess.uv, CU_DATATYPE_SURFACE);
942         DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->tess.wireframe_data, CU_DATATYPE_SURFACE);
943         DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.surfaces_tris, CU_DATATYPE_SURFACE);
944         DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.curves_lines, CU_DATATYPE_WIRE);
945
946         DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->edit.pos, CU_DATATYPE_OVERLAY);
947         DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->edit.data, CU_DATATYPE_OVERLAY);
948         DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->edit.curves_nor, CU_DATATYPE_NORMAL);
949         DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->edit.curves_weight, CU_DATATYPE_OVERLAY);
950         DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.edit_verts_points, CU_DATATYPE_OVERLAY);
951         DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.edit_lines, CU_DATATYPE_OVERLAY);
952
953         for (int i = 0; i < cache->mat_len; ++i) {
954                 DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->surf_per_mat_tris[i], CU_DATATYPE_SURFACE);
955         }
956
957         CurveRenderData *rdata = curve_render_data_create(me, ob->runtime.curve_cache, mr_flag);
958
959         /* DispLists */
960         ListBase *lb = &rdata->ob_curve_cache->disp;
961
962         /* Generate VBOs */
963         if (DRW_vbo_requested(cache->ordered.pos_nor)) {
964                 DRW_displist_vertbuf_create_pos_and_nor(lb, cache->ordered.pos_nor);
965         }
966         if (DRW_vbo_requested(cache->ordered.curves_pos)) {
967                 curve_create_curves_pos(rdata, cache->ordered.curves_pos);
968         }
969
970         if (DRW_vbo_requested(cache->tess.pos_nor) ||
971                 DRW_vbo_requested(cache->tess.uv))
972         {
973                 DRW_displist_vertbuf_create_pos_and_nor_and_uv_tess(lb, cache->tess.pos_nor, cache->tess.uv);
974         }
975         if (DRW_vbo_requested(cache->tess.wireframe_data)) {
976                 DRW_displist_vertbuf_create_wireframe_data_tess(lb, cache->tess.wireframe_data);
977         }
978
979         if (DRW_ibo_requested(cache->surf_per_mat_tris[0])) {
980                 DRW_displist_indexbuf_create_triangles_tess_split_by_material(lb, cache->surf_per_mat_tris, cache->mat_len);
981         }
982
983         if (DRW_ibo_requested(cache->ibo.curves_lines)) {
984                 curve_create_curves_lines(rdata, cache->ibo.curves_lines);
985         }
986         if (DRW_ibo_requested(cache->ibo.surfaces_tris)) {
987                 DRW_displist_indexbuf_create_triangles_in_order(lb, cache->ibo.surfaces_tris);
988         }
989
990         if (DRW_vbo_requested(cache->edit.pos) ||
991             DRW_vbo_requested(cache->edit.data) ||
992             DRW_ibo_requested(cache->ibo.edit_verts_points) ||
993             DRW_ibo_requested(cache->ibo.edit_lines))
994         {
995                 curve_create_edit_data_and_handles(rdata, cache->edit.pos, cache->edit.data,
996                                                           cache->ibo.edit_verts_points, cache->ibo.edit_lines);
997         }
998         if (DRW_vbo_requested(cache->edit.curves_nor)) {
999                 curve_create_edit_curves_nor(rdata, cache->edit.curves_nor);
1000         }
1001
1002 #ifdef DEBUG
1003         /* Make sure all requested batches have been set up. */
1004         for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); ++i) {
1005                 GPUBatch **batch = (GPUBatch **)&cache->batch;
1006                 if (batch[i] != NULL) {
1007                         BLI_assert(batch[i]->verts[0] != NULL);
1008                 }
1009         }
1010 #endif
1011
1012         curve_render_data_free(rdata);
1013 }
1014
1015 /** \} */