Merge remote-tracking branch 'origin/master' into blender2.8
[blender.git] / source / blender / draw / intern / draw_cache_impl_curve.c
1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * The Original Code is Copyright (C) 2017 by Blender Foundation.
19  * All rights reserved.
20  *
21  * ***** END GPL LICENSE BLOCK *****
22  */
23
24 /** \file draw_cache_impl_curve.c
25  *  \ingroup draw
26  *
27  * \brief Curve API for render engines
28  */
29
30 #include "MEM_guardedalloc.h"
31
32 #include "BLI_utildefines.h"
33 #include "BLI_math_vector.h"
34
35 #include "DNA_curve_types.h"
36
37 #include "BKE_curve.h"
38
39 #include "BKE_font.h"
40
41 #include "GPU_batch.h"
42 #include "GPU_texture.h"
43
44 #include "UI_resources.h"
45
46 #include "DRW_render.h"
47
48 #include "draw_cache_impl.h"  /* own include */
49
/* Per-vertex flag bits packed into the `data` vertex attribute.
 * Shift expressions are parenthesized so the macros expand safely
 * inside larger expressions (e.g. `x & ACTIVE_NURB`). */
#define SELECT            1
#define ACTIVE_NURB       (1 << 2)
#define EVEN_U_BIT        (1 << 3) /* Alternate this bit for every U vert. */
53
/* Used as values of `color_id` in `edit_curve_overlay_handle_geom.glsl`.
 * The handle colors presumably map from the TH_HANDLE_* theme range; the
 * U-line color id is placed just past that range (hence the `+ 2`) —
 * TODO(review): confirm against the shader's color table. */
enum {
	COLOR_NURB_ULINE_ID = TH_HANDLE_AUTOCLAMP - TH_HANDLE_FREE + 2,

	TOT_HANDLE_COL,
};
60
61 /**
62  * TODO
63  * - Ensure `CurveCache`, `SEQUENCER_DAG_WORKAROUND`.
64  * - Check number of verts/edges to see if cache is valid.
65  * - Check if 'overlay.edges' can use single attribute per edge, not 2 (for selection drawing).
66  */
67
68 static void curve_batch_cache_clear(Curve *cu);
69
70 /* ---------------------------------------------------------------------- */
71 /* Curve Interface, direct access to basic data. */
72
73 static void curve_render_overlay_verts_edges_len_get(
74         ListBase *lb, int *r_vert_len, int *r_edge_len)
75 {
76         BLI_assert(r_vert_len || r_edge_len);
77         int vert_len = 0;
78         int edge_len = 0;
79         for (Nurb *nu = lb->first; nu; nu = nu->next) {
80                 if (nu->bezt) {
81                         vert_len += nu->pntsu * 3;
82                         /* 2x handles per point*/
83                         edge_len += 2 * nu->pntsu;
84                 }
85                 else if (nu->bp) {
86                         vert_len += nu->pntsu * nu->pntsv;
87                         /* segments between points */
88                         edge_len += (nu->pntsu - 1) * nu->pntsv;
89                         edge_len += (nu->pntsv - 1) * nu->pntsu;
90                 }
91         }
92         if (r_vert_len) {
93                 *r_vert_len = vert_len;
94         }
95         if (r_edge_len) {
96                 *r_edge_len = edge_len;
97         }
98 }
99
100 static void curve_render_wire_verts_edges_len_get(
101         const CurveCache *ob_curve_cache,
102         int *r_vert_len, int *r_edge_len)
103 {
104         BLI_assert(r_vert_len || r_edge_len);
105         int vert_len = 0;
106         int edge_len = 0;
107         for (const BevList *bl = ob_curve_cache->bev.first; bl; bl = bl->next) {
108                 if (bl->nr > 0) {
109                         const bool is_cyclic = bl->poly != -1;
110
111                         /* verts */
112                         vert_len += bl->nr;
113
114                         /* edges */
115                         edge_len += bl->nr;
116                         if (!is_cyclic) {
117                                 edge_len -= 1;
118                         }
119                 }
120         }
121         if (r_vert_len) {
122                 *r_vert_len = vert_len;
123         }
124         if (r_edge_len) {
125                 *r_edge_len = edge_len;
126         }
127 }
128
129 static int curve_render_normal_len_get(const ListBase *lb, const CurveCache *ob_curve_cache)
130 {
131         int normal_len = 0;
132         const BevList *bl;
133         const Nurb *nu;
134         for (bl = ob_curve_cache->bev.first, nu = lb->first; nu && bl; bl = bl->next, nu = nu->next) {
135                 int nr = bl->nr;
136                 int skip = nu->resolu / 16;
137 #if 0
138                 while (nr-- > 0) { /* accounts for empty bevel lists */
139                         normal_len += 1;
140                         nr -= skip;
141                 }
142 #else
143                 /* Same as loop above */
144                 normal_len += (nr / (skip + 1)) + ((nr % (skip + 1)) != 0);
145 #endif
146         }
147         return normal_len;
148 }
149
150 /* ---------------------------------------------------------------------- */
151 /* Curve Interface, indirect, partially cached access to complex data. */
152
/* Transient data gathered from a Curve (and its evaluated CurveCache) for a
 * single batch-building pass. Created by curve_render_data_create(), freed
 * by curve_render_data_free(); all pointer members are borrowed. */
typedef struct CurveRenderData {
	/* Bitmask of CU_DATATYPE_* flags this data was gathered for. */
	int types;

	/* Edit-mode overlay counts (control points, handles and their edges). */
	struct {
		int vert_len;
		int edge_len;
	} overlay;

	/* Center-line wire counts, derived from the bevel lists. */
	struct {
		int vert_len;
		int edge_len;
	} wire;

	/* Edit-mode normals. */
	struct {
		/* 'edge_len == len * 2'
		 * 'vert_len == len * 3' */
		int len;
	} normal;

	struct {
		EditFont *edit_font;
	} text;

	/* borrow from 'Object' */
	CurveCache *ob_curve_cache;

	/* borrow from 'Curve' */
	ListBase *nurbs;

	/* edit, index in nurb list */
	int actnu;
	/* edit, index in active nurb (BPoint or BezTriple) */
	int actvert;
} CurveRenderData;
188
/* Flags for CurveRenderData.types: which kinds of render data to gather
 * in curve_render_data_create(). */
enum {
	/* Wire center-line */
	CU_DATATYPE_WIRE        = 1 << 0,
	/* Edit-mode verts and optionally handles */
	CU_DATATYPE_OVERLAY     = 1 << 1,
	/* Edit-mode normals */
	CU_DATATYPE_NORMAL      = 1 << 2,
	/* Geometry */
	CU_DATATYPE_SURFACE     = 1 << 3,
	/* Text */
	CU_DATATYPE_TEXT_SELECT = 1 << 4,
};
201
202 /*
203  * ob_curve_cache can be NULL, only needed for CU_DATATYPE_WIRE
204  */
205 static CurveRenderData *curve_render_data_create(Curve *cu, CurveCache *ob_curve_cache, const int types)
206 {
207         CurveRenderData *rdata = MEM_callocN(sizeof(*rdata), __func__);
208         rdata->types = types;
209         ListBase *nurbs;
210
211         rdata->actnu = cu->actnu;
212         rdata->actvert = cu->actvert;
213
214         rdata->ob_curve_cache = ob_curve_cache;
215
216         if (types & CU_DATATYPE_WIRE) {
217                 curve_render_wire_verts_edges_len_get(
218                         rdata->ob_curve_cache,
219                         &rdata->wire.vert_len, &rdata->wire.edge_len);
220         }
221
222         if (cu->editnurb) {
223                 EditNurb *editnurb = cu->editnurb;
224                 nurbs = &editnurb->nurbs;
225
226                 if (types & CU_DATATYPE_OVERLAY) {
227                         curve_render_overlay_verts_edges_len_get(
228                                 nurbs,
229                                 &rdata->overlay.vert_len,
230                                 &rdata->overlay.edge_len);
231
232                         rdata->actnu = cu->actnu;
233                         rdata->actvert = cu->actvert;
234                 }
235                 if (types & CU_DATATYPE_NORMAL) {
236                         rdata->normal.len = curve_render_normal_len_get(nurbs, rdata->ob_curve_cache);
237                 }
238         }
239         else {
240                 nurbs = &cu->nurb;
241         }
242
243         rdata->nurbs = nurbs;
244
245         rdata->text.edit_font = cu->editfont;
246
247         return rdata;
248 }
249
250 static void curve_render_data_free(CurveRenderData *rdata)
251 {
252 #if 0
253         if (rdata->loose_verts) {
254                 MEM_freeN(rdata->loose_verts);
255         }
256 #endif
257         MEM_freeN(rdata);
258 }
259
260 static int curve_render_data_overlay_verts_len_get(const CurveRenderData *rdata)
261 {
262         BLI_assert(rdata->types & CU_DATATYPE_OVERLAY);
263         return rdata->overlay.vert_len;
264 }
265
266 static int curve_render_data_overlay_edges_len_get(const CurveRenderData *rdata)
267 {
268         BLI_assert(rdata->types & CU_DATATYPE_OVERLAY);
269         return rdata->overlay.edge_len;
270 }
271
272 static int curve_render_data_wire_verts_len_get(const CurveRenderData *rdata)
273 {
274         BLI_assert(rdata->types & CU_DATATYPE_WIRE);
275         return rdata->wire.vert_len;
276 }
277
278 static int curve_render_data_wire_edges_len_get(const CurveRenderData *rdata)
279 {
280         BLI_assert(rdata->types & CU_DATATYPE_WIRE);
281         return rdata->wire.edge_len;
282 }
283
284 static int curve_render_data_normal_len_get(const CurveRenderData *rdata)
285 {
286         BLI_assert(rdata->types & CU_DATATYPE_NORMAL);
287         return rdata->normal.len;
288 }
289
290
291 /* ---------------------------------------------------------------------- */
292 /* Curve GPUBatch Cache */
293
/* Persistent per-Curve GPU batch cache (stored in Curve.batch_cache).
 * Members are created lazily by the getters below and destroyed by
 * curve_batch_cache_clear(). */
typedef struct CurveBatchCache {
	/* center-line */
	struct {
		GPUVertBuf *verts;
		GPUVertBuf *edges;
		GPUBatch *batch;
		GPUIndexBuf *elem;
	} wire;

	/* normals */
	struct {
		GPUVertBuf *verts;
		GPUVertBuf *edges;
		GPUBatch *batch;
		GPUIndexBuf *elem;
	} normal;

	/* control handles and vertices */
	struct {
		GPUBatch *edges;
		GPUBatch *verts;
		/* Points-only batch, sharing the vbo of `verts` via an index buffer. */
		GPUBatch *verts_no_handles;
	} overlay;

	struct {
		GPUVertBuf *verts;
		GPUIndexBuf *triangles_in_order;
		/* Array of `mat_len` batches, one per material slot. */
		GPUBatch **shaded_triangles;
		GPUBatch *batch;
		int mat_len;
	} surface;

	/* Wireframes */
	struct {
		GPUBatch *batch;
	} face_wire;

	/* 3d text */
	struct {
		GPUBatch *select;
		GPUBatch *cursor;
	} text;

	/* settings to determine if cache is invalid */
	bool is_dirty;

	/* Display length used when building `normal.verts`. */
	float normal_size;

	/* Edit state (editnurb/editfont) the cache was built for. */
	bool is_editmode;
} CurveBatchCache;
344
345 /* GPUBatch cache management. */
346
347 static bool curve_batch_cache_valid(Curve *cu)
348 {
349         CurveBatchCache *cache = cu->batch_cache;
350
351         if (cache == NULL) {
352                 return false;
353         }
354
355         if (cache->is_dirty) {
356                 return false;
357         }
358
359         if (cache->is_editmode != ((cu->editnurb != NULL) || (cu->editfont != NULL))) {
360                 return false;
361         }
362
363         if (cache->is_editmode) {
364                 if (cu->editfont) {
365                         /* TODO */
366                 }
367         }
368
369         return true;
370 }
371
372 static void curve_batch_cache_init(Curve *cu)
373 {
374         CurveBatchCache *cache = cu->batch_cache;
375
376         if (!cache) {
377                 cache = cu->batch_cache = MEM_callocN(sizeof(*cache), __func__);
378         }
379         else {
380                 memset(cache, 0, sizeof(*cache));
381         }
382
383 #if 0
384         ListBase *nurbs;
385         if (cu->editnurb) {
386                 EditNurb *editnurb = cu->editnurb;
387                 nurbs = &editnurb->nurbs;
388         }
389         else {
390                 nurbs = &cu->nurb;
391         }
392 #endif
393
394         cache->is_editmode = (cu->editnurb != NULL) || (cu->editfont != NULL);
395
396         cache->is_dirty = false;
397 }
398
399 static CurveBatchCache *curve_batch_cache_get(Curve *cu)
400 {
401         if (!curve_batch_cache_valid(cu)) {
402                 curve_batch_cache_clear(cu);
403                 curve_batch_cache_init(cu);
404         }
405         return cu->batch_cache;
406 }
407
/* Public API: tag (parts of) the batch cache as out of date.
 * DIRTY_ALL invalidates lazily (everything is freed on the next rebuild);
 * DIRTY_SELECT discards only the selection-dependent batches immediately. */
void DRW_curve_batch_cache_dirty_tag(Curve *cu, int mode)
{
	CurveBatchCache *cache = cu->batch_cache;
	if (cache == NULL) {
		return;
	}
	switch (mode) {
		case BKE_CURVE_BATCH_DIRTY_ALL:
			cache->is_dirty = true;
			break;
		case BKE_CURVE_BATCH_DIRTY_SELECT:
			/* editnurb */
			GPU_BATCH_DISCARD_SAFE(cache->overlay.verts_no_handles);
			GPU_BATCH_DISCARD_SAFE(cache->overlay.verts);
			GPU_BATCH_DISCARD_SAFE(cache->overlay.edges);

			/* editfont */
			GPU_BATCH_DISCARD_SAFE(cache->text.select);
			GPU_BATCH_DISCARD_SAFE(cache->text.cursor);
			break;
		default:
			/* Unknown mode is a programming error. */
			BLI_assert(0);
	}
}
432
/* Discard every GPU resource held by the cache (the struct itself stays
 * allocated; see #DRW_curve_batch_cache_free for full destruction). */
static void curve_batch_cache_clear(Curve *cu)
{
	CurveBatchCache *cache = cu->batch_cache;
	if (!cache) {
		return;
	}

	GPU_BATCH_DISCARD_SAFE(cache->overlay.verts_no_handles);
	GPU_BATCH_DISCARD_SAFE(cache->overlay.verts);
	GPU_BATCH_DISCARD_SAFE(cache->overlay.edges);

	GPU_VERTBUF_DISCARD_SAFE(cache->surface.verts);
	GPU_INDEXBUF_DISCARD_SAFE(cache->surface.triangles_in_order);

	GPU_BATCH_DISCARD_ARRAY_SAFE(cache->surface.shaded_triangles, cache->surface.mat_len);
	GPU_BATCH_DISCARD_SAFE(cache->surface.batch);

	GPU_BATCH_DISCARD_SAFE(cache->face_wire.batch);

	/* The batch does not own its vbo & elem, so discard them explicitly. */
	GPU_BATCH_DISCARD_SAFE(cache->wire.batch);
	GPU_VERTBUF_DISCARD_SAFE(cache->wire.verts);
	GPU_VERTBUF_DISCARD_SAFE(cache->wire.edges);
	GPU_INDEXBUF_DISCARD_SAFE(cache->wire.elem);

	/* Same: batch does not own its vbo & elem. */
	GPU_BATCH_DISCARD_SAFE(cache->normal.batch);
	GPU_VERTBUF_DISCARD_SAFE(cache->normal.verts);
	GPU_VERTBUF_DISCARD_SAFE(cache->normal.edges);
	GPU_INDEXBUF_DISCARD_SAFE(cache->normal.elem);

	/* 3d text */
	GPU_BATCH_DISCARD_SAFE(cache->text.cursor);
	GPU_BATCH_DISCARD_SAFE(cache->text.select);
}
468
/* Public API: free the whole batch cache of \a cu
 * (GPU resources and the cache struct itself). */
void DRW_curve_batch_cache_free(Curve *cu)
{
	curve_batch_cache_clear(cu);
	MEM_SAFE_FREE(cu->batch_cache);
}
474
475 /* -------------------------------------------------------------------- */
476
477 /** \name Private Curve Cache API
478  * \{ */
479
480 /* GPUBatch cache usage. */
/* Lazily build (and cache) the center-line vertex buffer:
 * one position per bevel point, in bevel-list order — this order must match
 * the index buffer built in curve_batch_cache_get_wire_edges(). */
static GPUVertBuf *curve_batch_cache_get_wire_verts(CurveRenderData *rdata, CurveBatchCache *cache)
{
	BLI_assert(rdata->types & CU_DATATYPE_WIRE);
	BLI_assert(rdata->ob_curve_cache != NULL);

	if (cache->wire.verts == NULL) {
		/* The format is built once and shared by all curves (static). */
		static GPUVertFormat format = { 0 };
		static struct { uint pos; } attr_id;
		if (format.attr_len == 0) {
			/* initialize vertex format */
			attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
		}

		const int vert_len = curve_render_data_wire_verts_len_get(rdata);

		GPUVertBuf *vbo = cache->wire.verts = GPU_vertbuf_create_with_format(&format);
		GPU_vertbuf_data_alloc(vbo, vert_len);
		int vbo_len_used = 0;
		for (const BevList *bl = rdata->ob_curve_cache->bev.first; bl; bl = bl->next) {
			if (bl->nr > 0) {
				const int i_end = vbo_len_used + bl->nr;
				for (const BevPoint *bevp = bl->bevpoints; vbo_len_used < i_end; vbo_len_used++, bevp++) {
					GPU_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used, bevp->vec);
				}
			}
		}
		/* Count must agree with curve_render_wire_verts_edges_len_get(). */
		BLI_assert(vbo_len_used == vert_len);
	}

	return cache->wire.verts;
}
512
513 static GPUIndexBuf *curve_batch_cache_get_wire_edges(CurveRenderData *rdata, CurveBatchCache *cache)
514 {
515         BLI_assert(rdata->types & CU_DATATYPE_WIRE);
516         BLI_assert(rdata->ob_curve_cache != NULL);
517
518         if (cache->wire.edges == NULL) {
519                 const int vert_len = curve_render_data_wire_verts_len_get(rdata);
520                 const int edge_len = curve_render_data_wire_edges_len_get(rdata);
521                 int edge_len_used = 0;
522
523                 GPUIndexBufBuilder elb;
524                 GPU_indexbuf_init(&elb, GPU_PRIM_LINES, edge_len, vert_len);
525
526                 int i = 0;
527                 for (const BevList *bl = rdata->ob_curve_cache->bev.first; bl; bl = bl->next) {
528                         if (bl->nr > 0) {
529                                 const bool is_cyclic = bl->poly != -1;
530                                 const int i_end = i + (bl->nr);
531                                 int i_prev;
532                                 if (is_cyclic) {
533                                         i_prev = i + (bl->nr - 1);
534                                 }
535                                 else {
536                                         i_prev = i;
537                                         i += 1;
538                                 }
539                                 for (; i < i_end; i_prev = i++) {
540                                         GPU_indexbuf_add_line_verts(&elb, i_prev, i);
541                                         edge_len_used += 1;
542                                 }
543                         }
544                 }
545                 cache->wire.elem = GPU_indexbuf_build(&elb);
546         }
547
548         return cache->wire.elem;
549 }
550
/* Lazily build (and cache) the edit-mode normal-display vertex buffer:
 * 3 verts per shown normal (offset point, curve point, mirrored point). */
static GPUVertBuf *curve_batch_cache_get_normal_verts(CurveRenderData *rdata, CurveBatchCache *cache)
{
	BLI_assert(rdata->types & CU_DATATYPE_NORMAL);
	BLI_assert(rdata->ob_curve_cache != NULL);

	if (cache->normal.verts == NULL) {
		static GPUVertFormat format = { 0 };
		static struct { uint pos; } attr_id;
		if (format.attr_len == 0) {
			/* initialize vertex format */
			attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
		}

		const int normal_len = curve_render_data_normal_len_get(rdata);
		const int vert_len = normal_len * 3;

		GPUVertBuf *vbo = cache->normal.verts = GPU_vertbuf_create_with_format(&format);
		GPU_vertbuf_data_alloc(vbo, vert_len);
		int vbo_len_used = 0;

		const BevList *bl;
		const Nurb *nu;

		/* Walk bevel lists and nurbs in lock-step — the same pairing and
		 * skip as curve_render_normal_len_get(), so counts match. */
		for (bl = rdata->ob_curve_cache->bev.first, nu = rdata->nurbs->first;
		     nu && bl;
		     bl = bl->next, nu = nu->next)
		{
			const BevPoint *bevp = bl->bevpoints;
			int nr = bl->nr;
			int skip = nu->resolu / 16;

			while (nr-- > 0) { /* accounts for empty bevel lists */
				/* Display length scales with the point radius. */
				const float fac = bevp->radius * cache->normal_size;
				float vec_a[3]; /* Offset perpendicular to the curve */
				float vec_b[3]; /* Delta along the curve */

				vec_a[0] = fac;
				vec_a[1] = 0.0f;
				vec_a[2] = 0.0f;

				/* Rotate the X-axis offset into the bevel point's frame,
				 * then pull it back along the curve direction. */
				mul_qt_v3(bevp->quat, vec_a);
				madd_v3_v3fl(vec_a, bevp->dir, -fac);

				/* Mirror across the direction vector for the second arm. */
				reflect_v3_v3v3(vec_b, vec_a, bevp->dir);
				negate_v3(vec_b);

				add_v3_v3(vec_a, bevp->vec);
				add_v3_v3(vec_b, bevp->vec);

				GPU_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used++, vec_a);
				GPU_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used++, bevp->vec);
				GPU_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used++, vec_b);

				/* Sub-sample: emit one normal every (skip + 1) points. */
				bevp += skip + 1;
				nr -= skip;
			}
		}
		BLI_assert(vbo_len_used == vert_len);
	}

	return cache->normal.verts;
}
613
614 static GPUIndexBuf *curve_batch_cache_get_normal_edges(CurveRenderData *rdata, CurveBatchCache *cache)
615 {
616         BLI_assert(rdata->types & CU_DATATYPE_NORMAL);
617         BLI_assert(rdata->ob_curve_cache != NULL);
618
619         if (cache->normal.edges == NULL) {
620                 const int normal_len = curve_render_data_normal_len_get(rdata);
621                 const int vert_len = normal_len * 3;
622                 const int edge_len = normal_len * 2;
623
624                 GPUIndexBufBuilder elb;
625                 GPU_indexbuf_init(&elb, GPU_PRIM_LINES, edge_len, vert_len);
626
627                 int vbo_len_used = 0;
628                 for (int i = 0; i < normal_len; i++) {
629                         GPU_indexbuf_add_line_verts(&elb, vbo_len_used + 0, vbo_len_used + 1);
630                         GPU_indexbuf_add_line_verts(&elb, vbo_len_used + 1, vbo_len_used + 2);
631                         vbo_len_used += 3;
632                 }
633
634                 BLI_assert(vbo_len_used == vert_len);
635
636                 cache->normal.elem = GPU_indexbuf_build(&elb);
637         }
638
639         return cache->normal.elem;
640 }
641
/* Build all edit-mode overlay batches at once: the vertex/point batches
 * (all verts, and a knots-only variant sharing the same vbo) and the handle/
 * grid edge batch. The `data` attribute packs selection/active flags and a
 * color id per vertex (see SELECT/ACTIVE_NURB/EVEN_U_BIT above). */
static void curve_batch_cache_create_overlay_batches(Curve *cu)
{
	/* Since CU_DATATYPE_OVERLAY is slow to generate, generate them all at once */
	int options = CU_DATATYPE_OVERLAY;

	CurveBatchCache *cache = curve_batch_cache_get(cu);
	CurveRenderData *rdata = curve_render_data_create(cu, NULL, options);

	if (cache->overlay.verts == NULL) {
		static GPUVertFormat format = { 0 };
		static struct { uint pos, data; } attr_id;
		if (format.attr_len == 0) {
			/* initialize vertex format */
			attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
			attr_id.data = GPU_vertformat_attr_add(&format, "data", GPU_COMP_U8, 1, GPU_FETCH_INT);
		}

		GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
		const int vbo_len_capacity = curve_render_data_overlay_verts_len_get(rdata);
		GPUIndexBufBuilder elb;
		GPU_indexbuf_init(&elb, GPU_PRIM_POINTS, vbo_len_capacity, vbo_len_capacity);
		int vbo_len_used = 0;
		GPU_vertbuf_data_alloc(vbo, vbo_len_capacity);
		int i = 0, nu_id = 0;
		for (Nurb *nu = rdata->nurbs->first; nu; nu = nu->next, nu_id++) {
			const bool is_active_nurb = (nu_id == cu->actnu);
			if (nu->bezt) {
				int a = 0;
				for (const BezTriple *bezt = nu->bezt; a < nu->pntsu; a++, bezt++) {
					if (bezt->hide == false) {
						const bool is_active = (i == rdata->actvert);
						/* Only the knot (middle of the 3 verts emitted below)
						 * goes into the no-handles index buffer. */
						GPU_indexbuf_add_point_vert(&elb, vbo_len_used + 1);
						for (int j = 0; j < 3; j++) {
							/* j == 0/2 are the handles, j == 1 is the knot. */
							char vflag = ((&bezt->f1)[j] & SELECT) ? VFLAG_VERTEX_SELECTED : 0;
							vflag |= (is_active) ? VFLAG_VERTEX_ACTIVE : 0;
							vflag |= (is_active_nurb) ? ACTIVE_NURB : 0;
							/* handle color id */
							char col_id = (&bezt->h1)[j / 2];
							vflag |= col_id << 4; /* << 4 because of EVEN_U_BIT */
							GPU_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used, bezt->vec[j]);
							GPU_vertbuf_attr_set(vbo, attr_id.data, vbo_len_used, &vflag);
							vbo_len_used += 1;
						}
					}
					i += 1;
				}
			}
			else if (nu->bp) {
				int a = 0;
				int pt_len = nu->pntsu * nu->pntsv;
				for (const BPoint *bp = nu->bp; a < pt_len; a++, bp++) {
					if (bp->hide == false) {
						const bool is_active = (i == rdata->actvert);
						char vflag = (bp->f1 & SELECT) ? VFLAG_VERTEX_SELECTED : 0;
						vflag |= (is_active) ? VFLAG_VERTEX_ACTIVE : 0;
						vflag |= (is_active_nurb) ? ACTIVE_NURB : 0;
						vflag |= (((a % nu->pntsu) % 2) == 0) ? EVEN_U_BIT : 0;
						vflag |= COLOR_NURB_ULINE_ID << 4; /* << 4 because of EVEN_U_BIT */
						GPU_indexbuf_add_point_vert(&elb, vbo_len_used);
						GPU_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used, bp->vec);
						GPU_vertbuf_attr_set(vbo, attr_id.data, vbo_len_used, &vflag);
						vbo_len_used += 1;
					}
					i += 1;
				}
			}
			/* NOTE(review): `i` is already advanced once per point inside both
			 * branches above, so this extra per-nurb increment looks like
			 * double counting and would skew the `i == rdata->actvert`
			 * comparison for later nurbs — verify intended actvert indexing. */
			i += nu->pntsu;
		}
		/* Hidden points were skipped, shrink the vbo to what was written. */
		if (vbo_len_capacity != vbo_len_used) {
			GPU_vertbuf_data_resize(vbo, vbo_len_used);
		}

		GPUIndexBuf *ibo = GPU_indexbuf_build(&elb);

		/* `verts` owns the vbo; `verts_no_handles` shares it and owns only
		 * its index buffer. */
		cache->overlay.verts = GPU_batch_create_ex(GPU_PRIM_POINTS, vbo, NULL, GPU_BATCH_OWNS_VBO);
		cache->overlay.verts_no_handles = GPU_batch_create_ex(GPU_PRIM_POINTS, vbo, ibo, GPU_BATCH_OWNS_INDEX);
	}

	if (cache->overlay.edges == NULL) {
		/* Reuse the vertex buffer of the verts batch built above. */
		GPUVertBuf *vbo = cache->overlay.verts->verts[0];

		const int edge_len =  curve_render_data_overlay_edges_len_get(rdata);
		const int vbo_len_capacity = edge_len * 2;

		GPUIndexBufBuilder elb;
		GPU_indexbuf_init(&elb, GPU_PRIM_LINES, vbo_len_capacity, vbo->vertex_len);

		/* `curr_index` tracks positions in the (hidden-points-skipped) vbo. */
		int curr_index = 0;
		int i = 0;
		for (Nurb *nu = rdata->nurbs->first; nu; nu = nu->next, i++) {
			if (nu->bezt) {
				int a = 0;
				for (const BezTriple *bezt = nu->bezt; a < nu->pntsu; a++, bezt++) {
					if (bezt->hide == false) {
						/* Two handle edges: knot to each handle. */
						GPU_indexbuf_add_line_verts(&elb, curr_index + 1, curr_index + 0);
						GPU_indexbuf_add_line_verts(&elb, curr_index + 1, curr_index + 2);
						curr_index += 3;
					}
				}
			}
			else if (nu->bp) {
				int a = 0;
				/* `next_v_index` = vbo index of the first point in the next V
				 * row, counting only visible points of the first row. */
				int next_v_index = curr_index;
				for (const BPoint *bp = nu->bp; a < nu->pntsu; a++, bp++) {
					if (bp->hide == false) {
						next_v_index += 1;
					}
				}

				int pt_len = nu->pntsu * nu->pntsv;
				for (a = 0; a < pt_len; a++) {
					const BPoint *bp_curr = &nu->bp[a];
					/* Neighbors in the U and V direction, NULL at the grid border. */
					const BPoint *bp_next_u = ((a % nu->pntsu) < (nu->pntsu - 1)) ? &nu->bp[a + 1] : NULL;
					const BPoint *bp_next_v = (a < (pt_len - nu->pntsu)) ? &nu->bp[a + nu->pntsu] : NULL;
					if (bp_curr->hide == false) {
						if (bp_next_u && (bp_next_u->hide == false)) {
							GPU_indexbuf_add_line_verts(&elb, curr_index, curr_index + 1);
						}
						if (bp_next_v && (bp_next_v->hide == false)) {
							GPU_indexbuf_add_line_verts(&elb, curr_index, next_v_index);
						}
						curr_index += 1;
					}
					if (bp_next_v && (bp_next_v->hide == false)) {
						next_v_index += 1;
					}
				}
			}
		}

		GPUIndexBuf *ibo = GPU_indexbuf_build(&elb);
		cache->overlay.edges = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, ibo, GPU_BATCH_OWNS_INDEX);
	}

	curve_render_data_free(rdata);
}
778
779 static GPUBatch *curve_batch_cache_get_pos_and_normals(CurveRenderData *rdata, CurveBatchCache *cache)
780 {
781         BLI_assert(rdata->types & CU_DATATYPE_SURFACE);
782         if (cache->surface.batch == NULL) {
783                 ListBase *lb = &rdata->ob_curve_cache->disp;
784
785                 if (cache->surface.verts == NULL) {
786                         cache->surface.verts = DRW_displist_vertbuf_calc_pos_with_normals(lb);
787                 }
788                 if (cache->surface.triangles_in_order == NULL) {
789                         cache->surface.triangles_in_order = DRW_displist_indexbuf_calc_triangles_in_order(lb);
790                 }
791                 cache->surface.batch = GPU_batch_create(
792                         GPU_PRIM_TRIS, cache->surface.verts, cache->surface.triangles_in_order);
793         }
794
795         return cache->surface.batch;
796 }
797
798 /** \} */
799
800
801 /* -------------------------------------------------------------------- */
802
803 /** \name Private Object/Font Cache API
804  * \{ */
805
806
/* Lazily build (and cache) the triangle batch for the text-edit selection
 * highlight: two triangles (one quad) per selection box in the edit-font.
 * Requires `rdata` created with CU_DATATYPE_TEXT_SELECT. */
static GPUBatch *curve_batch_cache_get_edit_select(CurveRenderData *rdata, CurveBatchCache *cache)
{
	BLI_assert(rdata->types & CU_DATATYPE_TEXT_SELECT);
	if (cache->text.select == NULL) {
		EditFont *ef = rdata->text.edit_font;
		/* Vertex format is built once and reused across calls (static). */
		static GPUVertFormat format = { 0 };
		static struct { uint pos; } attr_id;
		if (format.attr_len == 0) {
			attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
		}

		GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
		/* 6 verts per box: 2 triangles, no index buffer. */
		const int vbo_len_capacity = ef->selboxes_len * 6;
		int vbo_len_used = 0;
		GPU_vertbuf_data_alloc(vbo, vbo_len_capacity);

		float box[4][3];

		/* Constant Z offset lifts the selection quad slightly above the text plane
		 * to avoid z-fighting; xy are filled per box below. */
		box[0][2] = box[1][2] = box[2][2] = box[3][2] = 0.001;

		for (int i = 0; i < ef->selboxes_len; i++) {
			EditFontSelBox *sb = &ef->selboxes[i];

			/* Extend the box up to the start of the next box when it sits on the
			 * same line, so adjacent character boxes connect without gaps;
			 * otherwise (line break or last box) use the box's own width. */
			float selboxw;
			if (i + 1 != ef->selboxes_len) {
				if (ef->selboxes[i + 1].y == sb->y)
					selboxw = ef->selboxes[i + 1].x - sb->x;
				else
					selboxw = sb->w;
			}
			else {
				selboxw = sb->w;
			}

			if (sb->rot == 0.0f) {
				/* Axis-aligned box: corners in CCW order from bottom-left. */
				copy_v2_fl2(box[0], sb->x, sb->y);
				copy_v2_fl2(box[1], sb->x + selboxw, sb->y);
				copy_v2_fl2(box[2], sb->x + selboxw, sb->y + sb->h);
				copy_v2_fl2(box[3], sb->x, sb->y + sb->h);
			}
			else {
				/* Rotated box: rotate each corner (relative to origin) then
				 * translate by the box position. */
				float mat[2][2];

				angle_to_mat2(mat, sb->rot);

				copy_v2_fl2(box[0], sb->x, sb->y);

				copy_v2_fl2(box[1], selboxw, 0.0f);
				mul_m2v2(mat, box[1]);
				/* NOTE(review): `&sb->x` is treated as a float[2] — relies on
				 * `x`/`y` being adjacent members of EditFontSelBox. */
				add_v2_v2(box[1], &sb->x);

				copy_v2_fl2(box[2], selboxw, sb->h);
				mul_m2v2(mat, box[2]);
				add_v2_v2(box[2], &sb->x);

				copy_v2_fl2(box[3], 0.0f, sb->h);
				mul_m2v2(mat, box[3]);
				add_v2_v2(box[3], &sb->x);
			}

			/* Emit the quad as two triangles: (0,1,2) and (0,2,3). */
			GPU_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used++, box[0]);
			GPU_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used++, box[1]);
			GPU_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used++, box[2]);

			GPU_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used++, box[0]);
			GPU_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used++, box[2]);
			GPU_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used++, box[3]);
		}
		BLI_assert(vbo_len_used == vbo_len_capacity);
		/* Batch takes ownership of the VBO. */
		cache->text.select = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
	}
	return cache->text.select;
}
881
882 static GPUBatch *curve_batch_cache_get_edit_cursor(CurveRenderData *rdata, CurveBatchCache *cache)
883 {
884         BLI_assert(rdata->types & CU_DATATYPE_TEXT_SELECT);
885         if (cache->text.cursor == NULL) {
886                 static GPUVertFormat format = { 0 };
887                 static struct { uint pos; } attr_id;
888                 if (format.attr_len == 0) {
889                         attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
890                 }
891
892                 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
893                 const int vbo_len_capacity = 4;
894                 GPU_vertbuf_data_alloc(vbo, vbo_len_capacity);
895                 for (int i = 0; i < 4; i++) {
896                         GPU_vertbuf_attr_set(vbo, attr_id.pos, i, rdata->text.edit_font->textcurs[i]);
897                 }
898                 cache->text.cursor = GPU_batch_create_ex(GPU_PRIM_TRI_FAN, vbo, NULL, GPU_BATCH_OWNS_VBO);
899         }
900         return cache->text.cursor;
901 }
902
903 /** \} */
904
905 /* -------------------------------------------------------------------- */
906
907 /** \name Public Object/Curve API
908  * \{ */
909
910 GPUBatch *DRW_curve_batch_cache_get_wire_edge(Curve *cu, CurveCache *ob_curve_cache)
911 {
912         CurveBatchCache *cache = curve_batch_cache_get(cu);
913
914         if (cache->wire.batch == NULL) {
915                 /* create batch from Curve */
916                 CurveRenderData *rdata = curve_render_data_create(cu, ob_curve_cache, CU_DATATYPE_WIRE);
917
918                 cache->wire.batch = GPU_batch_create(
919                         GPU_PRIM_LINES,
920                         curve_batch_cache_get_wire_verts(rdata, cache),
921                         curve_batch_cache_get_wire_edges(rdata, cache));
922
923                 curve_render_data_free(rdata);
924         }
925         return cache->wire.batch;
926 }
927
928 GPUBatch *DRW_curve_batch_cache_get_normal_edge(Curve *cu, CurveCache *ob_curve_cache, float normal_size)
929 {
930         CurveBatchCache *cache = curve_batch_cache_get(cu);
931
932         if (cache->normal.batch != NULL) {
933                 cache->normal_size = normal_size;
934                 if (cache->normal_size != normal_size) {
935                         GPU_BATCH_DISCARD_SAFE(cache->normal.batch);
936                         GPU_VERTBUF_DISCARD_SAFE(cache->normal.edges);
937                 }
938         }
939         cache->normal_size = normal_size;
940
941         if (cache->normal.batch == NULL) {
942                 /* create batch from Curve */
943                 CurveRenderData *rdata = curve_render_data_create(cu, ob_curve_cache, CU_DATATYPE_NORMAL);
944
945                 cache->normal.batch = GPU_batch_create(
946                         GPU_PRIM_LINES,
947                         curve_batch_cache_get_normal_verts(rdata, cache),
948                         curve_batch_cache_get_normal_edges(rdata, cache));
949
950                 curve_render_data_free(rdata);
951                 cache->normal_size = normal_size;
952         }
953         return cache->normal.batch;
954 }
955
956 GPUBatch *DRW_curve_batch_cache_get_edit_edges(Curve *cu)
957 {
958         CurveBatchCache *cache = curve_batch_cache_get(cu);
959
960         if (cache->overlay.edges == NULL) {
961                 curve_batch_cache_create_overlay_batches(cu);
962         }
963
964         return cache->overlay.edges;
965 }
966
967 GPUBatch *DRW_curve_batch_cache_get_edit_verts(Curve *cu, bool handles)
968 {
969         CurveBatchCache *cache = curve_batch_cache_get(cu);
970
971         if (cache->overlay.verts == NULL || cache->overlay.verts_no_handles == NULL) {
972                 curve_batch_cache_create_overlay_batches(cu);
973         }
974
975         return (handles) ? cache->overlay.verts : cache->overlay.verts_no_handles;
976 }
977
978 GPUBatch *DRW_curve_batch_cache_get_triangles_with_normals(
979         struct Curve *cu, struct CurveCache *ob_curve_cache)
980 {
981         CurveBatchCache *cache = curve_batch_cache_get(cu);
982
983         if (cache->surface.batch == NULL) {
984                 CurveRenderData *rdata = curve_render_data_create(cu, ob_curve_cache, CU_DATATYPE_SURFACE);
985
986                 curve_batch_cache_get_pos_and_normals(rdata, cache);
987
988                 curve_render_data_free(rdata);
989         }
990
991         return cache->surface.batch;
992 }
993
/* Public API: return one surface triangle batch per material slot
 * (array of `gpumat_array_len` batches), building lazily.
 * The returned array is owned by the cache; callers must not free it. */
GPUBatch **DRW_curve_batch_cache_get_surface_shaded(
        struct Curve *cu, struct CurveCache *ob_curve_cache,
        struct GPUMaterial **UNUSED(gpumat_array), uint gpumat_array_len)
{
	CurveBatchCache *cache = curve_batch_cache_get(cu);

	/* Material count changed since the cache was built: throw the whole
	 * batch array away so it is rebuilt below with the new length. */
	if (cache->surface.mat_len != gpumat_array_len) {
		GPU_BATCH_DISCARD_ARRAY_SAFE(cache->surface.shaded_triangles, cache->surface.mat_len);
	}

	if (cache->surface.shaded_triangles == NULL) {
		CurveRenderData *rdata = curve_render_data_create(cu, ob_curve_cache, CU_DATATYPE_SURFACE);
		ListBase *lb = &rdata->ob_curve_cache->disp;

		cache->surface.mat_len = gpumat_array_len;
		if (cu->flag & CU_UV_ORCO) {
			/* UV/orco path: each batch carries its own pos/normal/uv VBO. */
			cache->surface.shaded_triangles = DRW_displist_batch_calc_tri_pos_normals_and_uv_split_by_material(
			        lb, gpumat_array_len);
		}
		else {
			/* No UVs: share one pos+normal VBO across all material batches,
			 * with a per-material index buffer. */
			cache->surface.shaded_triangles = MEM_mallocN(
			        sizeof(*cache->surface.shaded_triangles) * gpumat_array_len, __func__);
			GPUIndexBuf **el = DRW_displist_indexbuf_calc_triangles_in_order_split_by_material(
			        lb, gpumat_array_len);

			if (cache->surface.verts == NULL) {
				cache->surface.verts = DRW_displist_vertbuf_calc_pos_with_normals(lb);
			}

			for (int i = 0; i < gpumat_array_len; ++i) {
				/* Each batch takes ownership of its index buffer (not the shared VBO). */
				cache->surface.shaded_triangles[i] = GPU_batch_create_ex(
				        GPU_PRIM_TRIS, cache->surface.verts, el[i], GPU_BATCH_OWNS_INDEX);
			}

			/* Only the container array is freed; the IBOs now belong to the batches. */
			MEM_freeN(el); /* Save `el` in cache? */
		}

		curve_render_data_free(rdata);
	}

	return cache->surface.shaded_triangles;
}
1036
1037 GPUBatch *DRW_curve_batch_cache_get_wireframes_face(Curve *cu, CurveCache *ob_curve_cache)
1038 {
1039         CurveBatchCache *cache = curve_batch_cache_get(cu);
1040
1041         if (cache->face_wire.batch == NULL) {
1042                 CurveRenderData *rdata = curve_render_data_create(cu, ob_curve_cache, CU_DATATYPE_SURFACE);
1043
1044                 ListBase *lb = &rdata->ob_curve_cache->disp;
1045
1046                 cache->face_wire.batch = DRW_displist_create_edges_overlay_batch(lb);
1047
1048                 curve_render_data_free(rdata);
1049         }
1050
1051         return cache->face_wire.batch;
1052 }
1053
1054 /* -------------------------------------------------------------------- */
1055
1056 /** \name Public Object/Font API
1057  * \{ */
1058
1059 GPUBatch *DRW_curve_batch_cache_get_edit_select(Curve *cu)
1060 {
1061         CurveBatchCache *cache = curve_batch_cache_get(cu);
1062
1063         if (cache->text.select == NULL) {
1064                 CurveRenderData *rdata = curve_render_data_create(cu, NULL, CU_DATATYPE_TEXT_SELECT);
1065
1066                 curve_batch_cache_get_edit_select(rdata, cache);
1067
1068                 curve_render_data_free(rdata);
1069         }
1070
1071         return cache->text.select;
1072 }
1073
1074 GPUBatch *DRW_curve_batch_cache_get_edit_cursor(Curve *cu)
1075 {
1076         CurveBatchCache *cache = curve_batch_cache_get(cu);
1077
1078         if (cache->text.cursor == NULL) {
1079                 CurveRenderData *rdata = curve_render_data_create(cu, NULL, CU_DATATYPE_TEXT_SELECT);
1080
1081                 curve_batch_cache_get_edit_cursor(rdata, cache);
1082
1083                 curve_render_data_free(rdata);
1084         }
1085
1086         return cache->text.cursor;
1087 }
1088
1089 /** \} */