Merge remote-tracking branch 'origin/master' into blender2.8
[blender.git] / source / blender / draw / intern / draw_cache_impl_curve.c
1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * The Original Code is Copyright (C) 2017 by Blender Foundation.
19  * All rights reserved.
20  *
21  * ***** END GPL LICENSE BLOCK *****
22  */
23
24 /** \file draw_cache_impl_curve.c
25  *  \ingroup draw
26  *
27  * \brief Curve API for render engines
28  */
29
30 #include "MEM_guardedalloc.h"
31
32 #include "BLI_utildefines.h"
33 #include "BLI_math_vector.h"
34
35 #include "DNA_curve_types.h"
36
37 #include "BKE_curve.h"
38
39 #include "BKE_font.h"
40
41 #include "GPU_batch.h"
42 #include "GPU_texture.h"
43
44 #include "UI_resources.h"
45
46 #include "DRW_render.h"
47
48 #include "draw_cache_impl.h"  /* own include */
49
/* Bit-flags packed into the per-vertex `data` attribute of the edit-mode
 * overlay batches (interpreted by `edit_curve_overlay_handle_geom.glsl`).
 * Expansions are parenthesized so the macros are safe in any expression
 * context (CERT PRE02-C). */
#define SELECT            1
#define ACTIVE_NURB       (1 << 2)
#define EVEN_U_BIT        (1 << 3) /* Alternate this bit for every U vert. */
53
/* Used as values of `color_id` in `edit_curve_overlay_handle_geom.glsl` */
enum {
        /* Handle theme colors occupy the range TH_HANDLE_FREE..TH_HANDLE_AUTOCLAMP;
         * the nurb U-line color id is placed just past that range (+2 leaves room
         * for the two non-handle entries at the start of the shader's table). */
        COLOR_NURB_ULINE_ID = TH_HANDLE_AUTOCLAMP - TH_HANDLE_FREE + 2,

        /* Total number of color slots the shader's color table must hold. */
        TOT_HANDLE_COL,
};
60
61 /**
62  * TODO
63  * - Ensure `CurveCache`, `SEQUENCER_DAG_WORKAROUND`.
64  * - Check number of verts/edges to see if cache is valid.
65  * - Check if 'overlay.edges' can use single attribute per edge, not 2 (for selection drawing).
66  */
67
68 static void curve_batch_cache_clear(Curve *cu);
69
70 /* ---------------------------------------------------------------------- */
71 /* Curve Interface, direct access to basic data. */
72
73 static void curve_render_overlay_verts_edges_len_get(
74         ListBase *lb, int *r_vert_len, int *r_edge_len)
75 {
76         BLI_assert(r_vert_len || r_edge_len);
77         int vert_len = 0;
78         int edge_len = 0;
79         for (Nurb *nu = lb->first; nu; nu = nu->next) {
80                 if (nu->bezt) {
81                         vert_len += nu->pntsu * 3;
82                         /* 2x handles per point*/
83                         edge_len += 2 * nu->pntsu;
84                 }
85                 else if (nu->bp) {
86                         vert_len += nu->pntsu * nu->pntsv;
87                         /* segments between points */
88                         edge_len += (nu->pntsu - 1) * nu->pntsv;
89                         edge_len += (nu->pntsv - 1) * nu->pntsu;
90                 }
91         }
92         if (r_vert_len) {
93                 *r_vert_len = vert_len;
94         }
95         if (r_edge_len) {
96                 *r_edge_len = edge_len;
97         }
98 }
99
100 static void curve_render_wire_verts_edges_len_get(
101         const CurveCache *ob_curve_cache,
102         int *r_vert_len, int *r_edge_len)
103 {
104         BLI_assert(r_vert_len || r_edge_len);
105         int vert_len = 0;
106         int edge_len = 0;
107         for (const BevList *bl = ob_curve_cache->bev.first; bl; bl = bl->next) {
108                 if (bl->nr > 0) {
109                         const bool is_cyclic = bl->poly != -1;
110
111                         /* verts */
112                         vert_len += bl->nr;
113
114                         /* edges */
115                         edge_len += bl->nr;
116                         if (!is_cyclic) {
117                                 edge_len -= 1;
118                         }
119                 }
120         }
121         if (r_vert_len) {
122                 *r_vert_len = vert_len;
123         }
124         if (r_edge_len) {
125                 *r_edge_len = edge_len;
126         }
127 }
128
129 static int curve_render_normal_len_get(const ListBase *lb, const CurveCache *ob_curve_cache)
130 {
131         int normal_len = 0;
132         const BevList *bl;
133         const Nurb *nu;
134         for (bl = ob_curve_cache->bev.first, nu = lb->first; nu && bl; bl = bl->next, nu = nu->next) {
135                 int nr = bl->nr;
136                 int skip = nu->resolu / 16;
137 #if 0
138                 while (nr-- > 0) { /* accounts for empty bevel lists */
139                         normal_len += 1;
140                         nr -= skip;
141                 }
142 #else
143                 /* Same as loop above */
144                 normal_len += (nr / (skip + 1)) + ((nr % (skip + 1)) != 0);
145 #endif
146         }
147         return normal_len;
148 }
149
150 /* ---------------------------------------------------------------------- */
151 /* Curve Interface, indirect, partially cached access to complex data. */
152
/* Transient bundle of counts and borrowed pointers gathered for building
 * curve GPU batches. Created by curve_render_data_create() and freed with
 * curve_render_data_free() once the batches are built; owns nothing but
 * itself. */
typedef struct CurveRenderData {
        /* Bitmask of CU_DATATYPE_* flags this data was gathered for;
         * the accessor functions assert against it. */
        int types;

        /* Edit-mode overlay counts (points + handles). */
        struct {
                int vert_len;
                int edge_len;
        } overlay;

        /* Center-line counts from the evaluated bevel lists. */
        struct {
                int vert_len;
                int edge_len;
        } wire;

        /* edit mode normal's */
        struct {
                /* 'edge_len == len * 2'
                 * 'vert_len == len * 3' */
                int len;
        } normal;

        struct {
                EditFont *edit_font;
        } text;

        /* borrow from 'Object' */
        CurveCache *ob_curve_cache;

        /* borrow from 'Curve' */
        ListBase *nurbs;

        /* edit, index in nurb list */
        int actnu;
        /* edit, index in active nurb (BPoint or BezTriple) */
        int actvert;
} CurveRenderData;
188
/* Flags for CurveRenderData.types: which kinds of data to gather. */
enum {
        /* Wire center-line */
        CU_DATATYPE_WIRE        = 1 << 0,
        /* Edit-mode verts and optionally handles */
        CU_DATATYPE_OVERLAY     = 1 << 1,
        /* Edit-mode normals */
        CU_DATATYPE_NORMAL      = 1 << 2,
        /* Geometry */
        CU_DATATYPE_SURFACE     = 1 << 3,
        /* Text */
        CU_DATATYPE_TEXT_SELECT = 1 << 4,
};
201
202 /*
203  * ob_curve_cache can be NULL, only needed for CU_DATATYPE_WIRE
204  */
205 static CurveRenderData *curve_render_data_create(Curve *cu, CurveCache *ob_curve_cache, const int types)
206 {
207         CurveRenderData *rdata = MEM_callocN(sizeof(*rdata), __func__);
208         rdata->types = types;
209         ListBase *nurbs;
210
211         rdata->actnu = cu->actnu;
212         rdata->actvert = cu->actvert;
213
214         rdata->ob_curve_cache = ob_curve_cache;
215
216         if (types & CU_DATATYPE_WIRE) {
217                 curve_render_wire_verts_edges_len_get(
218                         rdata->ob_curve_cache,
219                         &rdata->wire.vert_len, &rdata->wire.edge_len);
220         }
221
222         if (cu->editnurb) {
223                 EditNurb *editnurb = cu->editnurb;
224                 nurbs = &editnurb->nurbs;
225
226                 if (types & CU_DATATYPE_OVERLAY) {
227                         curve_render_overlay_verts_edges_len_get(
228                                 nurbs,
229                                 &rdata->overlay.vert_len,
230                                 &rdata->overlay.edge_len);
231
232                         rdata->actnu = cu->actnu;
233                         rdata->actvert = cu->actvert;
234                 }
235                 if (types & CU_DATATYPE_NORMAL) {
236                         rdata->normal.len = curve_render_normal_len_get(nurbs, rdata->ob_curve_cache);
237                 }
238         }
239         else {
240                 nurbs = &cu->nurb;
241         }
242
243         rdata->nurbs = nurbs;
244
245         rdata->text.edit_font = cu->editfont;
246
247         return rdata;
248 }
249
250 static void curve_render_data_free(CurveRenderData *rdata)
251 {
252 #if 0
253         if (rdata->loose_verts) {
254                 MEM_freeN(rdata->loose_verts);
255         }
256 #endif
257         MEM_freeN(rdata);
258 }
259
/* Overlay vert count; only valid when gathered with CU_DATATYPE_OVERLAY. */
static int curve_render_data_overlay_verts_len_get(const CurveRenderData *rdata)
{
        BLI_assert(rdata->types & CU_DATATYPE_OVERLAY);
        return rdata->overlay.vert_len;
}
265
/* Overlay edge count; only valid when gathered with CU_DATATYPE_OVERLAY. */
static int curve_render_data_overlay_edges_len_get(const CurveRenderData *rdata)
{
        BLI_assert(rdata->types & CU_DATATYPE_OVERLAY);
        return rdata->overlay.edge_len;
}
271
/* Center-line vert count; only valid when gathered with CU_DATATYPE_WIRE. */
static int curve_render_data_wire_verts_len_get(const CurveRenderData *rdata)
{
        BLI_assert(rdata->types & CU_DATATYPE_WIRE);
        return rdata->wire.vert_len;
}
277
/* Center-line edge count; only valid when gathered with CU_DATATYPE_WIRE. */
static int curve_render_data_wire_edges_len_get(const CurveRenderData *rdata)
{
        BLI_assert(rdata->types & CU_DATATYPE_WIRE);
        return rdata->wire.edge_len;
}
283
/* Edit-mode normal count; only valid when gathered with CU_DATATYPE_NORMAL. */
static int curve_render_data_normal_len_get(const CurveRenderData *rdata)
{
        BLI_assert(rdata->types & CU_DATATYPE_NORMAL);
        return rdata->normal.len;
}
289
290
291 /* ---------------------------------------------------------------------- */
292 /* Curve GPUBatch Cache */
293
/* Per-Curve cache of GPU buffers/batches, stored on `cu->batch_cache`.
 * Built lazily by the getters below; torn down by curve_batch_cache_clear(). */
typedef struct CurveBatchCache {
        /* center-line */
        struct {
                GPUVertBuf *verts;
                GPUVertBuf *edges;
                GPUBatch *batch;
                GPUIndexBuf *elem;
        } wire;

        /* normals */
        struct {
                GPUVertBuf *verts;
                GPUVertBuf *edges;
                GPUBatch *batch;
                GPUIndexBuf *elem;
        } normal;

        /* control handles and vertices */
        struct {
                GPUBatch *edges;
                GPUBatch *verts;
                /* Shares the VBO of `verts` but draws only knot points via an IBO. */
                GPUBatch *verts_no_handles;
        } overlay;

        /* Evaluated surface geometry (from the display list). */
        struct {
                GPUVertBuf *verts;
                GPUIndexBuf *triangles_in_order;
                /* One batch per material slot, length `mat_len`. */
                GPUBatch **shaded_triangles;
                GPUBatch *batch;
                int mat_len;
        } surface;

        /* Wireframes */
        struct {
                GPUVertBuf *elem_vbo;
                GPUTexture *elem_tx;
                GPUTexture *verts_tx;
                uint tri_count;
        } face_wire;

        /* 3d text */
        struct {
                GPUBatch *select;
                GPUBatch *cursor;
        } text;

        /* settings to determine if cache is invalid */
        bool is_dirty;

        /* Length used when building `normal` buffers. */
        float normal_size;

        bool is_editmode;
} CurveBatchCache;
347
348 /* GPUBatch cache management. */
349
350 static bool curve_batch_cache_valid(Curve *cu)
351 {
352         CurveBatchCache *cache = cu->batch_cache;
353
354         if (cache == NULL) {
355                 return false;
356         }
357
358         if (cache->is_dirty) {
359                 return false;
360         }
361
362         if (cache->is_editmode != ((cu->editnurb != NULL) || (cu->editfont != NULL))) {
363                 return false;
364         }
365
366         if (cache->is_editmode) {
367                 if (cu->editfont) {
368                         /* TODO */
369                 }
370         }
371
372         return true;
373 }
374
375 static void curve_batch_cache_init(Curve *cu)
376 {
377         CurveBatchCache *cache = cu->batch_cache;
378
379         if (!cache) {
380                 cache = cu->batch_cache = MEM_callocN(sizeof(*cache), __func__);
381         }
382         else {
383                 memset(cache, 0, sizeof(*cache));
384         }
385
386 #if 0
387         ListBase *nurbs;
388         if (cu->editnurb) {
389                 EditNurb *editnurb = cu->editnurb;
390                 nurbs = &editnurb->nurbs;
391         }
392         else {
393                 nurbs = &cu->nurb;
394         }
395 #endif
396
397         cache->is_editmode = (cu->editnurb != NULL) || (cu->editfont != NULL);
398
399         cache->is_dirty = false;
400 }
401
/* Return the curve's batch cache, lazily clearing and re-initializing it
 * when missing or invalidated. */
static CurveBatchCache *curve_batch_cache_get(Curve *cu)
{
        if (!curve_batch_cache_valid(cu)) {
                curve_batch_cache_clear(cu);
                curve_batch_cache_init(cu);
        }
        return cu->batch_cache;
}
410
/* Public: tag the curve's batch cache for (partial) rebuild.
 *
 * BKE_CURVE_BATCH_DIRTY_ALL: mark everything stale (rebuilt lazily on next use).
 * BKE_CURVE_BATCH_DIRTY_SELECT: discard only selection-dependent batches
 * (edit-mode overlay + text select/cursor); geometry batches are kept. */
void DRW_curve_batch_cache_dirty_tag(Curve *cu, int mode)
{
        CurveBatchCache *cache = cu->batch_cache;
        if (cache == NULL) {
                /* Nothing cached yet, nothing to invalidate. */
                return;
        }
        switch (mode) {
                case BKE_CURVE_BATCH_DIRTY_ALL:
                        cache->is_dirty = true;
                        break;
                case BKE_CURVE_BATCH_DIRTY_SELECT:
                        /* editnurb */
                        GPU_BATCH_DISCARD_SAFE(cache->overlay.verts_no_handles);
                        GPU_BATCH_DISCARD_SAFE(cache->overlay.verts);
                        GPU_BATCH_DISCARD_SAFE(cache->overlay.edges);

                        /* editfont */
                        GPU_BATCH_DISCARD_SAFE(cache->text.select);
                        GPU_BATCH_DISCARD_SAFE(cache->text.cursor);
                        break;
                default:
                        BLI_assert(0);
        }
}
435
/* Free every GPU buffer/batch/texture owned by the cache, but not the cache
 * struct itself (see DRW_curve_batch_cache_free). Safe when no cache exists. */
static void curve_batch_cache_clear(Curve *cu)
{
        CurveBatchCache *cache = cu->batch_cache;
        if (!cache) {
                return;
        }

        GPU_BATCH_DISCARD_SAFE(cache->overlay.verts_no_handles);
        GPU_BATCH_DISCARD_SAFE(cache->overlay.verts);
        GPU_BATCH_DISCARD_SAFE(cache->overlay.edges);

        GPU_VERTBUF_DISCARD_SAFE(cache->surface.verts);
        GPU_INDEXBUF_DISCARD_SAFE(cache->surface.triangles_in_order);

        GPU_BATCH_DISCARD_ARRAY_SAFE(cache->surface.shaded_triangles, cache->surface.mat_len);
        GPU_BATCH_DISCARD_SAFE(cache->surface.batch);

        GPU_VERTBUF_DISCARD_SAFE(cache->face_wire.elem_vbo);
        DRW_TEXTURE_FREE_SAFE(cache->face_wire.elem_tx);
        DRW_TEXTURE_FREE_SAFE(cache->face_wire.verts_tx);
        cache->face_wire.tri_count = 0;

        /* don't own vbo & elems */
        GPU_BATCH_DISCARD_SAFE(cache->wire.batch);
        GPU_VERTBUF_DISCARD_SAFE(cache->wire.verts);
        GPU_VERTBUF_DISCARD_SAFE(cache->wire.edges);
        GPU_INDEXBUF_DISCARD_SAFE(cache->wire.elem);

        /* don't own vbo & elems */
        GPU_BATCH_DISCARD_SAFE(cache->normal.batch);
        GPU_VERTBUF_DISCARD_SAFE(cache->normal.verts);
        GPU_VERTBUF_DISCARD_SAFE(cache->normal.edges);
        GPU_INDEXBUF_DISCARD_SAFE(cache->normal.elem);

        /* 3d text */
        GPU_BATCH_DISCARD_SAFE(cache->text.cursor);
        GPU_BATCH_DISCARD_SAFE(cache->text.select);
}
474
/* Public: free all cached GPU data for this curve, including the cache
 * struct itself (`cu->batch_cache` is set to NULL by MEM_SAFE_FREE). */
void DRW_curve_batch_cache_free(Curve *cu)
{
        curve_batch_cache_clear(cu);
        MEM_SAFE_FREE(cu->batch_cache);
}
480
481 /* -------------------------------------------------------------------- */
482
483 /** \name Private Curve Cache API
484  * \{ */
485
486 /* GPUBatch cache usage. */
/* Lazily build (and cache) the VBO of bevel-point positions used for the
 * curve center-line; positions come from the object's evaluated bevel lists. */
static GPUVertBuf *curve_batch_cache_get_wire_verts(CurveRenderData *rdata, CurveBatchCache *cache)
{
        BLI_assert(rdata->types & CU_DATATYPE_WIRE);
        BLI_assert(rdata->ob_curve_cache != NULL);

        if (cache->wire.verts == NULL) {
                /* Static: the vertex format is created once and shared by all curves. */
                static GPUVertFormat format = { 0 };
                static struct { uint pos; } attr_id;
                if (format.attr_len == 0) {
                        /* initialize vertex format */
                        attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
                }

                const int vert_len = curve_render_data_wire_verts_len_get(rdata);

                GPUVertBuf *vbo = cache->wire.verts = GPU_vertbuf_create_with_format(&format);
                GPU_vertbuf_data_alloc(vbo, vert_len);
                int vbo_len_used = 0;
                for (const BevList *bl = rdata->ob_curve_cache->bev.first; bl; bl = bl->next) {
                        if (bl->nr > 0) {
                                const int i_end = vbo_len_used + bl->nr;
                                for (const BevPoint *bevp = bl->bevpoints; vbo_len_used < i_end; vbo_len_used++, bevp++) {
                                        GPU_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used, bevp->vec);
                                }
                        }
                }
                /* Every counted vert must have been written. */
                BLI_assert(vbo_len_used == vert_len);
        }

        return cache->wire.verts;
}
518
519 static GPUIndexBuf *curve_batch_cache_get_wire_edges(CurveRenderData *rdata, CurveBatchCache *cache)
520 {
521         BLI_assert(rdata->types & CU_DATATYPE_WIRE);
522         BLI_assert(rdata->ob_curve_cache != NULL);
523
524         if (cache->wire.edges == NULL) {
525                 const int vert_len = curve_render_data_wire_verts_len_get(rdata);
526                 const int edge_len = curve_render_data_wire_edges_len_get(rdata);
527                 int edge_len_used = 0;
528
529                 GPUIndexBufBuilder elb;
530                 GPU_indexbuf_init(&elb, GPU_PRIM_LINES, edge_len, vert_len);
531
532                 int i = 0;
533                 for (const BevList *bl = rdata->ob_curve_cache->bev.first; bl; bl = bl->next) {
534                         if (bl->nr > 0) {
535                                 const bool is_cyclic = bl->poly != -1;
536                                 const int i_end = i + (bl->nr);
537                                 int i_prev;
538                                 if (is_cyclic) {
539                                         i_prev = i + (bl->nr - 1);
540                                 }
541                                 else {
542                                         i_prev = i;
543                                         i += 1;
544                                 }
545                                 for (; i < i_end; i_prev = i++) {
546                                         GPU_indexbuf_add_line_verts(&elb, i_prev, i);
547                                         edge_len_used += 1;
548                                 }
549                         }
550                 }
551                 cache->wire.elem = GPU_indexbuf_build(&elb);
552         }
553
554         return cache->wire.elem;
555 }
556
/* Lazily build the VBO of edit-mode curve normal "combs": 3 verts per normal
 * (offset point a, the curve point itself, mirrored point b). */
static GPUVertBuf *curve_batch_cache_get_normal_verts(CurveRenderData *rdata, CurveBatchCache *cache)
{
        BLI_assert(rdata->types & CU_DATATYPE_NORMAL);
        BLI_assert(rdata->ob_curve_cache != NULL);

        if (cache->normal.verts == NULL) {
                /* Static: the vertex format is created once and shared by all curves. */
                static GPUVertFormat format = { 0 };
                static struct { uint pos; } attr_id;
                if (format.attr_len == 0) {
                        /* initialize vertex format */
                        attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
                }

                const int normal_len = curve_render_data_normal_len_get(rdata);
                const int vert_len = normal_len * 3;

                GPUVertBuf *vbo = cache->normal.verts = GPU_vertbuf_create_with_format(&format);
                GPU_vertbuf_data_alloc(vbo, vert_len);
                int vbo_len_used = 0;

                const BevList *bl;
                const Nurb *nu;

                /* Walk bevel lists and nurbs in lock-step — the same pairing used by
                 * curve_render_normal_len_get, so the stride arithmetic matches. */
                for (bl = rdata->ob_curve_cache->bev.first, nu = rdata->nurbs->first;
                     nu && bl;
                     bl = bl->next, nu = nu->next)
                {
                        const BevPoint *bevp = bl->bevpoints;
                        int nr = bl->nr;
                        int skip = nu->resolu / 16;

                        while (nr-- > 0) { /* accounts for empty bevel lists */
                                /* NOTE(review): assumes cache->normal_size was set by the caller
                                 * before building this VBO — confirm at call sites. */
                                const float fac = bevp->radius * cache->normal_size;
                                float vec_a[3]; /* Offset perpendicular to the curve */
                                float vec_b[3]; /* Delta along the curve */

                                vec_a[0] = fac;
                                vec_a[1] = 0.0f;
                                vec_a[2] = 0.0f;

                                /* Rotate the offset into the bevel point's frame, then remove
                                 * the component along the curve direction. */
                                mul_qt_v3(bevp->quat, vec_a);
                                madd_v3_v3fl(vec_a, bevp->dir, -fac);

                                /* Mirror across the plane perpendicular to the direction. */
                                reflect_v3_v3v3(vec_b, vec_a, bevp->dir);
                                negate_v3(vec_b);

                                add_v3_v3(vec_a, bevp->vec);
                                add_v3_v3(vec_b, bevp->vec);

                                GPU_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used++, vec_a);
                                GPU_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used++, bevp->vec);
                                GPU_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used++, vec_b);

                                /* Stride over `skip` points between samples. */
                                bevp += skip + 1;
                                nr -= skip;
                        }
                }
                BLI_assert(vbo_len_used == vert_len);
        }

        return cache->normal.verts;
}
619
620 static GPUIndexBuf *curve_batch_cache_get_normal_edges(CurveRenderData *rdata, CurveBatchCache *cache)
621 {
622         BLI_assert(rdata->types & CU_DATATYPE_NORMAL);
623         BLI_assert(rdata->ob_curve_cache != NULL);
624
625         if (cache->normal.edges == NULL) {
626                 const int normal_len = curve_render_data_normal_len_get(rdata);
627                 const int vert_len = normal_len * 3;
628                 const int edge_len = normal_len * 2;
629
630                 GPUIndexBufBuilder elb;
631                 GPU_indexbuf_init(&elb, GPU_PRIM_LINES, edge_len, vert_len);
632
633                 int vbo_len_used = 0;
634                 for (int i = 0; i < normal_len; i++) {
635                         GPU_indexbuf_add_line_verts(&elb, vbo_len_used + 0, vbo_len_used + 1);
636                         GPU_indexbuf_add_line_verts(&elb, vbo_len_used + 1, vbo_len_used + 2);
637                         vbo_len_used += 3;
638                 }
639
640                 BLI_assert(vbo_len_used == vert_len);
641
642                 cache->normal.elem = GPU_indexbuf_build(&elb);
643         }
644
645         return cache->normal.elem;
646 }
647
/* Build all edit-mode overlay batches (points, handle-less points, handle
 * edges) in one pass, since gathering CU_DATATYPE_OVERLAY data is slow.
 * The `data` attribute packs selection/active flags and the handle color id
 * (see the SELECT/ACTIVE_NURB/EVEN_U_BIT defines at the top of the file). */
static void curve_batch_cache_create_overlay_batches(Curve *cu)
{
        /* Since CU_DATATYPE_OVERLAY is slow to generate, generate them all at once */
        int options = CU_DATATYPE_OVERLAY;

        CurveBatchCache *cache = curve_batch_cache_get(cu);
        CurveRenderData *rdata = curve_render_data_create(cu, NULL, options);

        if (cache->overlay.verts == NULL) {
                /* Static: vertex format created once, shared by all curves. */
                static GPUVertFormat format = { 0 };
                static struct { uint pos, data; } attr_id;
                if (format.attr_len == 0) {
                        /* initialize vertex format */
                        attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
                        attr_id.data = GPU_vertformat_attr_add(&format, "data", GPU_COMP_U8, 1, GPU_FETCH_INT);
                }

                GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
                const int vbo_len_capacity = curve_render_data_overlay_verts_len_get(rdata);
                /* The IBO selects only knot points, for drawing verts without handles. */
                GPUIndexBufBuilder elb;
                GPU_indexbuf_init(&elb, GPU_PRIM_POINTS, vbo_len_capacity, vbo_len_capacity);
                int vbo_len_used = 0;
                GPU_vertbuf_data_alloc(vbo, vbo_len_capacity);
                int i = 0, nu_id = 0;
                for (Nurb *nu = rdata->nurbs->first; nu; nu = nu->next, nu_id++) {
                        const bool is_active_nurb = (nu_id == cu->actnu);
                        if (nu->bezt) {
                                int a = 0;
                                for (const BezTriple *bezt = nu->bezt; a < nu->pntsu; a++, bezt++) {
                                        if (bezt->hide == false) {
                                                /* NOTE(review): `actvert` is the index within the active
                                                 * nurb, but `i` accumulates across all nurbs — looks like
                                                 * this should also be gated on `is_active_nurb`; confirm. */
                                                const bool is_active = (i == rdata->actvert);
                                                /* +1: the knot is the middle of the 3 verts written below. */
                                                GPU_indexbuf_add_point_vert(&elb, vbo_len_used + 1);
                                                /* Emit handle 1, knot, handle 2 (bezt->vec[0..2]). */
                                                for (int j = 0; j < 3; j++) {
                                                        char vflag = ((&bezt->f1)[j] & SELECT) ? VFLAG_VERTEX_SELECTED : 0;
                                                        vflag |= (is_active) ? VFLAG_VERTEX_ACTIVE : 0;
                                                        vflag |= (is_active_nurb) ? ACTIVE_NURB : 0;
                                                        /* handle color id */
                                                        char col_id = (&bezt->h1)[j / 2];
                                                        vflag |= col_id << 4; /* << 4 because of EVEN_U_BIT */
                                                        GPU_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used, bezt->vec[j]);
                                                        GPU_vertbuf_attr_set(vbo, attr_id.data, vbo_len_used, &vflag);
                                                        vbo_len_used += 1;
                                                }
                                        }
                                        i += 1;
                                }
                        }
                        else if (nu->bp) {
                                int a = 0;
                                int pt_len = nu->pntsu * nu->pntsv;
                                for (const BPoint *bp = nu->bp; a < pt_len; a++, bp++) {
                                        if (bp->hide == false) {
                                                const bool is_active = (i == rdata->actvert);
                                                char vflag = (bp->f1 & SELECT) ? VFLAG_VERTEX_SELECTED : 0;
                                                vflag |= (is_active) ? VFLAG_VERTEX_ACTIVE : 0;
                                                vflag |= (is_active_nurb) ? ACTIVE_NURB : 0;
                                                vflag |= (((a % nu->pntsu) % 2) == 0) ? EVEN_U_BIT : 0;
                                                vflag |= COLOR_NURB_ULINE_ID << 4; /* << 4 because of EVEN_U_BIT */
                                                GPU_indexbuf_add_point_vert(&elb, vbo_len_used);
                                                GPU_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used, bp->vec);
                                                GPU_vertbuf_attr_set(vbo, attr_id.data, vbo_len_used, &vflag);
                                                vbo_len_used += 1;
                                        }
                                        i += 1;
                                }
                        }
                        /* NOTE(review): `i` was already incremented once per point in both
                         * branches above, so this looks like it double-counts `pntsu` and
                         * would shift the `actvert` comparison for later nurbs — verify. */
                        i += nu->pntsu;
                }
                /* Shrink to fit if hidden points were skipped. */
                if (vbo_len_capacity != vbo_len_used) {
                        GPU_vertbuf_data_resize(vbo, vbo_len_used);
                }

                GPUIndexBuf *ibo = GPU_indexbuf_build(&elb);

                /* `verts` owns the VBO; `verts_no_handles` shares it and owns only the IBO. */
                cache->overlay.verts = GPU_batch_create_ex(GPU_PRIM_POINTS, vbo, NULL, GPU_BATCH_OWNS_VBO);
                cache->overlay.verts_no_handles = GPU_batch_create_ex(GPU_PRIM_POINTS, vbo, ibo, GPU_BATCH_OWNS_INDEX);
        }

        if (cache->overlay.edges == NULL) {
                /* Reuse the position/data VBO built above for the edge batch. */
                GPUVertBuf *vbo = cache->overlay.verts->verts[0];

                const int edge_len =  curve_render_data_overlay_edges_len_get(rdata);
                const int vbo_len_capacity = edge_len * 2;

                GPUIndexBufBuilder elb;
                GPU_indexbuf_init(&elb, GPU_PRIM_LINES, vbo_len_capacity, vbo->vertex_len);

                int curr_index = 0;
                int i = 0;
                for (Nurb *nu = rdata->nurbs->first; nu; nu = nu->next, i++) {
                        if (nu->bezt) {
                                int a = 0;
                                for (const BezTriple *bezt = nu->bezt; a < nu->pntsu; a++, bezt++) {
                                        if (bezt->hide == false) {
                                                /* Two handle edges: knot->handle1, knot->handle2
                                                 * (vert order in the VBO is handle1, knot, handle2). */
                                                GPU_indexbuf_add_line_verts(&elb, curr_index + 1, curr_index + 0);
                                                GPU_indexbuf_add_line_verts(&elb, curr_index + 1, curr_index + 2);
                                                curr_index += 3;
                                        }
                                }
                        }
                        else if (nu->bp) {
                                int a = 0;
                                /* Index of the first visible point of the next V row
                                 * (visible points of the first U row come before it). */
                                int next_v_index = curr_index;
                                for (const BPoint *bp = nu->bp; a < nu->pntsu; a++, bp++) {
                                        if (bp->hide == false) {
                                                next_v_index += 1;
                                        }
                                }

                                int pt_len = nu->pntsu * nu->pntsv;
                                for (a = 0; a < pt_len; a++) {
                                        const BPoint *bp_curr = &nu->bp[a];
                                        /* Neighbor in U (same row) and in V (next row), if any. */
                                        const BPoint *bp_next_u = ((a % nu->pntsu) < (nu->pntsu - 1)) ? &nu->bp[a + 1] : NULL;
                                        const BPoint *bp_next_v = (a < (pt_len - nu->pntsu)) ? &nu->bp[a + nu->pntsu] : NULL;
                                        if (bp_curr->hide == false) {
                                                if (bp_next_u && (bp_next_u->hide == false)) {
                                                        GPU_indexbuf_add_line_verts(&elb, curr_index, curr_index + 1);
                                                }
                                                if (bp_next_v && (bp_next_v->hide == false)) {
                                                        GPU_indexbuf_add_line_verts(&elb, curr_index, next_v_index);
                                                }
                                                curr_index += 1;
                                        }
                                        if (bp_next_v && (bp_next_v->hide == false)) {
                                                next_v_index += 1;
                                        }
                                }
                        }
                }

                GPUIndexBuf *ibo = GPU_indexbuf_build(&elb);
                cache->overlay.edges = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, ibo, GPU_BATCH_OWNS_INDEX);
        }

        curve_render_data_free(rdata);
}
784
785 static GPUBatch *curve_batch_cache_get_pos_and_normals(CurveRenderData *rdata, CurveBatchCache *cache)
786 {
787         BLI_assert(rdata->types & CU_DATATYPE_SURFACE);
788         if (cache->surface.batch == NULL) {
789                 ListBase *lb = &rdata->ob_curve_cache->disp;
790
791                 if (cache->surface.verts == NULL) {
792                         cache->surface.verts = DRW_displist_vertbuf_calc_pos_with_normals(lb);
793                 }
794                 if (cache->surface.triangles_in_order == NULL) {
795                         cache->surface.triangles_in_order = DRW_displist_indexbuf_calc_triangles_in_order(lb);
796                 }
797                 cache->surface.batch = GPU_batch_create(
798                         GPU_PRIM_TRIS, cache->surface.verts, cache->surface.triangles_in_order);
799         }
800
801         return cache->surface.batch;
802 }
803
804 static GPUTexture *curve_batch_cache_get_edges_overlay_texture_buf(CurveRenderData *rdata, CurveBatchCache *cache)
805 {
806         BLI_assert(rdata->types & CU_DATATYPE_SURFACE);
807
808         if (cache->face_wire.elem_tx != NULL) {
809                 return cache->face_wire.elem_tx;
810         }
811
812         ListBase *lb = &rdata->ob_curve_cache->disp;
813
814         /* We need a special index buffer. */
815         GPUVertBuf *vbo = cache->face_wire.elem_vbo = DRW_displist_create_edges_overlay_texture_buf(lb);
816
817         /* Upload data early because we need to create the texture for it. */
818         GPU_vertbuf_use(vbo);
819         cache->face_wire.elem_tx = GPU_texture_create_from_vertbuf(vbo);
820         cache->face_wire.tri_count = vbo->vertex_alloc / 3;
821
822         return cache->face_wire.elem_tx;
823 }
824
825 static GPUTexture *curve_batch_cache_get_vert_pos_and_nor_in_order_buf(CurveRenderData *rdata, CurveBatchCache *cache)
826 {
827         BLI_assert(rdata->types & CU_DATATYPE_SURFACE);
828
829         if (cache->face_wire.verts_tx == NULL) {
830                 curve_batch_cache_get_pos_and_normals(rdata, cache);
831                 GPU_vertbuf_use(cache->surface.verts); /* Upload early for buffer texture creation. */
832                 cache->face_wire.verts_tx = GPU_texture_create_buffer(GPU_R32F, cache->surface.verts->vbo_id);
833         }
834
835         return cache->face_wire.verts_tx;
836 }
837
838 /** \} */
839
840
841 /* -------------------------------------------------------------------- */
842
843 /** \name Private Object/Font Cache API
844  * \{ */
845
846
/* Build (and cache) the triangle batch that highlights the selected
 * character boxes of an edit-mode text (font) object.
 * Each selection box becomes a quad drawn as two triangles (6 verts). */
static GPUBatch *curve_batch_cache_get_overlay_select(CurveRenderData *rdata, CurveBatchCache *cache)
{
	BLI_assert(rdata->types & CU_DATATYPE_TEXT_SELECT);
	if (cache->text.select == NULL) {
		EditFont *ef = rdata->text.edit_font;
		static GPUVertFormat format = { 0 };
		static struct { uint pos; } attr_id;
		if (format.attr_len == 0) {
			attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
		}

		GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
		/* 6 verts per box: two triangles forming a quad. */
		const int vbo_len_capacity = ef->selboxes_len * 6;
		int vbo_len_used = 0;
		GPU_vertbuf_data_alloc(vbo, vbo_len_capacity);

		float box[4][3];

		/* fill in xy below */
		/* Constant small Z for all corners — presumably an offset so the
		 * selection draws slightly in front of the text; TODO confirm. */
		box[0][2] = box[1][2] = box[2][2] = box[3][2] = 0.001;

		for (int i = 0; i < ef->selboxes_len; i++) {
			EditFontSelBox *sb = &ef->selboxes[i];

			/* When the next box sits on the same line (same y), stretch this
			 * box up to the next one's start so adjacent boxes have no gap;
			 * otherwise use the box's own width. */
			float selboxw;
			if (i + 1 != ef->selboxes_len) {
				if (ef->selboxes[i + 1].y == sb->y)
					selboxw = ef->selboxes[i + 1].x - sb->x;
				else
					selboxw = sb->w;
			}
			else {
				selboxw = sb->w;
			}

			if (sb->rot == 0.0f) {
				/* Axis-aligned quad: corners listed counter-clockwise. */
				copy_v2_fl2(box[0], sb->x, sb->y);
				copy_v2_fl2(box[1], sb->x + selboxw, sb->y);
				copy_v2_fl2(box[2], sb->x + selboxw, sb->y + sb->h);
				copy_v2_fl2(box[3], sb->x, sb->y + sb->h);
			}
			else {
				/* Rotated quad: rotate the local corner offsets around the
				 * box origin, then translate by the box position. */
				float mat[2][2];

				angle_to_mat2(mat, sb->rot);

				copy_v2_fl2(box[0], sb->x, sb->y);

				copy_v2_fl2(box[1], selboxw, 0.0f);
				mul_m2v2(mat, box[1]);
				/* NOTE(review): `&sb->x` is read as a 2D vector here — this
				 * relies on `x` and `y` being adjacent floats in
				 * EditFontSelBox; verify against DNA struct layout. */
				add_v2_v2(box[1], &sb->x);

				copy_v2_fl2(box[2], selboxw, sb->h);
				mul_m2v2(mat, box[2]);
				add_v2_v2(box[2], &sb->x);

				copy_v2_fl2(box[3], 0.0f, sb->h);
				mul_m2v2(mat, box[3]);
				add_v2_v2(box[3], &sb->x);
			}

			/* First triangle: corners 0-1-2. */
			GPU_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used++, box[0]);
			GPU_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used++, box[1]);
			GPU_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used++, box[2]);

			/* Second triangle: corners 0-2-3. */
			GPU_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used++, box[0]);
			GPU_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used++, box[2]);
			GPU_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used++, box[3]);
		}
		BLI_assert(vbo_len_used == vbo_len_capacity);
		cache->text.select = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
	}
	return cache->text.select;
}
921
922 static GPUBatch *curve_batch_cache_get_overlay_cursor(CurveRenderData *rdata, CurveBatchCache *cache)
923 {
924         BLI_assert(rdata->types & CU_DATATYPE_TEXT_SELECT);
925         if (cache->text.cursor == NULL) {
926                 static GPUVertFormat format = { 0 };
927                 static struct { uint pos; } attr_id;
928                 if (format.attr_len == 0) {
929                         attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
930                 }
931
932                 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
933                 const int vbo_len_capacity = 4;
934                 GPU_vertbuf_data_alloc(vbo, vbo_len_capacity);
935                 for (int i = 0; i < 4; i++) {
936                         GPU_vertbuf_attr_set(vbo, attr_id.pos, i, rdata->text.edit_font->textcurs[i]);
937                 }
938                 cache->text.cursor = GPU_batch_create_ex(GPU_PRIM_TRI_FAN, vbo, NULL, GPU_BATCH_OWNS_VBO);
939         }
940         return cache->text.cursor;
941 }
942
943 /** \} */
944
945 /* -------------------------------------------------------------------- */
946
947 /** \name Public Object/Curve API
948  * \{ */
949
950 GPUBatch *DRW_curve_batch_cache_get_wire_edge(Curve *cu, CurveCache *ob_curve_cache)
951 {
952         CurveBatchCache *cache = curve_batch_cache_get(cu);
953
954         if (cache->wire.batch == NULL) {
955                 /* create batch from Curve */
956                 CurveRenderData *rdata = curve_render_data_create(cu, ob_curve_cache, CU_DATATYPE_WIRE);
957
958                 cache->wire.batch = GPU_batch_create(
959                         GPU_PRIM_LINES,
960                         curve_batch_cache_get_wire_verts(rdata, cache),
961                         curve_batch_cache_get_wire_edges(rdata, cache));
962
963                 curve_render_data_free(rdata);
964         }
965         return cache->wire.batch;
966 }
967
968 GPUBatch *DRW_curve_batch_cache_get_normal_edge(Curve *cu, CurveCache *ob_curve_cache, float normal_size)
969 {
970         CurveBatchCache *cache = curve_batch_cache_get(cu);
971
972         if (cache->normal.batch != NULL) {
973                 cache->normal_size = normal_size;
974                 if (cache->normal_size != normal_size) {
975                         GPU_BATCH_DISCARD_SAFE(cache->normal.batch);
976                         GPU_VERTBUF_DISCARD_SAFE(cache->normal.edges);
977                 }
978         }
979         cache->normal_size = normal_size;
980
981         if (cache->normal.batch == NULL) {
982                 /* create batch from Curve */
983                 CurveRenderData *rdata = curve_render_data_create(cu, ob_curve_cache, CU_DATATYPE_NORMAL);
984
985                 cache->normal.batch = GPU_batch_create(
986                         GPU_PRIM_LINES,
987                         curve_batch_cache_get_normal_verts(rdata, cache),
988                         curve_batch_cache_get_normal_edges(rdata, cache));
989
990                 curve_render_data_free(rdata);
991                 cache->normal_size = normal_size;
992         }
993         return cache->normal.batch;
994 }
995
996 GPUBatch *DRW_curve_batch_cache_get_overlay_edges(Curve *cu)
997 {
998         CurveBatchCache *cache = curve_batch_cache_get(cu);
999
1000         if (cache->overlay.edges == NULL) {
1001                 curve_batch_cache_create_overlay_batches(cu);
1002         }
1003
1004         return cache->overlay.edges;
1005 }
1006
1007 GPUBatch *DRW_curve_batch_cache_get_overlay_verts(Curve *cu, bool handles)
1008 {
1009         CurveBatchCache *cache = curve_batch_cache_get(cu);
1010
1011         if (cache->overlay.verts == NULL || cache->overlay.verts_no_handles == NULL) {
1012                 curve_batch_cache_create_overlay_batches(cu);
1013         }
1014
1015         return (handles) ? cache->overlay.verts : cache->overlay.verts_no_handles;
1016 }
1017
1018 GPUBatch *DRW_curve_batch_cache_get_triangles_with_normals(
1019         struct Curve *cu, struct CurveCache *ob_curve_cache)
1020 {
1021         CurveBatchCache *cache = curve_batch_cache_get(cu);
1022
1023         if (cache->surface.batch == NULL) {
1024                 CurveRenderData *rdata = curve_render_data_create(cu, ob_curve_cache, CU_DATATYPE_SURFACE);
1025
1026                 curve_batch_cache_get_pos_and_normals(rdata, cache);
1027
1028                 curve_render_data_free(rdata);
1029         }
1030
1031         return cache->surface.batch;
1032 }
1033
/* Return (and lazily build) the array of per-material surface batches,
 * one GPUBatch per material slot. The returned array has
 * `gpumat_array_len` entries and is owned by the cache. */
GPUBatch **DRW_curve_batch_cache_get_surface_shaded(
        struct Curve *cu, struct CurveCache *ob_curve_cache,
        struct GPUMaterial **UNUSED(gpumat_array), uint gpumat_array_len)
{
	CurveBatchCache *cache = curve_batch_cache_get(cu);

	/* Material count changed: throw away the stale array so it is rebuilt
	 * below with the new length. */
	if (cache->surface.mat_len != gpumat_array_len) {
		GPU_BATCH_DISCARD_ARRAY_SAFE(cache->surface.shaded_triangles, cache->surface.mat_len);
	}

	if (cache->surface.shaded_triangles == NULL) {
		CurveRenderData *rdata = curve_render_data_create(cu, ob_curve_cache, CU_DATATYPE_SURFACE);
		ListBase *lb = &rdata->ob_curve_cache->disp;

		cache->surface.mat_len = gpumat_array_len;
		if (cu->flag & CU_UV_ORCO) {
			/* UVs from orco: a dedicated builder creates full per-material
			 * batches (positions, normals and UVs). */
			cache->surface.shaded_triangles = DRW_displist_batch_calc_tri_pos_normals_and_uv_split_by_material(
				lb, gpumat_array_len);
		}
		else {
			/* No UVs needed: share a single pos+normal vertex buffer and
			 * pair it with one index buffer per material. */
			cache->surface.shaded_triangles = MEM_mallocN(
				sizeof(*cache->surface.shaded_triangles) * gpumat_array_len, __func__);
			GPUIndexBuf **el = DRW_displist_indexbuf_calc_triangles_in_order_split_by_material(
				lb, gpumat_array_len);

			if (cache->surface.verts == NULL) {
				cache->surface.verts = DRW_displist_vertbuf_calc_pos_with_normals(lb);
			}

			/* Each batch takes ownership of its index buffer
			 * (GPU_BATCH_OWNS_INDEX), not of the shared vertex buffer. */
			for (int i = 0; i < gpumat_array_len; ++i) {
				cache->surface.shaded_triangles[i] = GPU_batch_create_ex(
					GPU_PRIM_TRIS, cache->surface.verts, el[i], GPU_BATCH_OWNS_INDEX);
			}

			MEM_freeN(el); /* Save `el` in cache? */
		}

		curve_render_data_free(rdata);
	}

	return cache->surface.shaded_triangles;
}
1076
1077 void DRW_curve_batch_cache_get_wireframes_face_texbuf(
1078         Curve *cu, CurveCache *ob_curve_cache,
1079         GPUTexture **verts_data, GPUTexture **face_indices, int *tri_count, bool UNUSED(reduce_len))
1080 {
1081         CurveBatchCache *cache = curve_batch_cache_get(cu);
1082
1083         if (cache->face_wire.elem_tx == NULL || cache->face_wire.verts_tx == NULL) {
1084                 CurveRenderData *rdata = curve_render_data_create(cu, ob_curve_cache, CU_DATATYPE_SURFACE);
1085
1086                 curve_batch_cache_get_edges_overlay_texture_buf(rdata, cache);
1087                 curve_batch_cache_get_vert_pos_and_nor_in_order_buf(rdata, cache);
1088
1089                 curve_render_data_free(rdata);
1090         }
1091
1092         *tri_count = cache->face_wire.tri_count;
1093         *face_indices = cache->face_wire.elem_tx;
1094         *verts_data = cache->face_wire.verts_tx;
1095 }
1096
1097 /* -------------------------------------------------------------------- */
1098
1099 /** \name Public Object/Font API
1100  * \{ */
1101
1102 GPUBatch *DRW_curve_batch_cache_get_overlay_select(Curve *cu)
1103 {
1104         CurveBatchCache *cache = curve_batch_cache_get(cu);
1105
1106         if (cache->text.select == NULL) {
1107                 CurveRenderData *rdata = curve_render_data_create(cu, NULL, CU_DATATYPE_TEXT_SELECT);
1108
1109                 curve_batch_cache_get_overlay_select(rdata, cache);
1110
1111                 curve_render_data_free(rdata);
1112         }
1113
1114         return cache->text.select;
1115 }
1116
1117 GPUBatch *DRW_curve_batch_cache_get_overlay_cursor(Curve *cu)
1118 {
1119         CurveBatchCache *cache = curve_batch_cache_get(cu);
1120
1121         if (cache->text.cursor == NULL) {
1122                 CurveRenderData *rdata = curve_render_data_create(cu, NULL, CU_DATATYPE_TEXT_SELECT);
1123
1124                 curve_batch_cache_get_overlay_cursor(rdata, cache);
1125
1126                 curve_render_data_free(rdata);
1127         }
1128
1129         return cache->text.cursor;
1130 }
1131
1132 /** \} */