/*
 * ***** BEGIN GPL LICENSE BLOCK *****
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2017 by Blender Foundation.
 * All rights reserved.
 *
 * Contributor(s): Blender Foundation, Mike Erwin, Dalai Felinto
 *
 * ***** END GPL LICENSE BLOCK *****
 */

/** \file draw_cache_impl_lattice.c
 *  \ingroup draw
 *
 * \brief Lattice API for render engines
 */

#include "MEM_guardedalloc.h"

#include "BLI_utildefines.h"
#include "BLI_math_vector.h"

#include "DNA_curve_types.h"
#include "DNA_lattice_types.h"
#include "DNA_meshdata_types.h"
#include "DNA_userdef_types.h"

#include "BKE_lattice.h"
#include "BKE_deform.h"
#include "BKE_colorband.h"

#include "GPU_batch.h"

#include "draw_cache_impl.h"  /* own include */

#define SELECT   1

/**
 * TODO
 * - 'DispList' is currently not used
 *   (we could avoid using it entirely, since it is due to be removed).
 */

static void lattice_batch_cache_clear(Lattice *lt);

/* ---------------------------------------------------------------------- */
/* Lattice Interface, direct access to basic data. */

static int vert_len_calc(int u, int v, int w)
{
	if (u <= 0 || v <= 0 || w <= 0) {
		return 0;
	}
	return u * v * w;
}

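/* Edge count of a full u*v*w grid: each of the w UV-layers contributes
 * ((u - 1) * v) edges along U and ((v - 1) * u) edges along V, and the
 * layers are linked by (w - 1) * (u * v) edges along W.
 * e.g. a 3*3*3 lattice: ((2*3) + (2*3)) * 3 + (2 * 9) = 54 edges. */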
static int edge_len_calc(int u, int v, int w)
{
	if (u <= 0 || v <= 0 || w <= 0) {
		return 0;
	}
	return (((((u - 1) * v) +
	          ((v - 1) * u)) * w) +
	        ((w - 1) * (u * v)));
}

static int lattice_render_verts_len_get(Lattice *lt)
{
	if (lt->editlatt) {
		lt = lt->editlatt->latt;
	}

	const int u = lt->pntsu;
	const int v = lt->pntsv;
	const int w = lt->pntsw;

	if ((lt->flag & LT_OUTSIDE) == 0) {
		return vert_len_calc(u, v, w);
	}
	else {
		/* TODO remove internal coords */
		return vert_len_calc(u, v, w);
	}
}

static int lattice_render_edges_len_get(Lattice *lt)
{
	if (lt->editlatt) {
		lt = lt->editlatt->latt;
	}

	const int u = lt->pntsu;
	const int v = lt->pntsv;
	const int w = lt->pntsw;

	if ((lt->flag & LT_OUTSIDE) == 0) {
		return edge_len_calc(u, v, w);
	}
	else {
		/* TODO remove internal coords */
		return edge_len_calc(u, v, w);
	}
}

/* ---------------------------------------------------------------------- */
/* Lattice Interface, indirect, partially cached access to complex data. */

typedef struct LatticeRenderData {
	int types;

	int vert_len;
	int edge_len;

	struct {
		int u_len, v_len, w_len;
	} dims;
	bool show_only_outside;

	struct EditLatt *edit_latt;
	BPoint *bp;

	int actbp;

	struct MDeformVert *dvert;
} LatticeRenderData;

enum {
	LR_DATATYPE_VERT       = 1 << 0,
	LR_DATATYPE_EDGE       = 1 << 1,
	LR_DATATYPE_OVERLAY    = 1 << 2,
};

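/* Gather only the data requested via 'types'. The returned struct is
 * short-lived: callers build their GPU buffers from it and then release
 * it with lattice_render_data_free(). */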
static LatticeRenderData *lattice_render_data_create(Lattice *lt, const int types)
{
	LatticeRenderData *rdata = MEM_callocN(sizeof(*rdata), __func__);
	rdata->types = types;

	if (lt->editlatt) {
		EditLatt *editlatt = lt->editlatt;
		lt = editlatt->latt;

		rdata->edit_latt = editlatt;

		rdata->dvert = lt->dvert;

		if (types & (LR_DATATYPE_VERT)) {
			rdata->vert_len = lattice_render_verts_len_get(lt);
		}
		if (types & (LR_DATATYPE_EDGE)) {
			rdata->edge_len = lattice_render_edges_len_get(lt);
		}
		if (types & LR_DATATYPE_OVERLAY) {
			rdata->actbp = lt->actbp;
		}
	}
	else {
		rdata->dvert = NULL;

		if (types & (LR_DATATYPE_VERT)) {
			rdata->vert_len = lattice_render_verts_len_get(lt);
		}
		if (types & (LR_DATATYPE_EDGE)) {
			rdata->edge_len = lattice_render_edges_len_get(lt);
			/* no edge data */
		}
	}

	rdata->bp = lt->def;

	rdata->dims.u_len = lt->pntsu;
	rdata->dims.v_len = lt->pntsv;
	rdata->dims.w_len = lt->pntsw;

	rdata->show_only_outside = (lt->flag & LT_OUTSIDE) != 0;
	rdata->actbp = lt->actbp;

	return rdata;
}

static void lattice_render_data_free(LatticeRenderData *rdata)
{
#if 0
	if (rdata->loose_verts) {
		MEM_freeN(rdata->loose_verts);
	}
#endif
	MEM_freeN(rdata);
}

static int lattice_render_data_verts_len_get(const LatticeRenderData *rdata)
{
	BLI_assert(rdata->types & LR_DATATYPE_VERT);
	return rdata->vert_len;
}

static int lattice_render_data_edges_len_get(const LatticeRenderData *rdata)
{
	BLI_assert(rdata->types & LR_DATATYPE_EDGE);
	return rdata->edge_len;
}

static const BPoint *lattice_render_data_vert_bpoint(const LatticeRenderData *rdata, const int vert_idx)
{
	BLI_assert(rdata->types & LR_DATATYPE_VERT);
	return &rdata->bp[vert_idx];
}

/* TODO, move into shader? */
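/* Map a vertex weight onto the usual weight-paint color ramp
 * (blue -> cyan -> green -> yellow -> red); out-of-range or NaN values
 * fall through to magenta. 'blend' scales brightness, so low weights
 * also read as darker. */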
static void rgb_from_weight(float r_rgb[3], const float weight)
{
	const float blend = ((weight / 2.0f) + 0.5f);

	if (weight <= 0.25f) {    /* blue->cyan */
		r_rgb[0] = 0.0f;
		r_rgb[1] = blend * weight * 4.0f;
		r_rgb[2] = blend;
	}
	else if (weight <= 0.50f) {  /* cyan->green */
		r_rgb[0] = 0.0f;
		r_rgb[1] = blend;
		r_rgb[2] = blend * (1.0f - ((weight - 0.25f) * 4.0f));
	}
	else if (weight <= 0.75f) {  /* green->yellow */
		r_rgb[0] = blend * ((weight - 0.50f) * 4.0f);
		r_rgb[1] = blend;
		r_rgb[2] = 0.0f;
	}
	else if (weight <= 1.0f) {  /* yellow->red */
		r_rgb[0] = blend;
		r_rgb[1] = blend * (1.0f - ((weight - 0.75f) * 4.0f));
		r_rgb[2] = 0.0f;
	}
	else {
		/* exceptional value, unclamped or nan,
		 * avoid uninitialized memory use */
		r_rgb[0] = 1.0f;
		r_rgb[1] = 0.0f;
		r_rgb[2] = 1.0f;
	}
}

static void lattice_render_data_weight_col_get(const LatticeRenderData *rdata, const int vert_idx,
                                               const int actdef, float r_col[4])
{
	if (actdef > -1) {
		float weight = defvert_find_weight(rdata->dvert + vert_idx, actdef);

		if (U.flag & USER_CUSTOM_RANGE) {
			BKE_colorband_evaluate(&U.coba_weight, weight, r_col);
		}
		else {
			rgb_from_weight(r_col, weight);
		}

		r_col[3] = 1.0f;
	}
	else {
		zero_v4(r_col);
	}
}

enum {
	VFLAG_VERTEX_SELECTED = 1 << 0,
	VFLAG_VERTEX_ACTIVE   = 1 << 1,
};

/* ---------------------------------------------------------------------- */
/* Lattice GPUBatch Cache */

typedef struct LatticeBatchCache {
	GPUVertBuf *pos;
	GPUIndexBuf *edges;

	GPUBatch *all_verts;
	GPUBatch *all_edges;

	GPUBatch *overlay_verts;

	/* settings to determine if cache is invalid */
	bool is_dirty;

	struct {
		int u_len, v_len, w_len;
	} dims;
	bool show_only_outside;

	bool is_editmode;
} LatticeBatchCache;

/* GPUBatch cache management. */

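/* A cache is only reusable if it was built for the same topology
 * (u/v/w point counts), the same LT_OUTSIDE setting and the same
 * edit-mode state, and has not been tagged dirty since. */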
static bool lattice_batch_cache_valid(Lattice *lt)
{
	LatticeBatchCache *cache = lt->batch_cache;

	if (cache == NULL) {
		return false;
	}

	if (cache->is_editmode != (lt->editlatt != NULL)) {
		return false;
	}

	if (cache->is_dirty) {
		return false;
	}
	else {
		if ((cache->dims.u_len != lt->pntsu) ||
		    (cache->dims.v_len != lt->pntsv) ||
		    (cache->dims.w_len != lt->pntsw) ||
		    (cache->show_only_outside != ((lt->flag & LT_OUTSIDE) != 0)))
		{
			return false;
		}
	}

	return true;
}

static void lattice_batch_cache_init(Lattice *lt)
{
	LatticeBatchCache *cache = lt->batch_cache;

	if (!cache) {
		cache = lt->batch_cache = MEM_callocN(sizeof(*cache), __func__);
	}
	else {
		memset(cache, 0, sizeof(*cache));
	}

	cache->dims.u_len = lt->pntsu;
	cache->dims.v_len = lt->pntsv;
	cache->dims.w_len = lt->pntsw;
	cache->show_only_outside = (lt->flag & LT_OUTSIDE) != 0;

	cache->is_editmode = lt->editlatt != NULL;

	cache->is_dirty = false;
}

static LatticeBatchCache *lattice_batch_cache_get(Lattice *lt)
{
	if (!lattice_batch_cache_valid(lt)) {
		lattice_batch_cache_clear(lt);
		lattice_batch_cache_init(lt);
	}
	return lt->batch_cache;
}

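/* DIRTY_ALL invalidates the whole cache on the next lookup;
 * DIRTY_SELECT only discards the overlay batch, since selection state
 * is baked into its per-vertex 'data' attribute. */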
void DRW_lattice_batch_cache_dirty_tag(Lattice *lt, int mode)
{
	LatticeBatchCache *cache = lt->batch_cache;
	if (cache == NULL) {
		return;
	}
	switch (mode) {
		case BKE_LATTICE_BATCH_DIRTY_ALL:
			cache->is_dirty = true;
			break;
		case BKE_LATTICE_BATCH_DIRTY_SELECT:
			/* TODO Separate Flag vbo */
			GPU_BATCH_DISCARD_SAFE(cache->overlay_verts);
			break;
		default:
			BLI_assert(0);
	}
}

static void lattice_batch_cache_clear(Lattice *lt)
{
	LatticeBatchCache *cache = lt->batch_cache;
	if (!cache) {
		return;
	}

	GPU_BATCH_DISCARD_SAFE(cache->all_verts);
	GPU_BATCH_DISCARD_SAFE(cache->all_edges);
	GPU_BATCH_DISCARD_SAFE(cache->overlay_verts);

	GPU_VERTBUF_DISCARD_SAFE(cache->pos);
	GPU_INDEXBUF_DISCARD_SAFE(cache->edges);
}

void DRW_lattice_batch_cache_free(Lattice *lt)
{
	lattice_batch_cache_clear(lt);
	MEM_SAFE_FREE(lt->batch_cache);
}

/* GPUBatch cache usage. */
static GPUVertBuf *lattice_batch_cache_get_pos(LatticeRenderData *rdata, LatticeBatchCache *cache,
                                               bool use_weight, const int actdef)
{
	BLI_assert(rdata->types & LR_DATATYPE_VERT);

	if (cache->pos == NULL) {
		static GPUVertFormat format = { 0 };
		static struct { uint pos, col; } attr_id;

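		/* The format is rebuilt whenever the VBO is (re)created, since the
		 * attribute layout depends on 'use_weight' (the color attribute is
		 * only added for weight drawing). */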
		GPU_vertformat_clear(&format);

		/* initialize vertex format */
		attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);

		if (use_weight) {
			attr_id.col = GPU_vertformat_attr_add(&format, "color", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
		}

		const int vert_len = lattice_render_data_verts_len_get(rdata);

		cache->pos = GPU_vertbuf_create_with_format(&format);
		GPU_vertbuf_data_alloc(cache->pos, vert_len);
		for (int i = 0; i < vert_len; ++i) {
			const BPoint *bp = lattice_render_data_vert_bpoint(rdata, i);
			GPU_vertbuf_attr_set(cache->pos, attr_id.pos, i, bp->vec);

			if (use_weight) {
				float w_col[4];
				lattice_render_data_weight_col_get(rdata, i, actdef, w_col);

				GPU_vertbuf_attr_set(cache->pos, attr_id.col, i, w_col);
			}
		}
	}

	return cache->pos;
}

static GPUIndexBuf *lattice_batch_cache_get_edges(LatticeRenderData *rdata, LatticeBatchCache *cache)
{
	BLI_assert(rdata->types & (LR_DATATYPE_VERT | LR_DATATYPE_EDGE));

	if (cache->edges == NULL) {
		const int vert_len = lattice_render_data_verts_len_get(rdata);
		const int edge_len = lattice_render_data_edges_len_get(rdata);
		int edge_len_real = 0;

		GPUIndexBufBuilder elb;
		GPU_indexbuf_init(&elb, GPU_PRIM_LINES, edge_len, vert_len);

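/* Map (u, v, w) grid coordinates to the flat BPoint index:
 * u varies fastest, then v, then w, matching the layout of lt->def. */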
#define LATT_INDEX(u, v, w) \
	((((w) * rdata->dims.v_len + (v)) * rdata->dims.u_len) + (u))

		for (int w = 0; w < rdata->dims.w_len; w++) {
			int wxt = (w == 0 || w == rdata->dims.w_len - 1);
			for (int v = 0; v < rdata->dims.v_len; v++) {
				int vxt = (v == 0 || v == rdata->dims.v_len - 1);
				for (int u = 0; u < rdata->dims.u_len; u++) {
					int uxt = (u == 0 || u == rdata->dims.u_len - 1);

					if (w && ((uxt || vxt) || !rdata->show_only_outside)) {
						GPU_indexbuf_add_line_verts(&elb, LATT_INDEX(u, v, w - 1), LATT_INDEX(u, v, w));
						BLI_assert(edge_len_real <= edge_len);
						edge_len_real++;
					}
					if (v && ((uxt || wxt) || !rdata->show_only_outside)) {
						GPU_indexbuf_add_line_verts(&elb, LATT_INDEX(u, v - 1, w), LATT_INDEX(u, v, w));
						BLI_assert(edge_len_real <= edge_len);
						edge_len_real++;
					}
					if (u && ((vxt || wxt) || !rdata->show_only_outside)) {
						GPU_indexbuf_add_line_verts(&elb, LATT_INDEX(u - 1, v, w), LATT_INDEX(u, v, w));
						BLI_assert(edge_len_real <= edge_len);
						edge_len_real++;
					}
				}
			}
		}

#undef LATT_INDEX

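		/* With LT_OUTSIDE enabled, interior edges are skipped above, so fewer
		 * than 'edge_len' edges may have been added (see the TODO in
		 * lattice_render_edges_len_get). */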
		if (rdata->show_only_outside) {
			BLI_assert(edge_len_real <= edge_len);
		}
		else {
			BLI_assert(edge_len_real == edge_len);
		}

		cache->edges = GPU_indexbuf_build(&elb);
	}

	return cache->edges;
}

static void lattice_batch_cache_create_overlay_batches(Lattice *lt)
{
	/* Since LR_DATATYPE_OVERLAY data is slow to generate, generate it all at once. */
	int options = LR_DATATYPE_VERT | LR_DATATYPE_OVERLAY;

	LatticeBatchCache *cache = lattice_batch_cache_get(lt);
	LatticeRenderData *rdata = lattice_render_data_create(lt, options);

	if (cache->overlay_verts == NULL) {
		static GPUVertFormat format = { 0 };
		static struct { uint pos, data; } attr_id;
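		/* Unlike the position VBO, this layout never varies, so the static
		 * format is initialized once and reused across rebuilds. */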
		if (format.attr_len == 0) {
			/* initialize vertex format */
			attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
			attr_id.data = GPU_vertformat_attr_add(&format, "data", GPU_COMP_U8, 1, GPU_FETCH_INT);
		}

		const int vert_len = lattice_render_data_verts_len_get(rdata);

		GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
		GPU_vertbuf_data_alloc(vbo, vert_len);
		for (int i = 0; i < vert_len; ++i) {
			const BPoint *bp = lattice_render_data_vert_bpoint(rdata, i);

			char vflag = 0;
			if (bp->f1 & SELECT) {
				if (i == rdata->actbp) {
					vflag |= VFLAG_VERTEX_ACTIVE;
				}
				else {
					vflag |= VFLAG_VERTEX_SELECTED;
				}
			}

			GPU_vertbuf_attr_set(vbo, attr_id.pos, i, bp->vec);
			GPU_vertbuf_attr_set(vbo, attr_id.data, i, &vflag);
		}

		cache->overlay_verts = GPU_batch_create_ex(GPU_PRIM_POINTS, vbo, NULL, GPU_BATCH_OWNS_VBO);
	}

	lattice_render_data_free(rdata);
}

GPUBatch *DRW_lattice_batch_cache_get_all_edges(Lattice *lt, bool use_weight, const int actdef)
{
	LatticeBatchCache *cache = lattice_batch_cache_get(lt);

	if (cache->all_edges == NULL) {
		/* create batch from Lattice */
		LatticeRenderData *rdata = lattice_render_data_create(lt, LR_DATATYPE_VERT | LR_DATATYPE_EDGE);

		cache->all_edges = GPU_batch_create(GPU_PRIM_LINES,
		                                    lattice_batch_cache_get_pos(rdata, cache, use_weight, actdef),
		                                    lattice_batch_cache_get_edges(rdata, cache));

		lattice_render_data_free(rdata);
	}

	return cache->all_edges;
}

GPUBatch *DRW_lattice_batch_cache_get_all_verts(Lattice *lt)
{
	LatticeBatchCache *cache = lattice_batch_cache_get(lt);

	if (cache->all_verts == NULL) {
		LatticeRenderData *rdata = lattice_render_data_create(lt, LR_DATATYPE_VERT);

		cache->all_verts = GPU_batch_create(GPU_PRIM_POINTS, lattice_batch_cache_get_pos(rdata, cache, false, -1), NULL);

		lattice_render_data_free(rdata);
	}

	return cache->all_verts;
}

GPUBatch *DRW_lattice_batch_cache_get_overlay_verts(Lattice *lt)
{
	LatticeBatchCache *cache = lattice_batch_cache_get(lt);

	if (cache->overlay_verts == NULL) {
		lattice_batch_cache_create_overlay_batches(lt);
	}

	return cache->overlay_verts;
}