Sculpt Draw: Add support for wireframe geometry
[blender.git] / source / blender / gpu / intern / gpu_buffers.c
1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  *
16  * The Original Code is Copyright (C) 2005 Blender Foundation.
17  * All rights reserved.
18  */
19
20 /** \file
21  * \ingroup gpu
22  *
23  * Mesh drawing using OpenGL VBO (Vertex Buffer Objects)
24  */
25
26 #include <limits.h>
27 #include <stddef.h>
28 #include <string.h>
29
30 #include "MEM_guardedalloc.h"
31
32 #include "BLI_bitmap.h"
33 #include "BLI_math.h"
34 #include "BLI_utildefines.h"
35 #include "BLI_ghash.h"
36
37 #include "DNA_meshdata_types.h"
38
39 #include "BKE_ccg.h"
40 #include "BKE_DerivedMesh.h"
41 #include "BKE_paint.h"
42 #include "BKE_mesh.h"
43 #include "BKE_pbvh.h"
44
45 #include "GPU_buffers.h"
46 #include "GPU_draw.h"
47 #include "GPU_immediate.h"
48 #include "GPU_batch.h"
49
50 #include "bmesh.h"
51
52 /* XXX: the code in this file implements optimized PBVH drawing only
53  * and doesn't interact with the generic GPU buffer code elsewhere. */
54
/* Per-PBVH-node GPU buffers: one vertex buffer plus index buffers/batches
 * for triangle fill and wireframe lines, each in a full-detail and a
 * coarse ("fast") variant. */
struct GPU_PBVH_Buffers {
	/* Full-detail and coarse index buffers (fast = corner verts only). */
	GPUIndexBuf *index_buf, *index_buf_fast;
	/* Wireframe line index buffers, full-detail and coarse. */
	GPUIndexBuf *index_lines_buf, *index_lines_buf_fast;
	GPUVertBuf *vert_buf;

	/* Batches pairing vert_buf with the index buffers above. */
	GPUBatch *lines;
	GPUBatch *lines_fast;
	GPUBatch *triangles;
	GPUBatch *triangles_fast;

	/* mesh pointers in case buffer allocation fails */
	const MPoly *mpoly;
	const MLoop *mloop;
	const MLoopTri *looptri;
	const MVert *mvert;

	/* Looptri indices belonging to this node. */
	const int *face_indices;
	int        face_indices_len;

	/* grid pointers (multires drawing) */
	CCGKey gridkey;
	CCGElem **grids;
	const DMFlagMat *grid_flag_mats;
	BLI_bitmap * const *grid_hidden;
	const int *grid_indices;
	int totgrid;

	bool use_bmesh;

	uint tot_tri, tot_quad;

	/* The PBVH ensures that either all faces in the node are
	 * smooth-shaded or all faces are flat-shaded */
	bool smooth;

	/* False when the last update found only zero mask values. */
	bool show_mask;
};
92
/* Vertex attribute ids for the shared PBVH vertex format;
 * assigned once, on first buffer creation. */
static struct {
	uint pos, nor, msk;
} g_vbo_id = {0};
96
97 /** \} */
98
99 /* -------------------------------------------------------------------- */
100 /** \name PBVH Utils
101  * \{ */
102
103 /* Allocates a non-initialized buffer to be sent to GPU.
104  * Return is false it indicates that the memory map failed. */
105 static bool gpu_pbvh_vert_buf_data_set(GPU_PBVH_Buffers *buffers, uint vert_len)
106 {
107         if (buffers->vert_buf == NULL) {
108                 /* Initialize vertex buffer */
109                 /* match 'VertexBufferFormat' */
110
111                 static GPUVertFormat format = {0};
112                 if (format.attr_len == 0) {
113                         g_vbo_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
114                         g_vbo_id.nor = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
115                         g_vbo_id.msk = GPU_vertformat_attr_add(&format, "msk", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
116                 }
117 #if 0
118                 buffers->vert_buf = GPU_vertbuf_create_with_format_ex(&format, GPU_USAGE_DYNAMIC);
119                 GPU_vertbuf_data_alloc(buffers->vert_buf, vert_len);
120         }
121         else if (vert_len != buffers->vert_buf->vertex_len) {
122                 GPU_vertbuf_data_resize(buffers->vert_buf, vert_len);
123         }
124 #else
125                 buffers->vert_buf = GPU_vertbuf_create_with_format_ex(&format, GPU_USAGE_STATIC);
126         }
127         GPU_vertbuf_data_alloc(buffers->vert_buf, vert_len);
128 #endif
129         return buffers->vert_buf->data != NULL;
130 }
131
132 static void gpu_pbvh_batch_init(GPU_PBVH_Buffers *buffers, GPUPrimType prim)
133 {
134         /* force flushing to the GPU */
135         if (buffers->vert_buf->data) {
136                 GPU_vertbuf_use(buffers->vert_buf);
137         }
138
139         if (buffers->triangles == NULL) {
140                 buffers->triangles = GPU_batch_create(
141                         prim, buffers->vert_buf,
142                         /* can be NULL */
143                         buffers->index_buf);
144         }
145
146         if ((buffers->triangles_fast == NULL) && buffers->index_buf_fast) {
147                 buffers->triangles_fast = GPU_batch_create(
148                         prim, buffers->vert_buf,
149                         buffers->index_buf_fast);
150         }
151
152         if (buffers->lines == NULL) {
153                 BLI_assert(buffers->index_lines_buf != NULL);
154                 buffers->lines = GPU_batch_create(
155                         GPU_PRIM_LINES, buffers->vert_buf,
156                         buffers->index_lines_buf);
157         }
158
159         if ((buffers->lines_fast == NULL) && buffers->index_lines_buf_fast) {
160                 buffers->lines_fast = GPU_batch_create(
161                         GPU_PRIM_LINES, buffers->vert_buf,
162                         buffers->index_lines_buf_fast);
163         }
164 }
165
166 /** \} */
167
168 /* -------------------------------------------------------------------- */
169 /** \name Mesh PBVH
170  * \{ */
171
/* Refills the node's vertex buffer from mesh data and (re)creates the
 * draw batches. Smooth shading shares one vert per mesh vertex; flat
 * shading emits 3 verts per visible triangle so face normals are not
 * shared. The resulting VBO ordering must match the index buffers built
 * in GPU_pbvh_mesh_buffers_build. */
void GPU_pbvh_mesh_buffers_update(
        GPU_PBVH_Buffers *buffers, const MVert *mvert,
        const int *vert_indices, int totvert, const float *vmask,
        const int (*face_vert_indices)[3],
        const int update_flags)
{
	const bool show_mask = (update_flags & GPU_PBVH_BUFFERS_SHOW_MASK) != 0;
	bool empty_mask = true;

	{
		/* Flat shading duplicates verts: 3 per visible triangle. */
		int totelem = (buffers->smooth ? totvert : (buffers->tot_tri * 3));

		/* Build VBO */
		if (gpu_pbvh_vert_buf_data_set(buffers, totelem)) {
			/* Vertex data is shared if smooth-shaded, but separate
			 * copies are made for flat shading because normals
			 * shouldn't be shared. */
			if (buffers->smooth) {
				for (uint i = 0; i < totvert; ++i) {
					const MVert *v = &mvert[vert_indices[i]];
					GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, i, v->co);
					GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, i, v->no);
				}

				if (vmask && show_mask) {
					/* Mask is looked up via the looptri loops; vidx maps back
					 * into this node's shared vertex buffer. */
					for (uint i = 0; i < buffers->face_indices_len; i++) {
						const MLoopTri *lt = &buffers->looptri[buffers->face_indices[i]];
						for (uint j = 0; j < 3; j++) {
							int vidx = face_vert_indices[i][j];
							int v_index = buffers->mloop[lt->tri[j]].v;
							float fmask = vmask[v_index];
							GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vidx, &fmask);
							empty_mask = empty_mask && (fmask == 0.0f);
						}
					}
				}
			}
			else {
				/* calculate normal for each polygon only once */
				uint mpoly_prev = UINT_MAX;
				short no[3];
				int vbo_index = 0;

				for (uint i = 0; i < buffers->face_indices_len; i++) {
					const MLoopTri *lt = &buffers->looptri[buffers->face_indices[i]];
					const uint vtri[3] = {
					    buffers->mloop[lt->tri[0]].v,
					    buffers->mloop[lt->tri[1]].v,
					    buffers->mloop[lt->tri[2]].v,
					};

					/* Hidden faces get no verts; vbo_index only advances for
					 * visible triangles (matches tot_tri sizing above). */
					if (paint_is_face_hidden(lt, mvert, buffers->mloop))
						continue;

					/* Face normal and mask */
					if (lt->poly != mpoly_prev) {
						const MPoly *mp = &buffers->mpoly[lt->poly];
						float fno[3];
						BKE_mesh_calc_poly_normal(mp, &buffers->mloop[mp->loopstart], mvert, fno);
						normal_float_to_short_v3(no, fno);
						mpoly_prev = lt->poly;
					}

					/* Average the three corner masks for flat shading. */
					float fmask = 0.0f;
					if (vmask && show_mask) {
						fmask = (vmask[vtri[0]] + vmask[vtri[1]] + vmask[vtri[2]]) / 3.0f;
					}

					for (uint j = 0; j < 3; j++) {
						const MVert *v = &mvert[vtri[j]];

						GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index, v->co);
						GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index, no);
						GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index, &fmask);

						vbo_index++;
					}

					empty_mask = empty_mask && (fmask == 0.0f);
				}
			}

			gpu_pbvh_batch_init(buffers, GPU_PRIM_TRIS);
		}
	}

	buffers->show_mask = !empty_mask;
	buffers->mvert = mvert;
}
261
262 GPU_PBVH_Buffers *GPU_pbvh_mesh_buffers_build(
263         const int (*face_vert_indices)[3],
264         const MPoly *mpoly, const MLoop *mloop, const MLoopTri *looptri,
265         const MVert *mvert,
266         const int *face_indices,
267         const int  face_indices_len)
268 {
269         GPU_PBVH_Buffers *buffers;
270         int i, tottri;
271
272         buffers = MEM_callocN(sizeof(GPU_PBVH_Buffers), "GPU_Buffers");
273
274         /* smooth or flat for all */
275         buffers->smooth = mpoly[looptri[face_indices[0]].poly].flag & ME_SMOOTH;
276
277         buffers->show_mask = false;
278
279         /* Count the number of visible triangles */
280         for (i = 0, tottri = 0; i < face_indices_len; ++i) {
281                 const MLoopTri *lt = &looptri[face_indices[i]];
282                 if (!paint_is_face_hidden(lt, mvert, mloop))
283                         tottri++;
284         }
285
286         if (tottri == 0) {
287                 buffers->tot_tri = 0;
288
289                 buffers->mpoly = mpoly;
290                 buffers->mloop = mloop;
291                 buffers->looptri = looptri;
292                 buffers->face_indices = face_indices;
293                 buffers->face_indices_len = 0;
294
295                 return buffers;
296         }
297
298         GPU_BATCH_DISCARD_SAFE(buffers->triangles);
299         GPU_BATCH_DISCARD_SAFE(buffers->lines);
300         GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
301         GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf);
302
303         /* An element index buffer is used for smooth shading, but flat
304          * shading requires separate vertex normals so an index buffer
305          * can't be used there. */
306         if (buffers->smooth) {
307                 /* Fill the triangle and line buffers. */
308                 GPUIndexBufBuilder elb, elb_lines;
309                 GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tottri, INT_MAX);
310                 GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, INT_MAX);
311
312                 for (i = 0; i < face_indices_len; ++i) {
313                         const MLoopTri *lt = &looptri[face_indices[i]];
314
315                         /* Skip hidden faces */
316                         if (paint_is_face_hidden(lt, mvert, mloop))
317                                 continue;
318
319                         GPU_indexbuf_add_tri_verts(&elb, UNPACK3(face_vert_indices[i]));
320
321                         /* TODO skip "non-real" edges. */
322                         GPU_indexbuf_add_line_verts(&elb_lines, face_vert_indices[i][0], face_vert_indices[i][1]);
323                         GPU_indexbuf_add_line_verts(&elb_lines, face_vert_indices[i][1], face_vert_indices[i][2]);
324                         GPU_indexbuf_add_line_verts(&elb_lines, face_vert_indices[i][2], face_vert_indices[i][0]);
325                 }
326                 buffers->index_buf = GPU_indexbuf_build(&elb);
327                 buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
328         }
329         else {
330                 /* Fill the only the line buffer. */
331                 GPUIndexBufBuilder elb_lines;
332                 GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, INT_MAX);
333
334                 for (i = 0; i < face_indices_len; ++i) {
335                         const MLoopTri *lt = &looptri[face_indices[i]];
336
337                         /* Skip hidden faces */
338                         if (paint_is_face_hidden(lt, mvert, mloop))
339                                 continue;
340
341                         /* TODO skip "non-real" edges. */
342                         GPU_indexbuf_add_line_verts(&elb_lines, i * 3 + 0, i * 3 + 1);
343                         GPU_indexbuf_add_line_verts(&elb_lines, i * 3 + 1, i * 3 + 2);
344                         GPU_indexbuf_add_line_verts(&elb_lines, i * 3 + 2, i * 3 + 0);
345                 }
346                 buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
347         }
348
349         buffers->tot_tri = tottri;
350
351         buffers->mpoly = mpoly;
352         buffers->mloop = mloop;
353         buffers->looptri = looptri;
354
355         buffers->face_indices = face_indices;
356         buffers->face_indices_len = face_indices_len;
357
358         return buffers;
359 }
360
361 /** \} */
362
363 /* -------------------------------------------------------------------- */
364 /** \name Grid PBVH
365  * \{ */
366
/* Builds the coarse ("fast") triangle and wireframe index buffers for
 * multires grids: only the 4 corner verts of each grid are referenced,
 * giving 2 triangles (smooth) or a 4-vert restart-separated triangle
 * strip (flat) plus 4 wire edges per grid. */
static void gpu_pbvh_grid_fill_fast_buffer(GPU_PBVH_Buffers *buffers, int totgrid, int gridsize)
{
	GPUIndexBufBuilder elb, elb_lines;
	if (buffers->smooth) {
		/* Smooth VBO stores one vert per grid element in row order, so
		 * corner indices follow directly from gridsize. */
		GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, 4 * totgrid, INT_MAX);
		GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, 2 * totgrid, INT_MAX);
		for (int i = 0; i < totgrid; i++) {
			const uint v0 = i * gridsize * gridsize + gridsize - 1;
			const uint v1 = i * gridsize * gridsize;
			const uint v2 = (i + 1) * gridsize * gridsize - gridsize;
			const uint v3 = (i + 1) * gridsize * gridsize - 1;

			GPU_indexbuf_add_tri_verts(&elb, v0, v1, v2);
			GPU_indexbuf_add_tri_verts(&elb, v3, v0, v2);

			GPU_indexbuf_add_line_verts(&elb_lines, v0, v1);
			GPU_indexbuf_add_line_verts(&elb_lines, v1, v2);
			GPU_indexbuf_add_line_verts(&elb_lines, v2, v3);
			GPU_indexbuf_add_line_verts(&elb_lines, v3, v0);
		}
	}
	else {
		/* Flat VBO layout (see GPU_pbvh_grid_buffers_update): 4 verts per
		 * quad plus duplicated padding verts at row starts/ends. Walk the
		 * same layout to recover the VBO indices of the grid corners. */
		GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, 4 * totgrid, INT_MAX);
		GPU_indexbuf_init_ex(&elb, GPU_PRIM_TRI_STRIP, 5 * totgrid, INT_MAX, true);
		uint vbo_index_offset = 0;
		for (int i = 0; i < totgrid; i++) {
			uint grid_indices[4] = {0, 0, 0, 0};
			for (int j = 0; j < gridsize - 1; j++) {
				for (int k = 0; k < gridsize - 1; k++) {
					const bool is_row_start = (k == 0);
					const bool is_row_end = (k == gridsize - 2);
					const bool is_grid_start = (j == 0);
					const bool is_grid_end = (j == gridsize - 2);
					const bool is_first_grid = (i == 0);
					const bool is_last_grid = (i == totgrid - 1);

					/* Skip the duplicated vert inserted before every row
					 * except the very first one. */
					if (is_row_start && !(is_grid_start && is_first_grid)) {
						vbo_index_offset += 1;
					}

					/* Record the VBO index of each grid corner as we pass it. */
					if (is_grid_start && is_row_start) {
						grid_indices[0] = vbo_index_offset + 0;
					}
					if (is_grid_start && is_row_end) {
						grid_indices[1] = vbo_index_offset + 2;
					}
					if (is_grid_end && is_row_start) {
						grid_indices[2] = vbo_index_offset + 1;
					}
					if (is_grid_end && is_row_end) {
						grid_indices[3] = vbo_index_offset + 3;
					}
					vbo_index_offset += 4;

					/* Skip the duplicated vert appended after every row
					 * except the very last one. */
					if (is_row_end && !(is_grid_end && is_last_grid)) {
						vbo_index_offset += 1;
					}
				}
			}
			/* Strip order, then a restart index to separate grids. */
			GPU_indexbuf_add_generic_vert(&elb, grid_indices[1]);
			GPU_indexbuf_add_generic_vert(&elb, grid_indices[0]);
			GPU_indexbuf_add_generic_vert(&elb, grid_indices[3]);
			GPU_indexbuf_add_generic_vert(&elb, grid_indices[2]);
			GPU_indexbuf_add_primitive_restart(&elb);

			GPU_indexbuf_add_line_verts(&elb_lines, grid_indices[0], grid_indices[1]);
			GPU_indexbuf_add_line_verts(&elb_lines, grid_indices[1], grid_indices[3]);
			GPU_indexbuf_add_line_verts(&elb_lines, grid_indices[2], grid_indices[3]);
			GPU_indexbuf_add_line_verts(&elb_lines, grid_indices[2], grid_indices[0]);
		}
	}
	buffers->index_buf_fast = GPU_indexbuf_build(&elb);
	buffers->index_lines_buf_fast = GPU_indexbuf_build(&elb_lines);
}
441
/* Rebuilds the vertex buffer, wireframe index buffer and batches for a
 * set of multires grids. Smooth: one vert per grid element. Flat: 4
 * verts per quad arranged as one long triangle strip, with duplicated
 * (degenerate) verts between rows and grids; the layout here must stay
 * in sync with gpu_pbvh_grid_fill_fast_buffer. */
void GPU_pbvh_grid_buffers_update(
        GPU_PBVH_Buffers *buffers, CCGElem **grids,
        const DMFlagMat *grid_flag_mats, int *grid_indices,
        int totgrid, const CCGKey *key,
        const int update_flags)
{
	const bool show_mask = (update_flags & GPU_PBVH_BUFFERS_SHOW_MASK) != 0;
	bool empty_mask = true;
	int i, j, k, x, y;

	buffers->smooth = grid_flag_mats[grid_indices[0]].flag & ME_SMOOTH;

	/* Build VBO */
	const int has_mask = key->has_mask;

	uint vert_count = totgrid * key->grid_area;

	if (!buffers->smooth) {
		/* 4 verts per quad ... */
		vert_count = totgrid * SQUARE(key->grid_size - 1) * 4;
		/* Count strip restart verts (2 verts between each row and grid) */
		vert_count += ((totgrid - 1) + totgrid * (key->grid_size - 2)) * 2;
	}

	GPU_BATCH_DISCARD_SAFE(buffers->triangles);
	GPU_BATCH_DISCARD_SAFE(buffers->triangles_fast);
	GPU_BATCH_DISCARD_SAFE(buffers->lines);
	GPU_BATCH_DISCARD_SAFE(buffers->lines_fast);
	GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
	GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf_fast);
	GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf);
	GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf_fast);

	/* Fast buffers need at least a 2x2 quad per grid. */
	if (buffers->index_buf_fast == NULL && key->grid_size > 2) {
		gpu_pbvh_grid_fill_fast_buffer(buffers, totgrid, key->grid_size);
	}

	uint vbo_index_offset = 0;
	/* Build VBO */
	if (gpu_pbvh_vert_buf_data_set(buffers, vert_count)) {
		GPUIndexBufBuilder elb_lines;
		GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, totgrid * key->grid_area * 2, vert_count);

		for (i = 0; i < totgrid; ++i) {
			CCGElem *grid = grids[grid_indices[i]];
			int vbo_index = vbo_index_offset;

			if (buffers->smooth) {
				/* One vert per grid element; wire edges link each element
				 * to its +x and +y neighbors. */
				for (y = 0; y < key->grid_size; y++) {
					for (x = 0; x < key->grid_size; x++) {
						CCGElem *elem = CCG_grid_elem(key, grid, x, y);
						GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index, CCG_elem_co(key, elem));

						short no_short[3];
						normal_float_to_short_v3(no_short, CCG_elem_no(key, elem));
						GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index, no_short);

						if (has_mask && show_mask) {
							float fmask = *CCG_elem_mask(key, elem);
							GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index, &fmask);
							empty_mask = empty_mask && (fmask == 0.0f);
						}

						if (x + 1 < key->grid_size) {
							GPU_indexbuf_add_line_verts(&elb_lines, vbo_index, vbo_index + 1);
						}
						if (y + 1 < key->grid_size) {
							GPU_indexbuf_add_line_verts(&elb_lines, vbo_index, vbo_index + key->grid_size);
						}
						vbo_index += 1;
					}
				}
				vbo_index_offset += key->grid_area;
			}
			else {
				/* Flat: emit 4 verts per quad sharing one face normal, plus
				 * duplicated verts so the whole node draws as one strip. */
				for (j = 0; j < key->grid_size - 1; j++) {
					for (k = 0; k < key->grid_size - 1; k++) {
						const bool is_row_start = (k == 0);
						const bool is_row_end = (k == key->grid_size - 2);
						const bool is_grid_start = (j == 0);
						const bool is_grid_end = (j == key->grid_size - 2);
						const bool is_first_grid = (i == 0);
						const bool is_last_grid = (i == totgrid - 1);

						CCGElem *elems[4] = {
							CCG_grid_elem(key, grid, k, j + 1),
							CCG_grid_elem(key, grid, k + 1, j + 1),
							CCG_grid_elem(key, grid, k + 1, j),
							CCG_grid_elem(key, grid, k, j),
						};
						float *co[4] = {
						    CCG_elem_co(key, elems[0]),
						    CCG_elem_co(key, elems[1]),
						    CCG_elem_co(key, elems[2]),
						    CCG_elem_co(key, elems[3]),
						};

						/* Flat face normal for the quad. */
						float fno[3];
						short no_short[3];
						normal_quad_v3(fno, co[0], co[1], co[2], co[3]);
						normal_float_to_short_v3(no_short, fno);

						if (is_row_start && !(is_grid_start && is_first_grid)) {
							/* Duplicate first vert
							 * (only pos is needed since the triangle will be degenerate) */
							GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index, co[3]);
							vbo_index += 1;
							vbo_index_offset += 1;
						}

						/* Note indices orders (3, 0, 2, 1); we are drawing a triangle strip. */
						GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index, co[3]);
						GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index, no_short);
						GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index + 1, co[0]);
						GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index + 1, no_short);
						GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index + 2, co[2]);
						GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index + 2, no_short);
						GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index + 3, co[1]);
						GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index + 3, no_short);

						/* Wire: interior edges once per quad, boundary edges
						 * only at the row/grid end to avoid duplicates. */
						GPU_indexbuf_add_line_verts(&elb_lines, vbo_index, vbo_index + 1);
						GPU_indexbuf_add_line_verts(&elb_lines, vbo_index, vbo_index + 2);
						if (is_row_end) {
							GPU_indexbuf_add_line_verts(&elb_lines, vbo_index + 2, vbo_index + 3);
						}
						if (is_grid_end) {
							GPU_indexbuf_add_line_verts(&elb_lines, vbo_index + 1, vbo_index + 3);
						}

						if (has_mask && show_mask) {
							/* Average the 4 corner masks for the flat quad. */
							float fmask = (*CCG_elem_mask(key, elems[0]) +
							               *CCG_elem_mask(key, elems[1]) +
							               *CCG_elem_mask(key, elems[2]) +
							               *CCG_elem_mask(key, elems[3])) * 0.25f;
							GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index, &fmask);
							GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index + 1, &fmask);
							GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index + 2, &fmask);
							GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index + 3, &fmask);
							empty_mask = empty_mask && (fmask == 0.0f);
						}

						if (is_row_end && !(is_grid_end && is_last_grid)) {
							/* Duplicate last vert
							 * (only pos is needed since the triangle will be degenerate) */
							GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index + 4, co[1]);
							vbo_index += 1;
							vbo_index_offset += 1;
						}
						vbo_index += 4;
					}
				}
				vbo_index_offset += (key->grid_size - 1) * (key->grid_size - 1) * 4;
			}
		}

		buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);

		gpu_pbvh_batch_init(buffers, buffers->smooth ? GPU_PRIM_TRIS : GPU_PRIM_TRI_STRIP);
	}

	buffers->grids = grids;
	buffers->grid_indices = grid_indices;
	buffers->totgrid = totgrid;
	buffers->grid_flag_mats = grid_flag_mats;
	buffers->gridkey = *key;
	buffers->show_mask = !empty_mask;

	//printf("node updated %p\n", buffers);
}
610
/* Build the triangle element (index) buffer for a set of grids.
 *
 * NOTE: this is a macro rather than a function because it reads
 * 'totgrid', 'gridsize', 'grid_hidden' and 'grid_indices' from the
 * enclosing scope by name. Every visible quad of every grid is emitted
 * as two triangles; quads hidden via the grid-hidden bitmap (when
 * 'grid_hidden' is non-NULL) are skipped. */
#define FILL_QUAD_BUFFER(max_vert_, tot_quad_, buffer_)                 \
	{                                                                   \
		int offset = 0;                                                 \
		int i, j, k;                                                    \
		                                                                \
		GPUIndexBufBuilder elb;                                         \
		GPU_indexbuf_init(                                              \
		       &elb, GPU_PRIM_TRIS, tot_quad_ * 2, max_vert_);          \
		                                                                \
		/* Fill the buffer */                                           \
		for (i = 0; i < totgrid; ++i) {                                 \
			BLI_bitmap *gh = NULL;                                      \
			if (grid_hidden)                                            \
				gh = grid_hidden[(grid_indices)[i]];                    \
			                                                            \
			for (j = 0; j < gridsize - 1; ++j) {                        \
				for (k = 0; k < gridsize - 1; ++k) {                    \
					/* Skip hidden grid face */                         \
					if (gh && paint_is_grid_face_hidden(                \
					        gh, gridsize, k, j))                        \
					{                                                   \
						continue;                                       \
					}                                                   \
					/* Two triangles per quad, both CCW winding. */     \
					GPU_indexbuf_add_generic_vert(&elb, offset + j * gridsize + k + 1); \
					GPU_indexbuf_add_generic_vert(&elb, offset + j * gridsize + k);    \
					GPU_indexbuf_add_generic_vert(&elb, offset + (j + 1) * gridsize + k); \
					                                                    \
					GPU_indexbuf_add_generic_vert(&elb, offset + (j + 1) * gridsize + k + 1); \
					GPU_indexbuf_add_generic_vert(&elb, offset + j * gridsize + k + 1); \
					GPU_indexbuf_add_generic_vert(&elb, offset + (j + 1) * gridsize + k); \
				}                                                       \
			}                                                           \
			                                                            \
			/* Each grid contributes gridsize^2 vertices to the VBO. */ \
			offset += gridsize * gridsize;                              \
		}                                                               \
		buffer_ = GPU_indexbuf_build(&elb);                             \
	} (void)0
650 /* end FILL_QUAD_BUFFER */
651
/* Build the index buffer for 'totgrid' fully-visible grids of
 * gridsize * gridsize vertices each (no hidden-face checks are done).
 * '*totquad' is set to the total number of quads emitted. */
static GPUIndexBuf *gpu_get_grid_buffer(
        int gridsize, uint *totquad,
        /* remove this arg when GPU gets base-vertex support! */
        int totgrid)
{
	/* used in the FILL_QUAD_BUFFER macro; NULL here means "nothing hidden" */
	BLI_bitmap * const *grid_hidden = NULL;
	const int *grid_indices = NULL;

	/* Build new VBO */
	*totquad = (gridsize - 1) * (gridsize - 1) * totgrid;
	int max_vert = gridsize * gridsize * totgrid;

	GPUIndexBuf *mres_buffer;
	FILL_QUAD_BUFFER(max_vert, *totquad, mres_buffer);

	return mres_buffer;
}
670
/* Allocate GPU buffers for a multires/grids PBVH node and build its
 * triangle index buffer. The vertex buffer itself is filled later by
 * the grid update function. Returns a zero-initialized buffer set
 * (with tot_quad == 0) when every quad in the node is hidden. */
GPU_PBVH_Buffers *GPU_pbvh_grid_buffers_build(
        int *grid_indices, int totgrid, BLI_bitmap **grid_hidden, int gridsize, const CCGKey *UNUSED(key))
{
	GPU_PBVH_Buffers *buffers;
	int totquad;
	/* Quad count if no face of any grid were hidden. */
	int fully_visible_totquad = (gridsize - 1) * (gridsize - 1) * totgrid;

	buffers = MEM_callocN(sizeof(GPU_PBVH_Buffers), "GPU_Buffers");
	buffers->grid_hidden = grid_hidden;
	buffers->totgrid = totgrid;

	buffers->show_mask = false;

	/* Count the number of quads */
	totquad = BKE_pbvh_count_grid_quads(grid_hidden, grid_indices, totgrid, gridsize);

	/* totally hidden node, return here to avoid BufferData with zero below. */
	if (totquad == 0)
		return buffers;

	/* TODO(fclem) this needs a bit of cleanup. It's only needed for smooth grids.
	 * Could be moved to the update function somehow. */
	if (totquad == fully_visible_totquad) {
		/* Nothing hidden: take the fast path that skips per-face checks. */
		buffers->index_buf = gpu_get_grid_buffer(gridsize, &buffers->tot_quad, totgrid);
	}
	else {
		/* Some faces hidden: build an index buffer that skips them. */
		uint max_vert = totgrid * gridsize * gridsize;
		buffers->tot_quad = totquad;

		FILL_QUAD_BUFFER(max_vert, totquad, buffers->index_buf);
	}

	return buffers;
}
705
706 #undef FILL_QUAD_BUFFER
707
708 /** \} */
709
710 /* -------------------------------------------------------------------- */
711 /** \name BMesh PBVH
712  * \{ */
713
714 /* Output a BMVert into a VertexBufferFormat array
715  *
716  * The vertex is skipped if hidden, otherwise the output goes into
717  * index '*v_index' in the 'vert_data' array and '*v_index' is
718  * incremented.
719  */
720 static void gpu_bmesh_vert_to_buffer_copy__gwn(
721         BMVert *v,
722         GPUVertBuf *vert_buf,
723         int *v_index,
724         const float fno[3],
725         const float *fmask,
726         const int cd_vert_mask_offset,
727         const bool show_mask,
728         bool *empty_mask)
729 {
730         if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN)) {
731
732                 /* Set coord, normal, and mask */
733                 GPU_vertbuf_attr_set(vert_buf, g_vbo_id.pos, *v_index, v->co);
734
735                 short no_short[3];
736                 normal_float_to_short_v3(no_short, fno ? fno : v->no);
737                 GPU_vertbuf_attr_set(vert_buf, g_vbo_id.nor, *v_index, no_short);
738
739                 if (show_mask) {
740                         float effective_mask = fmask ? *fmask
741                                                      : BM_ELEM_CD_GET_FLOAT(v, cd_vert_mask_offset);
742                         GPU_vertbuf_attr_set(vert_buf, g_vbo_id.msk, *v_index, &effective_mask);
743                         *empty_mask = *empty_mask && (effective_mask == 0.0f);
744                 }
745
746                 /* Assign index for use in the triangle index buffer */
747                 /* note: caller must set:  bm->elem_index_dirty |= BM_VERT; */
748                 BM_elem_index_set(v, (*v_index)); /* set_dirty! */
749
750                 (*v_index)++;
751         }
752 }
753
754 /* Return the total number of vertices that don't have BM_ELEM_HIDDEN set */
755 static int gpu_bmesh_vert_visible_count(GSet *bm_unique_verts,
756                                         GSet *bm_other_verts)
757 {
758         GSetIterator gs_iter;
759         int totvert = 0;
760
761         GSET_ITER (gs_iter, bm_unique_verts) {
762                 BMVert *v = BLI_gsetIterator_getKey(&gs_iter);
763                 if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN))
764                         totvert++;
765         }
766         GSET_ITER (gs_iter, bm_other_verts) {
767                 BMVert *v = BLI_gsetIterator_getKey(&gs_iter);
768                 if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN))
769                         totvert++;
770         }
771
772         return totvert;
773 }
774
775 /* Return the total number of visible faces */
776 static int gpu_bmesh_face_visible_count(GSet *bm_faces)
777 {
778         GSetIterator gh_iter;
779         int totface = 0;
780
781         GSET_ITER (gh_iter, bm_faces) {
782                 BMFace *f = BLI_gsetIterator_getKey(&gh_iter);
783
784                 if (!BM_elem_flag_test(f, BM_ELEM_HIDDEN))
785                         totface++;
786         }
787
788         return totface;
789 }
790
/* Creates a vertex buffer (coordinate, normal, color) and, if smooth
 * shading, an element index buffer. Also (re)builds the wireframe line
 * index buffer for the node. */
void GPU_pbvh_bmesh_buffers_update(
        GPU_PBVH_Buffers *buffers,
        BMesh *bm,
        GSet *bm_faces,
        GSet *bm_unique_verts,
        GSet *bm_other_verts,
        const int update_flags)
{
	const bool show_mask = (update_flags & GPU_PBVH_BUFFERS_SHOW_MASK) != 0;
	int tottri, totvert, maxvert = 0;
	/* Stays true while every mask value written is 0.0; stored on the
	 * buffers at the end so drawing can skip the mask entirely. */
	bool empty_mask = true;

	/* TODO, make mask layer optional for bmesh buffer */
	const int cd_vert_mask_offset = CustomData_get_offset(&bm->vdata, CD_PAINT_MASK);

	/* Count visible triangles */
	tottri = gpu_bmesh_face_visible_count(bm_faces);

	if (buffers->smooth) {
		/* Smooth needs to recreate index buffer, so we have to invalidate the batch. */
		GPU_BATCH_DISCARD_SAFE(buffers->triangles);
		GPU_BATCH_DISCARD_SAFE(buffers->lines);
		GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf);
		GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
		/* Count visible vertices */
		totvert = gpu_bmesh_vert_visible_count(bm_unique_verts, bm_other_verts);
	}
	else {
		GPU_BATCH_DISCARD_SAFE(buffers->lines);
		GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf);
		/* Flat shading duplicates vertices: each visible triangle gets
		 * its own three entries in the vertex buffer. */
		totvert = tottri * 3;
	}

	if (!tottri) {
		/* Fully hidden node: nothing to upload. */
		buffers->tot_tri = 0;
		return;
	}

	/* Fill vertex buffer */
	if (gpu_pbvh_vert_buf_data_set(buffers, totvert)) {
		int v_index = 0;

		if (buffers->smooth) {
			GSetIterator gs_iter;

			/* Vertices get an index assigned for use in the triangle
			 * index buffer */
			bm->elem_index_dirty |= BM_VERT;

			GSET_ITER (gs_iter, bm_unique_verts) {
				gpu_bmesh_vert_to_buffer_copy__gwn(
				        BLI_gsetIterator_getKey(&gs_iter),
				        buffers->vert_buf, &v_index, NULL, NULL,
				        cd_vert_mask_offset,
				        show_mask, &empty_mask);
			}

			GSET_ITER (gs_iter, bm_other_verts) {
				gpu_bmesh_vert_to_buffer_copy__gwn(
				        BLI_gsetIterator_getKey(&gs_iter),
				        buffers->vert_buf, &v_index, NULL, NULL,
				        cd_vert_mask_offset,
				        show_mask, &empty_mask);
			}

			/* Number of vertices actually written (hidden ones were
			 * skipped); used to size the index buffers below. */
			maxvert = v_index;
		}
		else {
			GSetIterator gs_iter;

			/* Wireframe index buffer: one line per triangle edge. */
			GPUIndexBufBuilder elb_lines;
			GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, totvert);

			GSET_ITER (gs_iter, bm_faces) {
				BMFace *f = BLI_gsetIterator_getKey(&gs_iter);

				BLI_assert(f->len == 3);

				if (!BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
					BMVert *v[3];
					float fmask = 0.0f;
					int i;

					BM_face_as_array_vert_tri(f, v);

					/* Average mask value */
					for (i = 0; i < 3; i++) {
						fmask += BM_ELEM_CD_GET_FLOAT(v[i], cd_vert_mask_offset);
					}
					fmask /= 3.0f;

					/* Lines reference the three vertices this face is
					 * about to append to the vertex buffer. */
					GPU_indexbuf_add_line_verts(&elb_lines, v_index + 0, v_index + 1);
					GPU_indexbuf_add_line_verts(&elb_lines, v_index + 1, v_index + 2);
					GPU_indexbuf_add_line_verts(&elb_lines, v_index + 2, v_index + 0);

					for (i = 0; i < 3; i++) {
						gpu_bmesh_vert_to_buffer_copy__gwn(
						        v[i], buffers->vert_buf,
						        &v_index, f->no, &fmask,
						        cd_vert_mask_offset,
						        show_mask, &empty_mask);
					}
				}
			}

			buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
			buffers->tot_tri = tottri;
		}

		/* gpu_bmesh_vert_to_buffer_copy sets dirty index values */
		bm->elem_index_dirty |= BM_VERT;
	}
	else {
		/* Memory map failed */
		return;
	}

	if (buffers->smooth) {
		/* Fill the triangle buffer */
		GPUIndexBufBuilder elb, elb_lines;
		GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tottri, maxvert);
		GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, maxvert);

		/* Fill triangle index buffer */
		{
			GSetIterator gs_iter;

			GSET_ITER (gs_iter, bm_faces) {
				BMFace *f = BLI_gsetIterator_getKey(&gs_iter);

				if (!BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
					BMVert *v[3];

					BM_face_as_array_vert_tri(f, v);

					/* Indices were assigned to the verts while filling
					 * the vertex buffer above. */
					uint idx[3] = {BM_elem_index_get(v[0]), BM_elem_index_get(v[1]), BM_elem_index_get(v[2])};
					GPU_indexbuf_add_tri_verts(&elb, idx[0], idx[1], idx[2]);

					GPU_indexbuf_add_line_verts(&elb_lines, idx[0], idx[1]);
					GPU_indexbuf_add_line_verts(&elb_lines, idx[1], idx[2]);
					GPU_indexbuf_add_line_verts(&elb_lines, idx[2], idx[0]);
				}
			}

			buffers->tot_tri = tottri;

			if (buffers->index_buf == NULL) {
				buffers->index_buf = GPU_indexbuf_build(&elb);
			}
			else {
				GPU_indexbuf_build_in_place(&elb, buffers->index_buf);
			}

			buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
		}
	}

	buffers->show_mask = !empty_mask;

	gpu_pbvh_batch_init(buffers, GPU_PRIM_TRIS);
}
954
955 /** \} */
956
957 /* -------------------------------------------------------------------- */
958 /** \name Generic
959  * \{ */
960
961 GPU_PBVH_Buffers *GPU_pbvh_bmesh_buffers_build(bool smooth_shading)
962 {
963         GPU_PBVH_Buffers *buffers;
964
965         buffers = MEM_callocN(sizeof(GPU_PBVH_Buffers), "GPU_Buffers");
966         buffers->use_bmesh = true;
967         buffers->smooth = smooth_shading;
968         buffers->show_mask = true;
969
970         return buffers;
971 }
972
973 GPUBatch *GPU_pbvh_buffers_batch_get(GPU_PBVH_Buffers *buffers, bool fast, bool wires)
974 {
975         if (wires) {
976                 return (fast && buffers->lines_fast) ?
977                         buffers->lines_fast : buffers->lines;
978         }
979         else {
980                 return (fast && buffers->triangles_fast) ?
981                         buffers->triangles_fast : buffers->triangles;
982         }
983 }
984
/* Return true if the node's buffers were built with a non-empty mask
 * layer (show_mask is set to !empty_mask by the update functions). */
bool GPU_pbvh_buffers_has_mask(GPU_PBVH_Buffers *buffers)
{
	return buffers->show_mask;
}
989
990 void GPU_pbvh_buffers_free(GPU_PBVH_Buffers *buffers)
991 {
992         if (buffers) {
993                 GPU_BATCH_DISCARD_SAFE(buffers->lines);
994                 GPU_BATCH_DISCARD_SAFE(buffers->lines_fast);
995                 GPU_BATCH_DISCARD_SAFE(buffers->triangles);
996                 GPU_BATCH_DISCARD_SAFE(buffers->triangles_fast);
997                 GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf_fast);
998                 GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf);
999                 GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf_fast);
1000                 GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
1001                 GPU_VERTBUF_DISCARD_SAFE(buffers->vert_buf);
1002
1003                 MEM_freeN(buffers);
1004         }
1005 }
1006
1007 /** \} */
1008
1009 /* -------------------------------------------------------------------- */
1010 /** \name Debug
1011  * \{ */
1012
1013 /* debug function, draws the pbvh BB */
1014 void GPU_pbvh_BB_draw(float min[3], float max[3], bool leaf, uint pos)
1015 {
1016         if (leaf)
1017                 immUniformColor4f(0.0, 1.0, 0.0, 0.5);
1018         else
1019                 immUniformColor4f(1.0, 0.0, 0.0, 0.5);
1020
1021         /* TODO(merwin): revisit this after we have mutable VertexBuffers
1022          * could keep a static batch & index buffer, change the VBO contents per draw
1023          */
1024
1025         immBegin(GPU_PRIM_LINES, 24);
1026
1027         /* top */
1028         immVertex3f(pos, min[0], min[1], max[2]);
1029         immVertex3f(pos, min[0], max[1], max[2]);
1030
1031         immVertex3f(pos, min[0], max[1], max[2]);
1032         immVertex3f(pos, max[0], max[1], max[2]);
1033
1034         immVertex3f(pos, max[0], max[1], max[2]);
1035         immVertex3f(pos, max[0], min[1], max[2]);
1036
1037         immVertex3f(pos, max[0], min[1], max[2]);
1038         immVertex3f(pos, min[0], min[1], max[2]);
1039
1040         /* bottom */
1041         immVertex3f(pos, min[0], min[1], min[2]);
1042         immVertex3f(pos, min[0], max[1], min[2]);
1043
1044         immVertex3f(pos, min[0], max[1], min[2]);
1045         immVertex3f(pos, max[0], max[1], min[2]);
1046
1047         immVertex3f(pos, max[0], max[1], min[2]);
1048         immVertex3f(pos, max[0], min[1], min[2]);
1049
1050         immVertex3f(pos, max[0], min[1], min[2]);
1051         immVertex3f(pos, min[0], min[1], min[2]);
1052
1053         /* sides */
1054         immVertex3f(pos, min[0], min[1], min[2]);
1055         immVertex3f(pos, min[0], min[1], max[2]);
1056
1057         immVertex3f(pos, min[0], max[1], min[2]);
1058         immVertex3f(pos, min[0], max[1], max[2]);
1059
1060         immVertex3f(pos, max[0], max[1], min[2]);
1061         immVertex3f(pos, max[0], max[1], max[2]);
1062
1063         immVertex3f(pos, max[0], min[1], min[2]);
1064         immVertex3f(pos, max[0], min[1], max[2]);
1065
1066         immEnd();
1067 }
1068
1069 /** \} */
1070
/* Dummy symbol so this translation unit is pulled in by the linker.
 * In C, an empty parameter list '()' declares a function with
 * unspecified parameters; '(void)' is the correct prototype. */
void GPU_pbvh_fix_linking(void)
{
}