/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2005 Blender Foundation.
 * All rights reserved.
 */

/** \file
 * \ingroup gpu
 *
 * Mesh drawing using OpenGL VBO (Vertex Buffer Objects)
 */
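
/* Editor's note: a minimal sketch (not part of the original file) of the
 * drawing half of this API, assuming a 'buffers' object already built and
 * updated by one of the three backends below (mesh, grid, BMesh). The
 * 'use_fast_draw' / 'draw_wires' variables are hypothetical; real callers
 * live in the draw code and bind a shader before drawing. */
#if 0
GPUBatch *batch = GPU_pbvh_buffers_batch_get(buffers, use_fast_draw, draw_wires);
if (batch != NULL) {
        GPU_batch_draw(batch); /* assumes the caller has bound a suitable shader */
}
GPU_pbvh_buffers_free(buffers);
#endif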

#include <limits.h>
#include <stddef.h>
#include <string.h>

#include "MEM_guardedalloc.h"

#include "BLI_bitmap.h"
#include "BLI_math.h"
#include "BLI_utildefines.h"
#include "BLI_ghash.h"

#include "DNA_meshdata_types.h"

#include "BKE_ccg.h"
#include "BKE_DerivedMesh.h"
#include "BKE_paint.h"
#include "BKE_mesh.h"
#include "BKE_pbvh.h"

#include "GPU_buffers.h"
#include "GPU_draw.h"
#include "GPU_immediate.h"
#include "GPU_batch.h"

#include "bmesh.h"

/* XXX: the code in this file is used for optimized PBVH drawing
 * and does not interact with the general GPU buffer code elsewhere. */

struct GPU_PBVH_Buffers {
        GPUIndexBuf *index_buf, *index_buf_fast;
        GPUIndexBuf *index_lines_buf, *index_lines_buf_fast;
        GPUVertBuf *vert_buf;

        GPUBatch *lines;
        GPUBatch *lines_fast;
        GPUBatch *triangles;
        GPUBatch *triangles_fast;

        /* mesh pointers in case buffer allocation fails */
        const MPoly *mpoly;
        const MLoop *mloop;
        const MLoopTri *looptri;
        const MVert *mvert;

        const int *face_indices;
        int        face_indices_len;

        /* grid pointers */
        CCGKey gridkey;
        CCGElem **grids;
        const DMFlagMat *grid_flag_mats;
        BLI_bitmap * const *grid_hidden;
        const int *grid_indices;
        int totgrid;

        bool use_bmesh;

        uint tot_tri, tot_quad;

        /* The PBVH ensures that either all faces in the node are
         * smooth-shaded or all faces are flat-shaded */
        bool smooth;

        bool show_mask;
};

static struct {
        uint pos, nor, msk;
} g_vbo_id = {0};

/* -------------------------------------------------------------------- */
/** \name PBVH Utils
 * \{ */

/* Allocates a non-initialized buffer to be sent to the GPU.
 * Returns false if the memory allocation or mapping failed. */
static bool gpu_pbvh_vert_buf_data_set(GPU_PBVH_Buffers *buffers, uint vert_len)
{
        if (buffers->vert_buf == NULL) {
                /* Initialize vertex buffer */
                /* match 'VertexBufferFormat' */

                static GPUVertFormat format = {0};
                if (format.attr_len == 0) {
                        g_vbo_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
                        g_vbo_id.nor = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
                        g_vbo_id.msk = GPU_vertformat_attr_add(&format, "msk", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
                }
#if 0
                buffers->vert_buf = GPU_vertbuf_create_with_format_ex(&format, GPU_USAGE_DYNAMIC);
                GPU_vertbuf_data_alloc(buffers->vert_buf, vert_len);
        }
        else if (vert_len != buffers->vert_buf->vertex_len) {
                GPU_vertbuf_data_resize(buffers->vert_buf, vert_len);
        }
#else
                buffers->vert_buf = GPU_vertbuf_create_with_format_ex(&format, GPU_USAGE_STATIC);
        }
        GPU_vertbuf_data_alloc(buffers->vert_buf, vert_len);
#endif
        return buffers->vert_buf->data != NULL;
}

static void gpu_pbvh_batch_init(GPU_PBVH_Buffers *buffers, GPUPrimType prim)
{
        /* force flushing to the GPU */
        if (buffers->vert_buf->data) {
                GPU_vertbuf_use(buffers->vert_buf);
        }

        if (buffers->triangles == NULL) {
                buffers->triangles = GPU_batch_create(
                        prim, buffers->vert_buf,
                        /* can be NULL */
                        buffers->index_buf);
        }

        if ((buffers->triangles_fast == NULL) && buffers->index_buf_fast) {
                buffers->triangles_fast = GPU_batch_create(
                        prim, buffers->vert_buf,
                        buffers->index_buf_fast);
        }

        if (buffers->lines == NULL) {
                BLI_assert(buffers->index_lines_buf != NULL);
                buffers->lines = GPU_batch_create(
                        GPU_PRIM_LINES, buffers->vert_buf,
                        buffers->index_lines_buf);
        }

        if ((buffers->lines_fast == NULL) && buffers->index_lines_buf_fast) {
                buffers->lines_fast = GPU_batch_create(
                        GPU_PRIM_LINES, buffers->vert_buf,
                        buffers->index_lines_buf_fast);
        }
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name Mesh PBVH
 * \{ */

void GPU_pbvh_mesh_buffers_update(
        GPU_PBVH_Buffers *buffers, const MVert *mvert,
        const int *vert_indices, int totvert, const float *vmask,
        const int (*face_vert_indices)[3],
        const int update_flags)
{
        const bool show_mask = (update_flags & GPU_PBVH_BUFFERS_SHOW_MASK) != 0;
        bool empty_mask = true;

        {
                int totelem = (buffers->smooth ? totvert : (buffers->tot_tri * 3));

                /* Build VBO */
                if (gpu_pbvh_vert_buf_data_set(buffers, totelem)) {
                        /* Vertex data is shared if smooth-shaded, but separate
                         * copies are made for flat shading because normals
                         * shouldn't be shared. */
                        if (buffers->smooth) {
                                for (uint i = 0; i < totvert; ++i) {
                                        const MVert *v = &mvert[vert_indices[i]];
                                        GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, i, v->co);
                                        GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, i, v->no);
                                }

                                if (vmask && show_mask) {
                                        for (uint i = 0; i < buffers->face_indices_len; i++) {
                                                const MLoopTri *lt = &buffers->looptri[buffers->face_indices[i]];
                                                for (uint j = 0; j < 3; j++) {
                                                        int vidx = face_vert_indices[i][j];
                                                        int v_index = buffers->mloop[lt->tri[j]].v;
                                                        float fmask = vmask[v_index];
                                                        GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vidx, &fmask);
                                                        empty_mask = empty_mask && (fmask == 0.0f);
                                                }
                                        }
                                }
                        }
                        else {
                                /* calculate normal for each polygon only once */
                                uint mpoly_prev = UINT_MAX;
                                short no[3];
                                int vbo_index = 0;

                                for (uint i = 0; i < buffers->face_indices_len; i++) {
                                        const MLoopTri *lt = &buffers->looptri[buffers->face_indices[i]];
                                        const uint vtri[3] = {
                                            buffers->mloop[lt->tri[0]].v,
                                            buffers->mloop[lt->tri[1]].v,
                                            buffers->mloop[lt->tri[2]].v,
                                        };

                                        if (paint_is_face_hidden(lt, mvert, buffers->mloop))
                                                continue;

                                        /* Face normal and mask */
                                        if (lt->poly != mpoly_prev) {
                                                const MPoly *mp = &buffers->mpoly[lt->poly];
                                                float fno[3];
                                                BKE_mesh_calc_poly_normal(mp, &buffers->mloop[mp->loopstart], mvert, fno);
                                                normal_float_to_short_v3(no, fno);
                                                mpoly_prev = lt->poly;
                                        }

                                        float fmask = 0.0f;
                                        if (vmask && show_mask) {
                                                fmask = (vmask[vtri[0]] + vmask[vtri[1]] + vmask[vtri[2]]) / 3.0f;
                                        }

                                        for (uint j = 0; j < 3; j++) {
                                                const MVert *v = &mvert[vtri[j]];

                                                GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index, v->co);
                                                GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index, no);
                                                GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index, &fmask);

                                                vbo_index++;
                                        }

                                        empty_mask = empty_mask && (fmask == 0.0f);
                                }
                        }

                        gpu_pbvh_batch_init(buffers, GPU_PRIM_TRIS);
                }
        }

        buffers->show_mask = !empty_mask;
        buffers->mvert = mvert;
}

GPU_PBVH_Buffers *GPU_pbvh_mesh_buffers_build(
        const int (*face_vert_indices)[3],
        const MPoly *mpoly, const MLoop *mloop, const MLoopTri *looptri,
        const MVert *mvert,
        const int *face_indices,
        const int  face_indices_len)
{
        GPU_PBVH_Buffers *buffers;
        int i, tottri;

        buffers = MEM_callocN(sizeof(GPU_PBVH_Buffers), "GPU_Buffers");

        /* smooth or flat for all */
        buffers->smooth = mpoly[looptri[face_indices[0]].poly].flag & ME_SMOOTH;

        buffers->show_mask = false;

        /* Count the number of visible triangles */
        for (i = 0, tottri = 0; i < face_indices_len; ++i) {
                const MLoopTri *lt = &looptri[face_indices[i]];
                if (!paint_is_face_hidden(lt, mvert, mloop))
                        tottri++;
        }

        if (tottri == 0) {
                buffers->tot_tri = 0;

                buffers->mpoly = mpoly;
                buffers->mloop = mloop;
                buffers->looptri = looptri;
                buffers->face_indices = face_indices;
                buffers->face_indices_len = 0;

                return buffers;
        }

        GPU_BATCH_DISCARD_SAFE(buffers->triangles);
        GPU_BATCH_DISCARD_SAFE(buffers->lines);
        GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
        GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf);

        /* An element index buffer is used for smooth shading, but flat
         * shading requires separate vertex normals so an index buffer
         * can't be used there. */
        if (buffers->smooth) {
                /* Fill the triangle and line buffers. */
                GPUIndexBufBuilder elb, elb_lines;
                GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tottri, INT_MAX);
                GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, INT_MAX);

                for (i = 0; i < face_indices_len; ++i) {
                        const MLoopTri *lt = &looptri[face_indices[i]];

                        /* Skip hidden faces */
                        if (paint_is_face_hidden(lt, mvert, mloop))
                                continue;

                        GPU_indexbuf_add_tri_verts(&elb, UNPACK3(face_vert_indices[i]));

                        /* TODO skip "non-real" edges. */
                        GPU_indexbuf_add_line_verts(&elb_lines, face_vert_indices[i][0], face_vert_indices[i][1]);
                        GPU_indexbuf_add_line_verts(&elb_lines, face_vert_indices[i][1], face_vert_indices[i][2]);
                        GPU_indexbuf_add_line_verts(&elb_lines, face_vert_indices[i][2], face_vert_indices[i][0]);
                }
                buffers->index_buf = GPU_indexbuf_build(&elb);
                buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
        }
        else {
                /* Fill only the line buffer. */
                GPUIndexBufBuilder elb_lines;
                GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, INT_MAX);
                int vbo_tri = 0;

                for (i = 0; i < face_indices_len; ++i) {
                        const MLoopTri *lt = &looptri[face_indices[i]];

                        /* Skip hidden faces */
                        if (paint_is_face_hidden(lt, mvert, mloop))
                                continue;

                        /* Index with a visible-triangle counter, not 'i': hidden faces
                         * are skipped in the vertex buffer too, so using 'i' would
                         * misalign the line indices whenever faces are hidden. */
                        /* TODO skip "non-real" edges. */
                        GPU_indexbuf_add_line_verts(&elb_lines, vbo_tri * 3 + 0, vbo_tri * 3 + 1);
                        GPU_indexbuf_add_line_verts(&elb_lines, vbo_tri * 3 + 1, vbo_tri * 3 + 2);
                        GPU_indexbuf_add_line_verts(&elb_lines, vbo_tri * 3 + 2, vbo_tri * 3 + 0);
                        vbo_tri++;
                }
                buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
        }

        buffers->tot_tri = tottri;

        buffers->mpoly = mpoly;
        buffers->mloop = mloop;
        buffers->looptri = looptri;

        buffers->face_indices = face_indices;
        buffers->face_indices_len = face_indices_len;

        return buffers;
}
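
/* Editor's note: a hedged sketch (not in the original file) of how the mesh
 * build/update pair is typically driven. 'me', 'looptri' and the node_*
 * arrays are hypothetical stand-ins for data the sculpt PBVH provides. */
#if 0
GPU_PBVH_Buffers *buffers = GPU_pbvh_mesh_buffers_build(
        face_vert_indices, me->mpoly, me->mloop, looptri, me->mvert,
        node_face_indices, node_face_indices_len);
GPU_pbvh_mesh_buffers_update(
        buffers, me->mvert, node_vert_indices, node_totvert, vmask,
        face_vert_indices, GPU_PBVH_BUFFERS_SHOW_MASK);
#endif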

/** \} */

/* -------------------------------------------------------------------- */
/** \name Grid PBVH
 * \{ */

static void gpu_pbvh_grid_fill_index_buffers(
        GPU_PBVH_Buffers *buffers,
        int *grid_indices,
        uint visible_quad_len,
        int totgrid,
        int gridsize)
{
        GPUIndexBufBuilder elb, elb_lines;
        GPUIndexBufBuilder elb_fast, elb_lines_fast;

        GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, 2 * visible_quad_len, INT_MAX);
        GPU_indexbuf_init(&elb_fast, GPU_PRIM_TRIS, 2 * totgrid, INT_MAX);
        GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, 2 * totgrid * gridsize * (gridsize - 1), INT_MAX);
        GPU_indexbuf_init(&elb_lines_fast, GPU_PRIM_LINES, 4 * totgrid, INT_MAX);

        if (buffers->smooth) {
                uint offset = 0;
                const uint grid_vert_len = gridsize * gridsize;
                for (int i = 0; i < totgrid; i++, offset += grid_vert_len) {
                        uint v0, v1, v2, v3;
                        bool grid_visible = false;

                        BLI_bitmap *gh = buffers->grid_hidden[grid_indices[i]];

                        for (int j = 0; j < gridsize - 1; ++j) {
                                for (int k = 0; k < gridsize - 1; ++k) {
                                        /* Skip hidden grid face */
                                        if (gh && paint_is_grid_face_hidden(
                                                gh, gridsize, k, j))
                                        {
                                                continue;
                                        }
                                        /* Indices in a clockwise quad layout. */
                                        v0 = offset + j * gridsize + k;
                                        v1 = v0 + 1;
                                        v2 = v1 + gridsize;
                                        v3 = v2 - 1;

                                        GPU_indexbuf_add_tri_verts(&elb, v0, v2, v1);
                                        GPU_indexbuf_add_tri_verts(&elb, v0, v3, v2);

                                        GPU_indexbuf_add_line_verts(&elb_lines, v0, v1);
                                        GPU_indexbuf_add_line_verts(&elb_lines, v0, v3);

                                        if (j + 2 == gridsize) {
                                                GPU_indexbuf_add_line_verts(&elb_lines, v2, v3);
                                        }
                                        grid_visible = true;
                                }
                                GPU_indexbuf_add_line_verts(&elb_lines, v1, v2);
                        }

                        if (grid_visible) {
                                /* Grid corners */
                                v0 = offset;
                                v1 = offset + gridsize - 1;
                                v2 = offset + grid_vert_len - 1;
                                v3 = offset + grid_vert_len - gridsize;

                                GPU_indexbuf_add_tri_verts(&elb_fast, v0, v2, v1);
                                GPU_indexbuf_add_tri_verts(&elb_fast, v0, v3, v2);

                                GPU_indexbuf_add_line_verts(&elb_lines_fast, v0, v1);
                                GPU_indexbuf_add_line_verts(&elb_lines_fast, v1, v2);
                                GPU_indexbuf_add_line_verts(&elb_lines_fast, v2, v3);
                                GPU_indexbuf_add_line_verts(&elb_lines_fast, v3, v0);
                        }
                }
        }
        else {
                uint offset = 0;
                const uint grid_vert_len = SQUARE(gridsize - 1) * 4;
                for (int i = 0; i < totgrid; i++, offset += grid_vert_len) {
                        bool grid_visible = false;

                        BLI_bitmap *gh = buffers->grid_hidden[grid_indices[i]];

                        uint v0, v1, v2, v3;
                        for (int j = 0; j < gridsize - 1; j++) {
                                for (int k = 0; k < gridsize - 1; k++) {
                                        /* Skip hidden grid face */
                                        if (gh && paint_is_grid_face_hidden(
                                                gh, gridsize, k, j))
                                        {
                                                continue;
                                        }
                                        /* VBO data are in a clockwise quad layout. */
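                                        /* Each quad occupies 4 consecutive VBO entries
                                         * (comment added for clarity; derived from the
                                         * matching fill order in GPU_pbvh_grid_buffers_update):
                                         *
                                         *   v3 (k, j+1) --- v2 (k+1, j+1)
                                         *    |                |
                                         *   v0 (k, j) ------ v1 (k+1, j)
                                         */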
                                        v0 = offset + (j * (gridsize - 1) + k) * 4;
                                        v1 = v0 + 1;
                                        v2 = v0 + 2;
                                        v3 = v0 + 3;

                                        GPU_indexbuf_add_tri_verts(&elb, v0, v2, v1);
                                        GPU_indexbuf_add_tri_verts(&elb, v0, v3, v2);

                                        GPU_indexbuf_add_line_verts(&elb_lines, v0, v1);
                                        GPU_indexbuf_add_line_verts(&elb_lines, v0, v3);

                                        if (j + 2 == gridsize) {
                                                GPU_indexbuf_add_line_verts(&elb_lines, v2, v3);
                                        }
                                        grid_visible = true;
                                }
                                GPU_indexbuf_add_line_verts(&elb_lines, v1, v2);
                        }

                        if (grid_visible) {
                                /* Grid corners */
                                v0 = offset;
                                v1 = offset + (gridsize - 1) * 4 - 3;
                                v2 = offset + grid_vert_len - 2;
                                v3 = offset + grid_vert_len - (gridsize - 1) * 4 + 3;

                                GPU_indexbuf_add_tri_verts(&elb_fast, v0, v2, v1);
                                GPU_indexbuf_add_tri_verts(&elb_fast, v0, v3, v2);

                                GPU_indexbuf_add_line_verts(&elb_lines_fast, v0, v1);
                                GPU_indexbuf_add_line_verts(&elb_lines_fast, v1, v2);
                                GPU_indexbuf_add_line_verts(&elb_lines_fast, v2, v3);
                                GPU_indexbuf_add_line_verts(&elb_lines_fast, v3, v0);
                        }
                }
        }

        buffers->index_buf = GPU_indexbuf_build(&elb);
        buffers->index_buf_fast = GPU_indexbuf_build(&elb_fast);
        buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
        buffers->index_lines_buf_fast = GPU_indexbuf_build(&elb_lines_fast);
}

void GPU_pbvh_grid_buffers_update(
        GPU_PBVH_Buffers *buffers, CCGElem **grids,
        const DMFlagMat *grid_flag_mats, int *grid_indices,
        int totgrid, const CCGKey *key,
        const int update_flags)
{
        const bool show_mask = (update_flags & GPU_PBVH_BUFFERS_SHOW_MASK) != 0;
        bool empty_mask = true;
        int i, j, k, x, y;

        const bool smooth = grid_flag_mats[grid_indices[0]].flag & ME_SMOOTH;

        /* Build VBO */
        const int has_mask = key->has_mask;

        uint vert_per_grid = (smooth) ? key->grid_area : (SQUARE(key->grid_size - 1) * 4);
        uint vert_count = totgrid * vert_per_grid;

        if (buffers->smooth != smooth) {
                buffers->smooth = smooth;
                GPU_BATCH_DISCARD_SAFE(buffers->triangles);
                GPU_BATCH_DISCARD_SAFE(buffers->triangles_fast);
                GPU_BATCH_DISCARD_SAFE(buffers->lines);
                GPU_BATCH_DISCARD_SAFE(buffers->lines_fast);

                GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
                GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf_fast);
                GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf);
                GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf_fast);
        }

        if (buffers->index_buf == NULL) {
                uint visible_quad_len = BKE_pbvh_count_grid_quads((BLI_bitmap **)buffers->grid_hidden,
                                                                  grid_indices,
                                                                  totgrid,
                                                                  key->grid_size);

                /* totally hidden node, return here to avoid BufferData with zero below. */
                if (visible_quad_len == 0) {
                        return;
                }

                gpu_pbvh_grid_fill_index_buffers(buffers, grid_indices, visible_quad_len, totgrid, key->grid_size);
        }

        uint vbo_index_offset = 0;
        /* Fill the VBO. */
        if (gpu_pbvh_vert_buf_data_set(buffers, vert_count)) {
                for (i = 0; i < totgrid; ++i) {
                        CCGElem *grid = grids[grid_indices[i]];
                        int vbo_index = vbo_index_offset;

                        if (buffers->smooth) {
                                for (y = 0; y < key->grid_size; y++) {
                                        for (x = 0; x < key->grid_size; x++) {
                                                CCGElem *elem = CCG_grid_elem(key, grid, x, y);
                                                GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index, CCG_elem_co(key, elem));

                                                short no_short[3];
                                                normal_float_to_short_v3(no_short, CCG_elem_no(key, elem));
                                                GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index, no_short);

                                                if (has_mask && show_mask) {
                                                        float fmask = *CCG_elem_mask(key, elem);
                                                        GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index, &fmask);
                                                        empty_mask = empty_mask && (fmask == 0.0f);
                                                }
                                                vbo_index += 1;
                                        }
                                }
                                vbo_index_offset += key->grid_area;
                        }
                        else {
                                for (j = 0; j < key->grid_size - 1; j++) {
                                        for (k = 0; k < key->grid_size - 1; k++) {
                                                CCGElem *elems[4] = {
                                                        CCG_grid_elem(key, grid, k, j),
                                                        CCG_grid_elem(key, grid, k + 1, j),
                                                        CCG_grid_elem(key, grid, k + 1, j + 1),
                                                        CCG_grid_elem(key, grid, k, j + 1),
                                                };
                                                float *co[4] = {
                                                    CCG_elem_co(key, elems[0]),
                                                    CCG_elem_co(key, elems[1]),
                                                    CCG_elem_co(key, elems[2]),
                                                    CCG_elem_co(key, elems[3]),
                                                };

                                                float fno[3];
                                                short no_short[3];
                                                /* Note: the triangle indices are ordered clockwise, hence the inverted vertex order here. */
                                                normal_quad_v3(fno, co[3], co[2], co[1], co[0]);
                                                normal_float_to_short_v3(no_short, fno);

                                                GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index + 0, co[0]);
                                                GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index + 0, no_short);
                                                GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index + 1, co[1]);
                                                GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index + 1, no_short);
                                                GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index + 2, co[2]);
                                                GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index + 2, no_short);
                                                GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index + 3, co[3]);
                                                GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index + 3, no_short);

                                                if (has_mask && show_mask) {
                                                        float fmask = (*CCG_elem_mask(key, elems[0]) +
                                                                       *CCG_elem_mask(key, elems[1]) +
                                                                       *CCG_elem_mask(key, elems[2]) +
                                                                       *CCG_elem_mask(key, elems[3])) * 0.25f;
                                                        GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index + 0, &fmask);
                                                        GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index + 1, &fmask);
                                                        GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index + 2, &fmask);
                                                        GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index + 3, &fmask);
                                                        empty_mask = empty_mask && (fmask == 0.0f);
                                                }
                                                vbo_index += 4;
                                        }
                                }
                                vbo_index_offset += SQUARE(key->grid_size - 1) * 4;
                        }
                }

                gpu_pbvh_batch_init(buffers, GPU_PRIM_TRIS);
        }

        buffers->grids = grids;
        buffers->grid_indices = grid_indices;
        buffers->totgrid = totgrid;
        buffers->grid_flag_mats = grid_flag_mats;
        buffers->gridkey = *key;
        buffers->show_mask = !empty_mask;

        //printf("node updated %p\n", buffers);
}

GPU_PBVH_Buffers *GPU_pbvh_grid_buffers_build(
        int totgrid,
        BLI_bitmap **grid_hidden)
{
        GPU_PBVH_Buffers *buffers;

        buffers = MEM_callocN(sizeof(GPU_PBVH_Buffers), "GPU_Buffers");
        buffers->grid_hidden = grid_hidden;
        buffers->totgrid = totgrid;

        buffers->show_mask = false;

        return buffers;
}
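
/* Editor's note: a hedged sketch (not in the original file) of the grid
 * backend pairing. 'grids', 'grid_flag_mats', 'grid_hidden', 'key' and the
 * node_* variables are hypothetical, normally taken from the multires CCG. */
#if 0
GPU_PBVH_Buffers *buffers = GPU_pbvh_grid_buffers_build(node_totgrid, grid_hidden);
GPU_pbvh_grid_buffers_update(
        buffers, grids, grid_flag_mats, node_grid_indices,
        node_totgrid, &key, GPU_PBVH_BUFFERS_SHOW_MASK);
#endif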

/** \} */

/* -------------------------------------------------------------------- */
/** \name BMesh PBVH
 * \{ */

/* Output a BMVert into the vertex buffer.
 *
 * The vertex is skipped if hidden; otherwise the output goes into
 * index '*v_index' in 'vert_buf' and '*v_index' is incremented.
 */
static void gpu_bmesh_vert_to_buffer_copy__gwn(
        BMVert *v,
        GPUVertBuf *vert_buf,
        int *v_index,
        const float fno[3],
        const float *fmask,
        const int cd_vert_mask_offset,
        const bool show_mask,
        bool *empty_mask)
{
        if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN)) {

                /* Set coord, normal, and mask */
                GPU_vertbuf_attr_set(vert_buf, g_vbo_id.pos, *v_index, v->co);

                short no_short[3];
                normal_float_to_short_v3(no_short, fno ? fno : v->no);
                GPU_vertbuf_attr_set(vert_buf, g_vbo_id.nor, *v_index, no_short);

                if (show_mask) {
                        float effective_mask = fmask ? *fmask
                                                     : BM_ELEM_CD_GET_FLOAT(v, cd_vert_mask_offset);
                        GPU_vertbuf_attr_set(vert_buf, g_vbo_id.msk, *v_index, &effective_mask);
                        *empty_mask = *empty_mask && (effective_mask == 0.0f);
                }

                /* Assign index for use in the triangle index buffer */
                /* note: caller must set:  bm->elem_index_dirty |= BM_VERT; */
                BM_elem_index_set(v, (*v_index)); /* set_dirty! */

                (*v_index)++;
        }
}

/* Return the total number of vertices that don't have BM_ELEM_HIDDEN set */
static int gpu_bmesh_vert_visible_count(GSet *bm_unique_verts,
                                        GSet *bm_other_verts)
{
        GSetIterator gs_iter;
        int totvert = 0;

        GSET_ITER (gs_iter, bm_unique_verts) {
                BMVert *v = BLI_gsetIterator_getKey(&gs_iter);
                if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN))
                        totvert++;
        }
        GSET_ITER (gs_iter, bm_other_verts) {
                BMVert *v = BLI_gsetIterator_getKey(&gs_iter);
                if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN))
                        totvert++;
        }

        return totvert;
}

/* Return the total number of visible faces */
static int gpu_bmesh_face_visible_count(GSet *bm_faces)
{
        GSetIterator gh_iter;
        int totface = 0;

        GSET_ITER (gh_iter, bm_faces) {
                BMFace *f = BLI_gsetIterator_getKey(&gh_iter);

                if (!BM_elem_flag_test(f, BM_ELEM_HIDDEN))
                        totface++;
        }

        return totface;
}

/* Creates a vertex buffer (coordinate, normal, mask) and, if smooth
 * shading, an element index buffer. */
void GPU_pbvh_bmesh_buffers_update(
        GPU_PBVH_Buffers *buffers,
        BMesh *bm,
        GSet *bm_faces,
        GSet *bm_unique_verts,
        GSet *bm_other_verts,
        const int update_flags)
{
        const bool show_mask = (update_flags & GPU_PBVH_BUFFERS_SHOW_MASK) != 0;
        int tottri, totvert, maxvert = 0;
        bool empty_mask = true;

        /* TODO, make mask layer optional for bmesh buffer */
        const int cd_vert_mask_offset = CustomData_get_offset(&bm->vdata, CD_PAINT_MASK);

        /* Count visible triangles */
        tottri = gpu_bmesh_face_visible_count(bm_faces);

        if (buffers->smooth) {
                /* Smooth needs to recreate index buffer, so we have to invalidate the batch. */
                GPU_BATCH_DISCARD_SAFE(buffers->triangles);
                GPU_BATCH_DISCARD_SAFE(buffers->lines);
                GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf);
                GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
                /* Count visible vertices */
                totvert = gpu_bmesh_vert_visible_count(bm_unique_verts, bm_other_verts);
        }
        else {
                GPU_BATCH_DISCARD_SAFE(buffers->lines);
                GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf);
                totvert = tottri * 3;
        }

        if (!tottri) {
                buffers->tot_tri = 0;
                return;
        }

        /* Fill vertex buffer */
        if (gpu_pbvh_vert_buf_data_set(buffers, totvert)) {
                int v_index = 0;

                if (buffers->smooth) {
                        GSetIterator gs_iter;

                        /* Vertices get an index assigned for use in the triangle
                         * index buffer */
                        bm->elem_index_dirty |= BM_VERT;

                        GSET_ITER (gs_iter, bm_unique_verts) {
                                gpu_bmesh_vert_to_buffer_copy__gwn(
                                        BLI_gsetIterator_getKey(&gs_iter),
                                        buffers->vert_buf, &v_index, NULL, NULL,
                                        cd_vert_mask_offset,
                                        show_mask, &empty_mask);
                        }

                        GSET_ITER (gs_iter, bm_other_verts) {
                                gpu_bmesh_vert_to_buffer_copy__gwn(
                                        BLI_gsetIterator_getKey(&gs_iter),
                                        buffers->vert_buf, &v_index, NULL, NULL,
                                        cd_vert_mask_offset,
                                        show_mask, &empty_mask);
                        }

                        maxvert = v_index;
                }
                else {
                        GSetIterator gs_iter;

                        GPUIndexBufBuilder elb_lines;
                        GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, totvert);

                        GSET_ITER (gs_iter, bm_faces) {
                                BMFace *f = BLI_gsetIterator_getKey(&gs_iter);

                                BLI_assert(f->len == 3);

                                if (!BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
                                        BMVert *v[3];
                                        float fmask = 0.0f;
                                        int i;

                                        BM_face_as_array_vert_tri(f, v);

                                        /* Average mask value */
                                        for (i = 0; i < 3; i++) {
                                                fmask += BM_ELEM_CD_GET_FLOAT(v[i], cd_vert_mask_offset);
                                        }
                                        fmask /= 3.0f;

                                        GPU_indexbuf_add_line_verts(&elb_lines, v_index + 0, v_index + 1);
                                        GPU_indexbuf_add_line_verts(&elb_lines, v_index + 1, v_index + 2);
                                        GPU_indexbuf_add_line_verts(&elb_lines, v_index + 2, v_index + 0);

                                        for (i = 0; i < 3; i++) {
                                                gpu_bmesh_vert_to_buffer_copy__gwn(
                                                        v[i], buffers->vert_buf,
                                                        &v_index, f->no, &fmask,
                                                        cd_vert_mask_offset,
                                                        show_mask, &empty_mask);
                                        }
                                }
                        }

                        buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
                        buffers->tot_tri = tottri;
                }

                /* gpu_bmesh_vert_to_buffer_copy__gwn() sets dirty index values */
                bm->elem_index_dirty |= BM_VERT;
        }
        else {
                /* Memory map failed */
                return;
        }

        if (buffers->smooth) {
                /* Fill the triangle buffer */
                GPUIndexBufBuilder elb, elb_lines;
                GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tottri, maxvert);
                GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, maxvert);

                /* Fill triangle index buffer */
                {
                        GSetIterator gs_iter;

                        GSET_ITER (gs_iter, bm_faces) {
                                BMFace *f = BLI_gsetIterator_getKey(&gs_iter);

                                if (!BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
                                        BMVert *v[3];

                                        BM_face_as_array_vert_tri(f, v);

                                        uint idx[3] = {BM_elem_index_get(v[0]), BM_elem_index_get(v[1]), BM_elem_index_get(v[2])};
                                        GPU_indexbuf_add_tri_verts(&elb, idx[0], idx[1], idx[2]);

                                        GPU_indexbuf_add_line_verts(&elb_lines, idx[0], idx[1]);
                                        GPU_indexbuf_add_line_verts(&elb_lines, idx[1], idx[2]);
                                        GPU_indexbuf_add_line_verts(&elb_lines, idx[2], idx[0]);
                                }
                        }

                        buffers->tot_tri = tottri;

                        if (buffers->index_buf == NULL) {
                                buffers->index_buf = GPU_indexbuf_build(&elb);
                        }
                        else {
                                GPU_indexbuf_build_in_place(&elb, buffers->index_buf);
                        }

                        buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
                }
        }

        buffers->show_mask = !empty_mask;

        gpu_pbvh_batch_init(buffers, GPU_PRIM_TRIS);
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name Generic
 * \{ */

GPU_PBVH_Buffers *GPU_pbvh_bmesh_buffers_build(bool smooth_shading)
{
        GPU_PBVH_Buffers *buffers;

        buffers = MEM_callocN(sizeof(GPU_PBVH_Buffers), "GPU_Buffers");
        buffers->use_bmesh = true;
        buffers->smooth = smooth_shading;
        buffers->show_mask = true;

        return buffers;
}
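
/* Editor's note: a hedged sketch (not in the original file) of the BMesh
 * backend pairing. 'bm' and the GSets are hypothetical, normally owned by
 * the dynamic-topology PBVH node. */
#if 0
GPU_PBVH_Buffers *buffers = GPU_pbvh_bmesh_buffers_build(smooth_shading);
GPU_pbvh_bmesh_buffers_update(
        buffers, bm, node_bm_faces, node_bm_unique_verts, node_bm_other_verts,
        GPU_PBVH_BUFFERS_SHOW_MASK);
#endif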

GPUBatch *GPU_pbvh_buffers_batch_get(GPU_PBVH_Buffers *buffers, bool fast, bool wires)
{
        if (wires) {
                return (fast && buffers->lines_fast) ?
                        buffers->lines_fast : buffers->lines;
        }
        else {
                return (fast && buffers->triangles_fast) ?
                        buffers->triangles_fast : buffers->triangles;
        }
}

bool GPU_pbvh_buffers_has_mask(GPU_PBVH_Buffers *buffers)
{
        return buffers->show_mask;
}

void GPU_pbvh_buffers_free(GPU_PBVH_Buffers *buffers)
{
        if (buffers) {
                GPU_BATCH_DISCARD_SAFE(buffers->lines);
                GPU_BATCH_DISCARD_SAFE(buffers->lines_fast);
                GPU_BATCH_DISCARD_SAFE(buffers->triangles);
                GPU_BATCH_DISCARD_SAFE(buffers->triangles_fast);
                GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf_fast);
                GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf);
                GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf_fast);
                GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
                GPU_VERTBUF_DISCARD_SAFE(buffers->vert_buf);

                MEM_freeN(buffers);
        }
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name Debug
 * \{ */

/* debug function, draws the pbvh BB */
void GPU_pbvh_BB_draw(float min[3], float max[3], bool leaf, uint pos)
{
        if (leaf)
                immUniformColor4f(0.0, 1.0, 0.0, 0.5);
        else
                immUniformColor4f(1.0, 0.0, 0.0, 0.5);

        /* TODO(merwin): revisit this after we have mutable VertexBuffers
         * could keep a static batch & index buffer, change the VBO contents per draw
         */

        immBegin(GPU_PRIM_LINES, 24);

        /* top */
        immVertex3f(pos, min[0], min[1], max[2]);
        immVertex3f(pos, min[0], max[1], max[2]);

        immVertex3f(pos, min[0], max[1], max[2]);
        immVertex3f(pos, max[0], max[1], max[2]);

        immVertex3f(pos, max[0], max[1], max[2]);
        immVertex3f(pos, max[0], min[1], max[2]);

        immVertex3f(pos, max[0], min[1], max[2]);
        immVertex3f(pos, min[0], min[1], max[2]);

        /* bottom */
        immVertex3f(pos, min[0], min[1], min[2]);
        immVertex3f(pos, min[0], max[1], min[2]);

        immVertex3f(pos, min[0], max[1], min[2]);
        immVertex3f(pos, max[0], max[1], min[2]);

        immVertex3f(pos, max[0], max[1], min[2]);
        immVertex3f(pos, max[0], min[1], min[2]);

        immVertex3f(pos, max[0], min[1], min[2]);
        immVertex3f(pos, min[0], min[1], min[2]);

        /* sides */
        immVertex3f(pos, min[0], min[1], min[2]);
        immVertex3f(pos, min[0], min[1], max[2]);

        immVertex3f(pos, min[0], max[1], min[2]);
        immVertex3f(pos, min[0], max[1], max[2]);

        immVertex3f(pos, max[0], max[1], min[2]);
        immVertex3f(pos, max[0], max[1], max[2]);

        immVertex3f(pos, max[0], min[1], min[2]);
        immVertex3f(pos, max[0], min[1], max[2]);

        immEnd();
}
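
/* Editor's note: a hedged sketch (not in the original file) of typical
 * immediate-mode setup around GPU_pbvh_BB_draw(). The shader choice and the
 * 'bb_min' / 'bb_max' / 'is_leaf' variables are hypothetical. */
#if 0
GPUVertFormat *format = immVertexFormat();
uint pos = GPU_vertformat_attr_add(format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
immBindBuiltinProgram(GPU_SHADER_3D_UNIFORM_COLOR);
GPU_pbvh_BB_draw(bb_min, bb_max, is_leaf, pos);
immUnbindProgram();
#endif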

/** \} */

void GPU_pbvh_fix_linking(void)
{
}