Sculpt Draw: Add support for wireframe geometry
authorClément Foucault <foucault.clem@gmail.com>
Thu, 14 Feb 2019 19:24:13 +0000 (20:24 +0100)
committerClément Foucault <foucault.clem@gmail.com>
Mon, 18 Feb 2019 13:17:57 +0000 (14:17 +0100)
This introduces the wireframe batches. Creating the index buffer does
not seem to slow down the sculpt in my testing (but it is kind of hard to
test reliably)

This includes a bit of cleanup in gpu_buffers.c.

source/blender/blenkernel/BKE_pbvh.h
source/blender/blenkernel/intern/pbvh.c
source/blender/draw/intern/DRW_render.h
source/blender/draw/intern/draw_manager_data.c
source/blender/draw/modes/overlay_mode.c
source/blender/draw/modes/sculpt_mode.c
source/blender/draw/modes/shaders/overlay_face_wireframe_vert.glsl
source/blender/gpu/GPU_buffers.h
source/blender/gpu/intern/gpu_buffers.c

index 8127d68..87d1a6c 100644 (file)
@@ -125,7 +125,7 @@ bool BKE_pbvh_node_find_nearest_to_ray(
 /* Drawing */
 
 void BKE_pbvh_draw_cb(
-        PBVH *bvh, float (*planes)[4], float (*fnors)[3], bool fast, bool only_mask,
+        PBVH *bvh, float (*planes)[4], float (*fnors)[3], bool fast, bool wires, bool only_mask,
         void (*draw_fn)(void *user_data, struct GPUBatch *batch), void *user_data);
 
 /* PBVH Access */
index 45a0872..69617a3 100644 (file)
@@ -2049,6 +2049,7 @@ struct PBVHNodeDrawCallbackData {
        void *user_data;
        bool fast;
        bool only_mask; /* Only draw nodes that have mask data. */
+       bool wires;
 };
 
 static void pbvh_node_draw_cb(PBVHNode *node, void *data_v)
@@ -2056,11 +2057,11 @@ static void pbvh_node_draw_cb(PBVHNode *node, void *data_v)
        struct PBVHNodeDrawCallbackData *data = data_v;
 
        if (!(node->flag & PBVH_FullyHidden)) {
-               GPUBatch *triangles = GPU_pbvh_buffers_batch_get(node->draw_buffers, data->fast);
+               GPUBatch *batch = GPU_pbvh_buffers_batch_get(node->draw_buffers, data->fast, data->wires);
                bool show_mask = GPU_pbvh_buffers_has_mask(node->draw_buffers);
                if (!data->only_mask || show_mask) {
-                       if (triangles != NULL) {
-                               data->draw_fn(data->user_data, triangles);
+                       if (batch != NULL) {
+                               data->draw_fn(data->user_data, batch);
                        }
                }
        }
@@ -2070,12 +2071,13 @@ static void pbvh_node_draw_cb(PBVHNode *node, void *data_v)
  * Version of #BKE_pbvh_draw that runs a callback.
  */
 void BKE_pbvh_draw_cb(
-        PBVH *bvh, float (*planes)[4], float (*fnors)[3], bool fast, bool only_mask,
+        PBVH *bvh, float (*planes)[4], float (*fnors)[3], bool fast, bool wires, bool only_mask,
         void (*draw_fn)(void *user_data, GPUBatch *batch), void *user_data)
 {
        struct PBVHNodeDrawCallbackData draw_data = {
                .only_mask = only_mask,
                .fast = fast,
+               .wires = wires,
                .draw_fn = draw_fn,
                .user_data = user_data,
        };
index 8f78953..259605b 100644 (file)
@@ -378,6 +378,7 @@ void DRW_shgroup_call_instances_add(
 void DRW_shgroup_call_object_instances_add(
         DRWShadingGroup *shgroup, struct GPUBatch *geom, struct Object *ob, uint *count);
 void DRW_shgroup_call_sculpt_add(DRWShadingGroup *shgroup, struct Object *ob, float (*obmat)[4]);
+void DRW_shgroup_call_sculpt_wires_add(DRWShadingGroup *shgroup, struct Object *ob, float (*obmat)[4]);
 void DRW_shgroup_call_generate_add(
         DRWShadingGroup *shgroup, DRWCallGenerateFn *geometry_fn, void *user_data, float (*obmat)[4]);
 void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup, const void *attr[], uint attr_len);
index 73825c7..54456d4 100644 (file)
@@ -581,7 +581,34 @@ static void sculpt_draw_cb(
 
        if (pbvh) {
                BKE_pbvh_draw_cb(
-                       pbvh, NULL, NULL, fast_mode, false,
+                       pbvh, NULL, NULL, fast_mode, false, false,
+                       (void (*)(void *, GPUBatch *))draw_fn, shgroup);
+       }
+}
+
+static void sculpt_draw_wires_cb(
+        DRWShadingGroup *shgroup,
+        void (*draw_fn)(DRWShadingGroup *shgroup, GPUBatch *geom),
+        void *user_data)
+{
+       Object *ob = user_data;
+
+       /* XXX should be ensured before but sometime it's not... go figure (see T57040). */
+       PBVH *pbvh = BKE_sculpt_object_pbvh_ensure(DST.draw_ctx.depsgraph, ob);
+
+       const DRWContextState *drwctx = DRW_context_state_get();
+       int fast_mode = 0;
+
+       if (drwctx->evil_C != NULL) {
+               Paint *p = BKE_paint_get_active_from_context(drwctx->evil_C);
+               if (p && (p->flags & PAINT_FAST_NAVIGATE)) {
+                       fast_mode = drwctx->rv3d->rflag & RV3D_NAVIGATING;
+               }
+       }
+
+       if (pbvh) {
+               BKE_pbvh_draw_cb(
+                       pbvh, NULL, NULL, fast_mode, true, false,
                        (void (*)(void *, GPUBatch *))draw_fn, shgroup);
        }
 }
@@ -591,6 +618,11 @@ void DRW_shgroup_call_sculpt_add(DRWShadingGroup *shgroup, Object *ob, float (*o
        DRW_shgroup_call_generate_add(shgroup, sculpt_draw_cb, ob, obmat);
 }
 
+void DRW_shgroup_call_sculpt_wires_add(DRWShadingGroup *shgroup, Object *ob, float (*obmat)[4])
+{
+       DRW_shgroup_call_generate_add(shgroup, sculpt_draw_wires_cb, ob, obmat);
+}
+
 void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup, const void *attr[], uint attr_len)
 {
 #ifdef USE_GPU_SELECT
index ea502e1..9929e4c 100644 (file)
@@ -319,11 +319,11 @@ static void overlay_cache_populate(void *vedata, Object *ob)
                        if (geom || is_sculpt_mode) {
                                shgrp = DRW_shgroup_create_sub(pd->face_wires_shgrp);
 
-                               static float all_wires_param = 10.0f;
-                               DRW_shgroup_uniform_float(
-                                       shgrp, "wireStepParam",
-                                       (all_wires || is_sculpt_mode) ? &all_wires_param : &pd->wire_step_param,
-                                       1);
+                               float wire_step_param = 10.0f;
+                               if (!is_sculpt_mode) {
+                                       wire_step_param = (all_wires) ? 1.0f : pd->wire_step_param;
+                               }
+                               DRW_shgroup_uniform_float_copy(shgrp, "wireStepParam", wire_step_param);
 
                                if (!(DRW_state_is_select() || DRW_state_is_depth())) {
                                        DRW_shgroup_stencil_mask(shgrp, stencil_mask);
@@ -332,7 +332,7 @@ static void overlay_cache_populate(void *vedata, Object *ob)
                                }
 
                                if (is_sculpt_mode) {
-                                       DRW_shgroup_call_sculpt_add(shgrp, ob, ob->obmat);
+                                       DRW_shgroup_call_sculpt_wires_add(shgrp, ob, ob->obmat);
                                }
                                else {
                                        DRW_shgroup_call_add(shgrp, geom, ob->obmat);
index 7631294..546270f 100644 (file)
@@ -177,7 +177,7 @@ static void sculpt_draw_mask_cb(
 
        if (pbvh) {
                BKE_pbvh_draw_cb(
-                       pbvh, NULL, NULL, false, true,
+                       pbvh, NULL, NULL, false, false, true,
                        (void (*)(void *, struct GPUBatch *))draw_fn, shgroup);
        }
 }
index 2a328a7..8cb4db2 100644 (file)
@@ -10,7 +10,7 @@ uniform float ofs;
 #ifndef USE_SCULPT
 float get_edge_sharpness(float wd)
 {
-       return (wd == 1.0) ? 1.0 : ((wd == 0.0) ? -1.0 : (wd + wireStepParam));
+       return ((wd == 1.0) ? 1.0 : ((wd == 0.0) ? -1.5 : wd)) + wireStepParam;
 }
 #else
 float get_edge_sharpness(float wd) { return 1.0; }
index 6e3b8f2..dc634b9 100644 (file)
@@ -80,7 +80,7 @@ void GPU_pbvh_grid_buffers_update(
         const int update_flags);
 
 /* draw */
-struct GPUBatch *GPU_pbvh_buffers_batch_get(GPU_PBVH_Buffers *buffers, bool fast);
+struct GPUBatch *GPU_pbvh_buffers_batch_get(GPU_PBVH_Buffers *buffers, bool fast, bool wires);
 
 bool GPU_pbvh_buffers_has_mask(GPU_PBVH_Buffers *buffers);
 
index 9004ad5..71c55da 100644 (file)
 
 struct GPU_PBVH_Buffers {
        GPUIndexBuf *index_buf, *index_buf_fast;
+       GPUIndexBuf *index_lines_buf, *index_lines_buf_fast;
        GPUVertBuf *vert_buf;
 
+       GPUBatch *lines;
+       GPUBatch *lines_fast;
        GPUBatch *triangles;
        GPUBatch *triangles_fast;
 
@@ -75,8 +78,6 @@ struct GPU_PBVH_Buffers {
        BLI_bitmap * const *grid_hidden;
        const int *grid_indices;
        int totgrid;
-       bool has_hidden;
-       bool is_index_buf_global;  /* Means index_buf uses global bvh's grid_common_gpu_buffer, **DO NOT** free it! */
 
        bool use_bmesh;
 
@@ -93,6 +94,12 @@ static struct {
        uint pos, nor, msk;
 } g_vbo_id = {0};
 
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name PBVH Utils
+ * \{ */
+
 /* Allocates a non-initialized buffer to be sent to GPU.
  * Return is false it indicates that the memory map failed. */
 static bool gpu_pbvh_vert_buf_data_set(GPU_PBVH_Buffers *buffers, uint vert_len)
@@ -141,8 +148,27 @@ static void gpu_pbvh_batch_init(GPU_PBVH_Buffers *buffers, GPUPrimType prim)
                        prim, buffers->vert_buf,
                        buffers->index_buf_fast);
        }
+
+       if (buffers->lines == NULL) {
+               BLI_assert(buffers->index_lines_buf != NULL);
+               buffers->lines = GPU_batch_create(
+                       GPU_PRIM_LINES, buffers->vert_buf,
+                       buffers->index_lines_buf);
+       }
+
+       if ((buffers->lines_fast == NULL) && buffers->index_lines_buf_fast) {
+               buffers->lines_fast = GPU_batch_create(
+                       GPU_PRIM_LINES, buffers->vert_buf,
+                       buffers->index_lines_buf_fast);
+       }
 }
 
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name Mesh PBVH
+ * \{ */
+
 void GPU_pbvh_mesh_buffers_update(
         GPU_PBVH_Buffers *buffers, const MVert *mvert,
         const int *vert_indices, int totvert, const float *vmask,
@@ -246,12 +272,7 @@ GPU_PBVH_Buffers *GPU_pbvh_mesh_buffers_build(
        buffers = MEM_callocN(sizeof(GPU_PBVH_Buffers), "GPU_Buffers");
 
        /* smooth or flat for all */
-#if 0
        buffers->smooth = mpoly[looptri[face_indices[0]].poly].flag & ME_SMOOTH;
-#else
-       /* for DrawManager we dont support mixed smooth/flat */
-       buffers->smooth = (mpoly[0].flag & ME_SMOOTH) != 0;
-#endif
 
        buffers->show_mask = false;
 
@@ -274,14 +295,19 @@ GPU_PBVH_Buffers *GPU_pbvh_mesh_buffers_build(
                return buffers;
        }
 
+       GPU_BATCH_DISCARD_SAFE(buffers->triangles);
+       GPU_BATCH_DISCARD_SAFE(buffers->lines);
+       GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
+       GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf);
+
        /* An element index buffer is used for smooth shading, but flat
-        * shading requires separate vertex normals so an index buffer is
+        * shading requires separate vertex normals so an index buffer
         * can't be used there. */
        if (buffers->smooth) {
-               /* Fill the triangle buffer */
-               buffers->index_buf = NULL;
-               GPUIndexBufBuilder elb;
+               /* Fill the triangle and line buffers. */
+               GPUIndexBufBuilder elb, elb_lines;
                GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tottri, INT_MAX);
+               GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, INT_MAX);
 
                for (i = 0; i < face_indices_len; ++i) {
                        const MLoopTri *lt = &looptri[face_indices[i]];
@@ -291,15 +317,33 @@ GPU_PBVH_Buffers *GPU_pbvh_mesh_buffers_build(
                                continue;
 
                        GPU_indexbuf_add_tri_verts(&elb, UNPACK3(face_vert_indices[i]));
+
+                       /* TODO skip "non-real" edges. */
+                       GPU_indexbuf_add_line_verts(&elb_lines, face_vert_indices[i][0], face_vert_indices[i][1]);
+                       GPU_indexbuf_add_line_verts(&elb_lines, face_vert_indices[i][1], face_vert_indices[i][2]);
+                       GPU_indexbuf_add_line_verts(&elb_lines, face_vert_indices[i][2], face_vert_indices[i][0]);
                }
                buffers->index_buf = GPU_indexbuf_build(&elb);
+               buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
        }
        else {
-               if (!buffers->is_index_buf_global) {
-                       GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
+               /* Fill only the line buffer. */
+               GPUIndexBufBuilder elb_lines;
+               GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, INT_MAX);
+
+               for (i = 0; i < face_indices_len; ++i) {
+                       const MLoopTri *lt = &looptri[face_indices[i]];
+
+                       /* Skip hidden faces */
+                       if (paint_is_face_hidden(lt, mvert, mloop))
+                               continue;
+
+                       /* TODO skip "non-real" edges. */
+                       GPU_indexbuf_add_line_verts(&elb_lines, i * 3 + 0, i * 3 + 1);
+                       GPU_indexbuf_add_line_verts(&elb_lines, i * 3 + 1, i * 3 + 2);
+                       GPU_indexbuf_add_line_verts(&elb_lines, i * 3 + 2, i * 3 + 0);
                }
-               buffers->index_buf = NULL;
-               buffers->is_index_buf_global = false;
+               buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
        }
 
        buffers->tot_tri = tottri;
@@ -314,21 +358,35 @@ GPU_PBVH_Buffers *GPU_pbvh_mesh_buffers_build(
        return buffers;
 }
 
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name Grid PBVH
+ * \{ */
+
 static void gpu_pbvh_grid_fill_fast_buffer(GPU_PBVH_Buffers *buffers, int totgrid, int gridsize)
 {
-       GPUIndexBufBuilder elb;
+       GPUIndexBufBuilder elb, elb_lines;
        if (buffers->smooth) {
-               GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, 6 * totgrid, INT_MAX);
+               GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, 4 * totgrid, INT_MAX);
+               GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, 2 * totgrid, INT_MAX);
                for (int i = 0; i < totgrid; i++) {
-                       GPU_indexbuf_add_generic_vert(&elb, i * gridsize * gridsize + gridsize - 1);
-                       GPU_indexbuf_add_generic_vert(&elb, i * gridsize * gridsize);
-                       GPU_indexbuf_add_generic_vert(&elb, (i + 1) * gridsize * gridsize - gridsize);
-                       GPU_indexbuf_add_generic_vert(&elb, (i + 1) * gridsize * gridsize - 1);
-                       GPU_indexbuf_add_generic_vert(&elb, i * gridsize * gridsize + gridsize - 1);
-                       GPU_indexbuf_add_generic_vert(&elb, (i + 1) * gridsize * gridsize - gridsize);
+                       const uint v0 = i * gridsize * gridsize + gridsize - 1;
+                       const uint v1 = i * gridsize * gridsize;
+                       const uint v2 = (i + 1) * gridsize * gridsize - gridsize;
+                       const uint v3 = (i + 1) * gridsize * gridsize - 1;
+
+                       GPU_indexbuf_add_tri_verts(&elb, v0, v1, v2);
+                       GPU_indexbuf_add_tri_verts(&elb, v3, v0, v2);
+
+                       GPU_indexbuf_add_line_verts(&elb_lines, v0, v1);
+                       GPU_indexbuf_add_line_verts(&elb_lines, v1, v2);
+                       GPU_indexbuf_add_line_verts(&elb_lines, v2, v3);
+                       GPU_indexbuf_add_line_verts(&elb_lines, v3, v0);
                }
        }
        else {
+               GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, 4 * totgrid, INT_MAX);
                GPU_indexbuf_init_ex(&elb, GPU_PRIM_TRI_STRIP, 5 * totgrid, INT_MAX, true);
                uint vbo_index_offset = 0;
                for (int i = 0; i < totgrid; i++) {
@@ -349,13 +407,13 @@ static void gpu_pbvh_grid_fill_fast_buffer(GPU_PBVH_Buffers *buffers, int totgri
                                        if (is_grid_start && is_row_start) {
                                                grid_indices[0] = vbo_index_offset + 0;
                                        }
-                                       else if (is_grid_start && is_row_end) {
+                                       if (is_grid_start && is_row_end) {
                                                grid_indices[1] = vbo_index_offset + 2;
                                        }
-                                       else if (is_grid_end && is_row_start) {
+                                       if (is_grid_end && is_row_start) {
                                                grid_indices[2] = vbo_index_offset + 1;
                                        }
-                                       else if (is_grid_end && is_row_end) {
+                                       if (is_grid_end && is_row_end) {
                                                grid_indices[3] = vbo_index_offset + 3;
                                        }
                                        vbo_index_offset += 4;
@@ -370,9 +428,15 @@ static void gpu_pbvh_grid_fill_fast_buffer(GPU_PBVH_Buffers *buffers, int totgri
                        GPU_indexbuf_add_generic_vert(&elb, grid_indices[3]);
                        GPU_indexbuf_add_generic_vert(&elb, grid_indices[2]);
                        GPU_indexbuf_add_primitive_restart(&elb);
+
+                       GPU_indexbuf_add_line_verts(&elb_lines, grid_indices[0], grid_indices[1]);
+                       GPU_indexbuf_add_line_verts(&elb_lines, grid_indices[1], grid_indices[3]);
+                       GPU_indexbuf_add_line_verts(&elb_lines, grid_indices[2], grid_indices[3]);
+                       GPU_indexbuf_add_line_verts(&elb_lines, grid_indices[2], grid_indices[0]);
                }
        }
        buffers->index_buf_fast = GPU_indexbuf_build(&elb);
+       buffers->index_lines_buf_fast = GPU_indexbuf_build(&elb_lines);
 }
 
 void GPU_pbvh_grid_buffers_update(
@@ -393,31 +457,30 @@ void GPU_pbvh_grid_buffers_update(
        uint vert_count = totgrid * key->grid_area;
 
        if (!buffers->smooth) {
-               vert_count = totgrid * (key->grid_size - 1) * (key->grid_size - 1) * 4;
+               vert_count = totgrid * SQUARE(key->grid_size - 1) * 4;
                /* Count strip restart verts (2 verts between each row and grid) */
                vert_count += ((totgrid - 1) + totgrid * (key->grid_size - 2)) * 2;
        }
 
-       if (buffers->smooth && buffers->index_buf == NULL) {
-               /* Not sure if really needed.  */
-               GPU_BATCH_DISCARD_SAFE(buffers->triangles_fast);
-               GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf_fast);
-       }
-       else if (!buffers->smooth && buffers->index_buf != NULL) {
-               /* Discard unnecessary index buffers. */
-               GPU_BATCH_DISCARD_SAFE(buffers->triangles);
-               GPU_BATCH_DISCARD_SAFE(buffers->triangles_fast);
-               GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
-               GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf_fast);
-       }
+       GPU_BATCH_DISCARD_SAFE(buffers->triangles);
+       GPU_BATCH_DISCARD_SAFE(buffers->triangles_fast);
+       GPU_BATCH_DISCARD_SAFE(buffers->lines);
+       GPU_BATCH_DISCARD_SAFE(buffers->lines_fast);
+       GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
+       GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf_fast);
+       GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf);
+       GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf_fast);
 
-       if (buffers->index_buf_fast == NULL) {
+       if (buffers->index_buf_fast == NULL && key->grid_size > 2) {
                gpu_pbvh_grid_fill_fast_buffer(buffers, totgrid, key->grid_size);
        }
 
        uint vbo_index_offset = 0;
        /* Build VBO */
        if (gpu_pbvh_vert_buf_data_set(buffers, vert_count)) {
+               GPUIndexBufBuilder elb_lines;
+               GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, totgrid * key->grid_area * 2, vert_count);
+
                for (i = 0; i < totgrid; ++i) {
                        CCGElem *grid = grids[grid_indices[i]];
                        int vbo_index = vbo_index_offset;
@@ -437,6 +500,13 @@ void GPU_pbvh_grid_buffers_update(
                                                        GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index, &fmask);
                                                        empty_mask = empty_mask && (fmask == 0.0f);
                                                }
+
+                                               if (x + 1 < key->grid_size) {
+                                                       GPU_indexbuf_add_line_verts(&elb_lines, vbo_index, vbo_index + 1);
+                                               }
+                                               if (y + 1 < key->grid_size) {
+                                                       GPU_indexbuf_add_line_verts(&elb_lines, vbo_index, vbo_index + key->grid_size);
+                                               }
                                                vbo_index += 1;
                                        }
                                }
@@ -488,6 +558,15 @@ void GPU_pbvh_grid_buffers_update(
                                                GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index + 3, co[1]);
                                                GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index + 3, no_short);
 
+                                               GPU_indexbuf_add_line_verts(&elb_lines, vbo_index, vbo_index + 1);
+                                               GPU_indexbuf_add_line_verts(&elb_lines, vbo_index, vbo_index + 2);
+                                               if (is_row_end) {
+                                                       GPU_indexbuf_add_line_verts(&elb_lines, vbo_index + 2, vbo_index + 3);
+                                               }
+                                               if (is_grid_end) {
+                                                       GPU_indexbuf_add_line_verts(&elb_lines, vbo_index + 1, vbo_index + 3);
+                                               }
+
                                                if (has_mask && show_mask) {
                                                        float fmask = (*CCG_elem_mask(key, elems[0]) +
                                                                       *CCG_elem_mask(key, elems[1]) +
@@ -507,7 +586,6 @@ void GPU_pbvh_grid_buffers_update(
                                                        vbo_index += 1;
                                                        vbo_index_offset += 1;
                                                }
-
                                                vbo_index += 4;
                                        }
                                }
@@ -515,6 +593,8 @@ void GPU_pbvh_grid_buffers_update(
                        }
                }
 
+               buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
+
                gpu_pbvh_batch_init(buffers, buffers->smooth ? GPU_PRIM_TRIS : GPU_PRIM_TRI_STRIP);
        }
 
@@ -612,17 +692,12 @@ GPU_PBVH_Buffers *GPU_pbvh_grid_buffers_build(
         * Could be moved to the update function somehow. */
        if (totquad == fully_visible_totquad) {
                buffers->index_buf = gpu_get_grid_buffer(gridsize, &buffers->tot_quad, totgrid);
-               buffers->has_hidden = false;
-               buffers->is_index_buf_global = false;
        }
        else {
                uint max_vert = totgrid * gridsize * gridsize;
                buffers->tot_quad = totquad;
 
                FILL_QUAD_BUFFER(max_vert, totquad, buffers->index_buf);
-
-               buffers->has_hidden = false;
-               buffers->is_index_buf_global = false;
        }
 
        return buffers;
@@ -630,6 +705,12 @@ GPU_PBVH_Buffers *GPU_pbvh_grid_buffers_build(
 
 #undef FILL_QUAD_BUFFER
 
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name BMesh PBVH
+ * \{ */
+
 /* Output a BMVert into a VertexBufferFormat array
  *
  * The vertex is skipped if hidden, otherwise the output goes into
@@ -730,10 +811,15 @@ void GPU_pbvh_bmesh_buffers_update(
        if (buffers->smooth) {
                /* Smooth needs to recreate index buffer, so we have to invalidate the batch. */
                GPU_BATCH_DISCARD_SAFE(buffers->triangles);
+               GPU_BATCH_DISCARD_SAFE(buffers->lines);
+               GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf);
+               GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
                /* Count visible vertices */
                totvert = gpu_bmesh_vert_visible_count(bm_unique_verts, bm_other_verts);
        }
        else {
+               GPU_BATCH_DISCARD_SAFE(buffers->lines);
+               GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf);
                totvert = tottri * 3;
        }
 
@@ -774,6 +860,9 @@ void GPU_pbvh_bmesh_buffers_update(
                else {
                        GSetIterator gs_iter;
 
+                       GPUIndexBufBuilder elb_lines;
+                       GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, totvert);
+
                        GSET_ITER (gs_iter, bm_faces) {
                                BMFace *f = BLI_gsetIterator_getKey(&gs_iter);
 
@@ -784,9 +873,6 @@ void GPU_pbvh_bmesh_buffers_update(
                                        float fmask = 0.0f;
                                        int i;
 
-#if 0
-                                       BM_iter_as_array(bm, BM_VERTS_OF_FACE, f, (void **)v, 3);
-#endif
                                        BM_face_as_array_vert_tri(f, v);
 
                                        /* Average mask value */
@@ -795,6 +881,10 @@ void GPU_pbvh_bmesh_buffers_update(
                                        }
                                        fmask /= 3.0f;
 
+                                       GPU_indexbuf_add_line_verts(&elb_lines, v_index + 0, v_index + 1);
+                                       GPU_indexbuf_add_line_verts(&elb_lines, v_index + 1, v_index + 2);
+                                       GPU_indexbuf_add_line_verts(&elb_lines, v_index + 2, v_index + 0);
+
                                        for (i = 0; i < 3; i++) {
                                                gpu_bmesh_vert_to_buffer_copy__gwn(
                                                        v[i], buffers->vert_buf,
@@ -805,6 +895,7 @@ void GPU_pbvh_bmesh_buffers_update(
                                }
                        }
 
+                       buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
                        buffers->tot_tri = tottri;
                }
 
@@ -818,15 +909,11 @@ void GPU_pbvh_bmesh_buffers_update(
 
        if (buffers->smooth) {
                /* Fill the triangle buffer */
-               buffers->index_buf = NULL;
-               GPUIndexBufBuilder elb;
+               GPUIndexBufBuilder elb, elb_lines;
                GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tottri, maxvert);
-
-               /* Initialize triangle index buffer */
-               buffers->is_index_buf_global = false;
+               GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, maxvert);
 
                /* Fill triangle index buffer */
-
                {
                        GSetIterator gs_iter;
 
@@ -837,8 +924,13 @@ void GPU_pbvh_bmesh_buffers_update(
                                        BMVert *v[3];
 
                                        BM_face_as_array_vert_tri(f, v);
-                                       GPU_indexbuf_add_tri_verts(
-                                               &elb, BM_elem_index_get(v[0]), BM_elem_index_get(v[1]), BM_elem_index_get(v[2]));
+
+                                       uint idx[3] = {BM_elem_index_get(v[0]), BM_elem_index_get(v[1]), BM_elem_index_get(v[2])};
+                                       GPU_indexbuf_add_tri_verts(&elb, idx[0], idx[1], idx[2]);
+
+                                       GPU_indexbuf_add_line_verts(&elb_lines, idx[0], idx[1]);
+                                       GPU_indexbuf_add_line_verts(&elb_lines, idx[1], idx[2]);
+                                       GPU_indexbuf_add_line_verts(&elb_lines, idx[2], idx[0]);
                                }
                        }
 
@@ -850,21 +942,22 @@ void GPU_pbvh_bmesh_buffers_update(
                        else {
                                GPU_indexbuf_build_in_place(&elb, buffers->index_buf);
                        }
+
+                       buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
                }
        }
-       else if (buffers->index_buf) {
-               if (!buffers->is_index_buf_global) {
-                       GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
-               }
-               buffers->index_buf = NULL;
-               buffers->is_index_buf_global = false;
-       }
 
        buffers->show_mask = !empty_mask;
 
        gpu_pbvh_batch_init(buffers, GPU_PRIM_TRIS);
 }
 
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name Generic
+ * \{ */
+
 GPU_PBVH_Buffers *GPU_pbvh_bmesh_buffers_build(bool smooth_shading)
 {
        GPU_PBVH_Buffers *buffers;
@@ -877,10 +970,16 @@ GPU_PBVH_Buffers *GPU_pbvh_bmesh_buffers_build(bool smooth_shading)
        return buffers;
 }
 
-GPUBatch *GPU_pbvh_buffers_batch_get(GPU_PBVH_Buffers *buffers, bool fast)
+GPUBatch *GPU_pbvh_buffers_batch_get(GPU_PBVH_Buffers *buffers, bool fast, bool wires)
 {
-       return (fast && buffers->triangles_fast) ?
-               buffers->triangles_fast : buffers->triangles;
+       if (wires) {
+               return (fast && buffers->lines_fast) ?
+                       buffers->lines_fast : buffers->lines;
+       }
+       else {
+               return (fast && buffers->triangles_fast) ?
+                       buffers->triangles_fast : buffers->triangles;
+       }
 }
 
 bool GPU_pbvh_buffers_has_mask(GPU_PBVH_Buffers *buffers)
@@ -891,18 +990,26 @@ bool GPU_pbvh_buffers_has_mask(GPU_PBVH_Buffers *buffers)
 void GPU_pbvh_buffers_free(GPU_PBVH_Buffers *buffers)
 {
        if (buffers) {
+               GPU_BATCH_DISCARD_SAFE(buffers->lines);
+               GPU_BATCH_DISCARD_SAFE(buffers->lines_fast);
                GPU_BATCH_DISCARD_SAFE(buffers->triangles);
                GPU_BATCH_DISCARD_SAFE(buffers->triangles_fast);
-               if (!buffers->is_index_buf_global) {
-                       GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
-               }
+               GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf_fast);
+               GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf);
                GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf_fast);
+               GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
                GPU_VERTBUF_DISCARD_SAFE(buffers->vert_buf);
 
                MEM_freeN(buffers);
        }
 }
 
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name Debug
+ * \{ */
+
 /* debug function, draws the pbvh BB */
 void GPU_pbvh_BB_draw(float min[3], float max[3], bool leaf, uint pos)
 {
@@ -959,6 +1066,8 @@ void GPU_pbvh_BB_draw(float min[3], float max[3], bool leaf, uint pos)
        immEnd();
 }
 
+/** \} */
+
 void GPU_pbvh_fix_linking()
 {
 }