/*
 * ***** BEGIN GPL LICENSE BLOCK *****
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2017 by Blender Foundation.
 * All rights reserved.
 *
 * ***** END GPL LICENSE BLOCK *****
 */

/** \file draw_cache_impl_metaball.c
 *  \ingroup draw
 *
 * \brief MetaBall API for render engines
 */

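/* Usage sketch (illustrative, not part of this file): a draw engine fetches a
 * batch from one of the DRW_metaball_batch_cache_get_* getters below and
 * submits it for drawing. The call site here is a hypothetical example;
 * `shgrp` stands for a DRWShadingGroup set up elsewhere.
 *
 *   GPUBatch *geom = DRW_metaball_batch_cache_get_triangles_with_normals(ob);
 *   if (geom != NULL) {
 *           DRW_shgroup_call_add(shgrp, geom, ob->obmat);
 *   }
 *
 * The cache itself lives on the MetaBall data-block and is rebuilt lazily
 * after being tagged dirty (see DRW_mball_batch_cache_dirty_tag). */
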
#include "MEM_guardedalloc.h"

#include "BLI_utildefines.h"

#include "DNA_meta_types.h"
#include "DNA_object_types.h"

#include "BKE_curve.h"
#include "BKE_mball.h"

#include "GPU_batch.h"

#include "DRW_render.h"

#include "draw_cache_impl.h"  /* own include */


static void metaball_batch_cache_clear(MetaBall *mb);

/* ---------------------------------------------------------------------- */
/* MetaBall GPUBatch Cache */

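/* The cache is stored on MetaBall.batch_cache and filled in lazily by the
 * getters below. Ownership (as set up in this file): `batch` owns its index
 * buffer but only references the shared `pos_nor_in_order` vertex buffer,
 * which is discarded separately in metaball_batch_cache_clear(). */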
typedef struct MetaBallBatchCache {
        GPUBatch *batch;
        GPUBatch **shaded_triangles;
        int mat_len;

        /* Shared */
        GPUVertBuf *pos_nor_in_order;

        /* Wireframe */
        struct {
                GPUBatch *batch;
        } face_wire;

        /* settings to determine if cache is invalid */
        bool is_dirty;
} MetaBallBatchCache;

/* GPUBatch cache management. */

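/* Return true when an up-to-date cache already exists for this metaball. */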
static bool metaball_batch_cache_valid(MetaBall *mb)
{
        MetaBallBatchCache *cache = mb->batch_cache;

        if (cache == NULL) {
                return false;
        }

        return cache->is_dirty == false;
}

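/* Allocate the cache on first use and reset all entries; the GPU resources
 * are (re)built on demand by the getters. */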
static void metaball_batch_cache_init(MetaBall *mb)
{
        MetaBallBatchCache *cache = mb->batch_cache;

        if (!cache) {
                cache = mb->batch_cache = MEM_mallocN(sizeof(*cache), __func__);
        }
        cache->batch = NULL;
        cache->mat_len = 0;
        cache->shaded_triangles = NULL;
        cache->is_dirty = false;
        cache->pos_nor_in_order = NULL;
        cache->face_wire.batch = NULL;
}

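/* Ensure a usable cache: clear and re-init when the current one is missing or dirty. */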
static MetaBallBatchCache *metaball_batch_cache_get(MetaBall *mb)
{
        if (!metaball_batch_cache_valid(mb)) {
                metaball_batch_cache_clear(mb);
                metaball_batch_cache_init(mb);
        }
        return mb->batch_cache;
}

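/* Mark the cache as needing a rebuild; the GPU data is discarded and
 * recreated on the next access. */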
void DRW_mball_batch_cache_dirty_tag(MetaBall *mb, int mode)
{
        MetaBallBatchCache *cache = mb->batch_cache;
        if (cache == NULL) {
                return;
        }
        switch (mode) {
                case BKE_MBALL_BATCH_DIRTY_ALL:
                        cache->is_dirty = true;
                        break;
                default:
                        BLI_assert(0);
        }
}

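/* Discard all GPU resources but keep the cache struct itself allocated. */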
static void metaball_batch_cache_clear(MetaBall *mb)
{
        MetaBallBatchCache *cache = mb->batch_cache;
        if (!cache) {
                return;
        }

        GPU_BATCH_DISCARD_SAFE(cache->face_wire.batch);
        GPU_BATCH_DISCARD_SAFE(cache->batch);
        GPU_VERTBUF_DISCARD_SAFE(cache->pos_nor_in_order);
        /* Note: shaded_triangles[0] is the same batch as cache->batch,
         * which was already discarded above. */
        MEM_SAFE_FREE(cache->shaded_triangles);
        cache->mat_len = 0;
}

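/* Free both the GPU resources and the cache struct. */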
void DRW_mball_batch_cache_free(MetaBall *mb)
{
        metaball_batch_cache_clear(mb);
        MEM_SAFE_FREE(mb->batch_cache);
}

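/* Lazily build the shared position/normal vertex buffer from the object's
 * evaluated display list. */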
static GPUVertBuf *mball_batch_cache_get_pos_and_normals(Object *ob, MetaBallBatchCache *cache)
{
        if (cache->pos_nor_in_order == NULL) {
                ListBase *lb = &ob->runtime.curve_cache->disp;
                cache->pos_nor_in_order = MEM_callocN(sizeof(GPUVertBuf), __func__);
                DRW_displist_vertbuf_create_pos_and_nor(lb, cache->pos_nor_in_order);
        }
        return cache->pos_nor_in_order;
}

/* -------------------------------------------------------------------- */

/** \name Public Object/MetaBall API
 * \{ */

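/* Triangle batch for the whole surface, built from the basis metaball's
 * display list. Only the basis element owns the geometry, so non-basis
 * objects return NULL. */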
GPUBatch *DRW_metaball_batch_cache_get_triangles_with_normals(Object *ob)
{
        if (!BKE_mball_is_basis(ob)) {
                return NULL;
        }

        MetaBall *mb = ob->data;
        MetaBallBatchCache *cache = metaball_batch_cache_get(mb);

        if (cache->batch == NULL) {
                ListBase *lb = &ob->runtime.curve_cache->disp;
                GPUIndexBuf *ibo = MEM_callocN(sizeof(GPUIndexBuf), __func__);
                DRW_displist_indexbuf_create_triangles_in_order(lb, ibo);
                cache->batch = GPU_batch_create_ex(
                        GPU_PRIM_TRIS,
                        mball_batch_cache_get_pos_and_normals(ob, cache),
                        ibo,
                        GPU_BATCH_OWNS_INDEX);
        }

        return cache->batch;
}

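/* Per-material batch array: only slot 0 is filled (with the triangle batch);
 * the remaining gpumat_array_len - 1 slots are left NULL. */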
GPUBatch **DRW_metaball_batch_cache_get_surface_shaded(Object *ob, MetaBall *mb, struct GPUMaterial **UNUSED(gpumat_array), uint gpumat_array_len)
{
        if (!BKE_mball_is_basis(ob)) {
                return NULL;
        }

        MetaBallBatchCache *cache = metaball_batch_cache_get(mb);
        if (cache->shaded_triangles == NULL) {
                cache->mat_len = gpumat_array_len;
                cache->shaded_triangles = MEM_callocN(sizeof(*cache->shaded_triangles) * cache->mat_len, __func__);
                cache->shaded_triangles[0] = DRW_metaball_batch_cache_get_triangles_with_normals(ob);
                for (int i = 1; i < cache->mat_len; ++i) {
                        cache->shaded_triangles[i] = NULL;
                }
        }
        return cache->shaded_triangles;
}

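/* Batch for the face-wireframe display: the surface triangles with an extra
 * vertex buffer carrying the wireframe data. */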
GPUBatch *DRW_metaball_batch_cache_get_wireframes_face(Object *ob)
{
        if (!BKE_mball_is_basis(ob)) {
                return NULL;
        }

        MetaBall *mb = ob->data;
        MetaBallBatchCache *cache = metaball_batch_cache_get(mb);

        if (cache->face_wire.batch == NULL) {
                ListBase *lb = &ob->runtime.curve_cache->disp;

                GPUVertBuf *vbo_pos_nor = MEM_callocN(sizeof(GPUVertBuf), __func__);
                GPUVertBuf *vbo_wireframe_data = MEM_callocN(sizeof(GPUVertBuf), __func__);

                DRW_displist_vertbuf_create_pos_and_nor_and_uv_tess(lb, vbo_pos_nor, NULL);
                DRW_displist_vertbuf_create_wireframe_data_tess(lb, vbo_wireframe_data);

                cache->face_wire.batch = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo_pos_nor, NULL, GPU_BATCH_OWNS_VBO);
                GPU_batch_vertbuf_add_ex(cache->face_wire.batch, vbo_wireframe_data, true);
        }

        return cache->face_wire.batch;
}

/** \} */