Cleanup: draw manager headers
source/blender/draw/intern/draw_cache_impl_metaball.c
/*
 * ***** BEGIN GPL LICENSE BLOCK *****
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2017 by Blender Foundation.
 * All rights reserved.
 *
 * ***** END GPL LICENSE BLOCK *****
 */

/** \file draw_cache_impl_metaball.c
 *  \ingroup draw
 *
 * \brief MetaBall API for render engines.
 *
 * Builds and caches GPU batches for metaball objects from their evaluated
 * display list, so render engines can fetch draw geometry without
 * re-tessellating every frame.
 */

#include "MEM_guardedalloc.h"

#include "BLI_utildefines.h"

#include "DNA_meta_types.h"
#include "DNA_object_types.h"

#include "BKE_curve.h"
#include "BKE_mball.h"

#include "GPU_batch.h"

#include "draw_cache_impl.h"  /* own include */

static void metaball_batch_cache_clear(MetaBall *mb);

/* ---------------------------------------------------------------------- */
/* MetaBall GPUBatch Cache */

typedef struct MetaBallBatchCache {
        GPUBatch *batch;
        GPUBatch **shaded_triangles;
        int mat_len;

        /* Shared */
        GPUVertBuf *pos_nor_in_order;

        /* Wireframe */
        struct {
                GPUBatch *batch;
        } face_wire;

        /* Settings to determine if the cache is invalid. */
        bool is_dirty;
} MetaBallBatchCache;
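
/* Lazy invalidation: tagging only flips `is_dirty`; the next batch request
 * clears and re-initializes the cache. A minimal sketch of the flow
 * (illustration only, not code from this file): */
#if 0
DRW_mball_batch_cache_dirty_tag(mb, BKE_MBALL_BATCH_DIRTY_ALL);
/* ... later, when a render engine asks for geometry ... */
MetaBallBatchCache *cache = metaball_batch_cache_get(mb);  /* rebuilt fresh */
#endif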

/* GPUBatch cache management. */

static bool metaball_batch_cache_valid(MetaBall *mb)
{
        MetaBallBatchCache *cache = mb->batch_cache;

        if (cache == NULL) {
                return false;
        }

        return cache->is_dirty == false;
}

static void metaball_batch_cache_init(MetaBall *mb)
{
        MetaBallBatchCache *cache = mb->batch_cache;

        if (!cache) {
                cache = mb->batch_cache = MEM_mallocN(sizeof(*cache), __func__);
        }
        cache->batch = NULL;
        cache->mat_len = 0;
        cache->shaded_triangles = NULL;
        cache->is_dirty = false;
        cache->pos_nor_in_order = NULL;
        cache->face_wire.batch = NULL;
}

static MetaBallBatchCache *metaball_batch_cache_get(MetaBall *mb)
{
        if (!metaball_batch_cache_valid(mb)) {
                metaball_batch_cache_clear(mb);
                metaball_batch_cache_init(mb);
        }
        return mb->batch_cache;
}

void DRW_mball_batch_cache_dirty_tag(MetaBall *mb, int mode)
{
        MetaBallBatchCache *cache = mb->batch_cache;
        if (cache == NULL) {
                return;
        }
        switch (mode) {
                case BKE_MBALL_BATCH_DIRTY_ALL:
                        cache->is_dirty = true;
                        break;
                default:
                        BLI_assert(0);
        }
}
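
/* Sketch: BKE code does not call into the draw module directly; following
 * the pattern used for other object types, this pair is presumably
 * registered as callbacks at draw-manager startup (the hook names below are
 * an assumption, check BKE_mball.h): */
#if 0
BKE_mball_batch_cache_dirty_tag_cb = DRW_mball_batch_cache_dirty_tag;
BKE_mball_batch_cache_free_cb = DRW_mball_batch_cache_free;
#endif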

static void metaball_batch_cache_clear(MetaBall *mb)
{
        MetaBallBatchCache *cache = mb->batch_cache;
        if (!cache) {
                return;
        }

        GPU_BATCH_DISCARD_SAFE(cache->face_wire.batch);
        GPU_BATCH_DISCARD_SAFE(cache->batch);
        GPU_VERTBUF_DISCARD_SAFE(cache->pos_nor_in_order);
        /* Note: shaded_triangles[0] is already freed by cache->batch. */
        MEM_SAFE_FREE(cache->shaded_triangles);
        cache->mat_len = 0;
}

void DRW_mball_batch_cache_free(MetaBall *mb)
{
        metaball_batch_cache_clear(mb);
        MEM_SAFE_FREE(mb->batch_cache);
}

static GPUVertBuf *mball_batch_cache_get_pos_and_normals(Object *ob, MetaBallBatchCache *cache)
{
        if (cache->pos_nor_in_order == NULL) {
                ListBase *lb = &ob->runtime.curve_cache->disp;
                cache->pos_nor_in_order = MEM_callocN(sizeof(GPUVertBuf), __func__);
                DRW_displist_vertbuf_create_pos_and_nor(lb, cache->pos_nor_in_order);
        }
        return cache->pos_nor_in_order;
}
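
/* `ob->runtime.curve_cache` is filled during depsgraph evaluation; the
 * getters below assume it is valid. A defensive caller sketch (assumption
 * for illustration, real callers rely on evaluation order instead): */
#if 0
if (ob->runtime.curve_cache != NULL) {
        GPUBatch *geom = DRW_metaball_batch_cache_get_triangles_with_normals(ob);
}
#endif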

/* -------------------------------------------------------------------- */

/** \name Public Object/MetaBall API
 * \{ */

GPUBatch *DRW_metaball_batch_cache_get_triangles_with_normals(Object *ob)
{
        if (!BKE_mball_is_basis(ob)) {
                return NULL;
        }

        MetaBall *mb = ob->data;
        MetaBallBatchCache *cache = metaball_batch_cache_get(mb);

        if (cache->batch == NULL) {
                ListBase *lb = &ob->runtime.curve_cache->disp;
                GPUIndexBuf *ibo = MEM_callocN(sizeof(GPUIndexBuf), __func__);
                DRW_displist_indexbuf_create_triangles_in_order(lb, ibo);
                cache->batch = GPU_batch_create_ex(
                        GPU_PRIM_TRIS,
                        mball_batch_cache_get_pos_and_normals(ob, cache),
                        ibo,
                        GPU_BATCH_OWNS_INDEX);
        }

        return cache->batch;
}
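
/* Example (sketch): how a draw engine might request this batch from its
 * cache-populate callback. `shgrp` and the DRW_shgroup_call_add() usage are
 * assumptions for illustration, not part of this file. */
#if 0
static void my_engine_cache_populate(void *UNUSED(vedata), Object *ob)
{
        if (ob->type == OB_MBALL) {
                GPUBatch *geom = DRW_metaball_batch_cache_get_triangles_with_normals(ob);
                if (geom != NULL) {
                        DRW_shgroup_call_add(shgrp, geom, ob->obmat);
                }
        }
}
#endif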

GPUBatch **DRW_metaball_batch_cache_get_surface_shaded(
        Object *ob, MetaBall *mb,
        struct GPUMaterial **UNUSED(gpumat_array), uint gpumat_array_len)
{
        if (!BKE_mball_is_basis(ob)) {
                return NULL;
        }

        MetaBallBatchCache *cache = metaball_batch_cache_get(mb);
        if (cache->shaded_triangles == NULL) {
                cache->mat_len = gpumat_array_len;
                cache->shaded_triangles = MEM_callocN(sizeof(*cache->shaded_triangles) * cache->mat_len, __func__);
                cache->shaded_triangles[0] = DRW_metaball_batch_cache_get_triangles_with_normals(ob);
                for (int i = 1; i < cache->mat_len; ++i) {
                        cache->shaded_triangles[i] = NULL;
                }
        }
        return cache->shaded_triangles;
}
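
/* Metaballs draw as a single material batch: only slot 0 of the returned
 * array is a real batch, the remaining slots stay NULL. A caller sketch
 * (`shgrp_array` and `materials_len` are hypothetical names): */
#if 0
GPUBatch **mat_geom = DRW_metaball_batch_cache_get_surface_shaded(
        ob, ob->data, gpumat_array, materials_len);
for (int i = 0; i < materials_len; i++) {
        if (mat_geom[i] != NULL) {
                DRW_shgroup_call_add(shgrp_array[i], mat_geom[i], ob->obmat);
        }
}
#endif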

GPUBatch *DRW_metaball_batch_cache_get_wireframes_face(Object *ob)
{
        if (!BKE_mball_is_basis(ob)) {
                return NULL;
        }

        MetaBall *mb = ob->data;
        MetaBallBatchCache *cache = metaball_batch_cache_get(mb);

        if (cache->face_wire.batch == NULL) {
                ListBase *lb = &ob->runtime.curve_cache->disp;

                GPUVertBuf *vbo_pos_nor = MEM_callocN(sizeof(GPUVertBuf), __func__);
                GPUVertBuf *vbo_wireframe_data = MEM_callocN(sizeof(GPUVertBuf), __func__);

                DRW_displist_vertbuf_create_pos_and_nor_and_uv_tess(lb, vbo_pos_nor, NULL);
                DRW_displist_vertbuf_create_wireframe_data_tess(lb, vbo_wireframe_data);

                cache->face_wire.batch = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo_pos_nor, NULL, GPU_BATCH_OWNS_VBO);
                GPU_batch_vertbuf_add_ex(cache->face_wire.batch, vbo_wireframe_data, true);
        }

        return cache->face_wire.batch;
}
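
/* Ownership note: both VBOs end up owned by `face_wire.batch` above,
 * `vbo_pos_nor` via GPU_BATCH_OWNS_VBO and `vbo_wireframe_data` via the
 * `own_vbo = true` argument to GPU_batch_vertbuf_add_ex(), so the single
 * discard in metaball_batch_cache_clear() frees everything (sketch): */
#if 0
GPU_BATCH_DISCARD_SAFE(cache->face_wire.batch);  /* also frees both VBOs */
#endif

/** \} */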