Cleanup: remove redundant, invalid info from headers
[blender.git] / source / blender / draw / intern / draw_cache_impl_metaball.c
1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  *
16  * The Original Code is Copyright (C) 2017 by Blender Foundation.
17  * All rights reserved.
18  */
19
20 /** \file draw_cache_impl_metaball.c
21  *  \ingroup draw
22  *
23  * \brief MetaBall API for render engines
24  */
25
26 #include "MEM_guardedalloc.h"
27
28 #include "BLI_utildefines.h"
29
30 #include "DNA_meta_types.h"
31 #include "DNA_object_types.h"
32
33 #include "BKE_curve.h"
34 #include "BKE_mball.h"
35
36 #include "GPU_batch.h"
37
38
39 #include "draw_cache_impl.h"  /* own include */
40
41
42 static void metaball_batch_cache_clear(MetaBall *mb);
43
44 /* ---------------------------------------------------------------------- */
45 /* MetaBall GPUBatch Cache */
46
typedef struct MetaBallBatchCache {
        /* Triangle batch for the whole surface (shared by all materials). */
        GPUBatch *batch;
        /* Per-material batch array of length 'mat_len'.
         * Note: entry [0] aliases 'batch' (see metaball_batch_cache_clear). */
        GPUBatch **shaded_triangles;
        int mat_len;

        /* Shared vertex buffer: positions + normals, lazily built. */
        GPUVertBuf *pos_nor_in_order;

        /* Wireframe */
        struct {
                GPUBatch *batch;
        } face_wire;

        /* settings to determine if cache is invalid */
        bool is_dirty;
} MetaBallBatchCache;
63
64 /* GPUBatch cache management. */
65
66 static bool metaball_batch_cache_valid(MetaBall *mb)
67 {
68         MetaBallBatchCache *cache = mb->batch_cache;
69
70         if (cache == NULL) {
71                 return false;
72         }
73
74         return cache->is_dirty == false;
75 }
76
77 static void metaball_batch_cache_init(MetaBall *mb)
78 {
79         MetaBallBatchCache *cache = mb->batch_cache;
80
81         if (!cache) {
82                 cache = mb->batch_cache = MEM_mallocN(sizeof(*cache), __func__);
83         }
84         cache->batch = NULL;
85         cache->mat_len = 0;
86         cache->shaded_triangles = NULL;
87         cache->is_dirty = false;
88         cache->pos_nor_in_order = NULL;
89         cache->face_wire.batch = NULL;
90 }
91
92 static MetaBallBatchCache *metaball_batch_cache_get(MetaBall *mb)
93 {
94         if (!metaball_batch_cache_valid(mb)) {
95                 metaball_batch_cache_clear(mb);
96                 metaball_batch_cache_init(mb);
97         }
98         return mb->batch_cache;
99 }
100
101 void DRW_mball_batch_cache_dirty_tag(MetaBall *mb, int mode)
102 {
103         MetaBallBatchCache *cache = mb->batch_cache;
104         if (cache == NULL) {
105                 return;
106         }
107         switch (mode) {
108                 case BKE_MBALL_BATCH_DIRTY_ALL:
109                         cache->is_dirty = true;
110                         break;
111                 default:
112                         BLI_assert(0);
113         }
114 }
115
116 static void metaball_batch_cache_clear(MetaBall *mb)
117 {
118         MetaBallBatchCache *cache = mb->batch_cache;
119         if (!cache) {
120                 return;
121         }
122
123         GPU_BATCH_DISCARD_SAFE(cache->face_wire.batch);
124         GPU_BATCH_DISCARD_SAFE(cache->batch);
125         GPU_VERTBUF_DISCARD_SAFE(cache->pos_nor_in_order);
126         /* Note: shaded_triangles[0] is already freed by cache->batch */
127         MEM_SAFE_FREE(cache->shaded_triangles);
128         cache->mat_len = 0;
129 }
130
131 void DRW_mball_batch_cache_free(MetaBall *mb)
132 {
133         metaball_batch_cache_clear(mb);
134         MEM_SAFE_FREE(mb->batch_cache);
135 }
136
137 static GPUVertBuf *mball_batch_cache_get_pos_and_normals(Object *ob, MetaBallBatchCache *cache)
138 {
139         if (cache->pos_nor_in_order == NULL) {
140                 ListBase *lb = &ob->runtime.curve_cache->disp;
141                 cache->pos_nor_in_order = MEM_callocN(sizeof(GPUVertBuf), __func__);
142                 DRW_displist_vertbuf_create_pos_and_nor(lb, cache->pos_nor_in_order);
143         }
144         return cache->pos_nor_in_order;
145 }
146
147 /* -------------------------------------------------------------------- */
148 /** \name Public Object/MetaBall API
149  * \{ */
150
151 GPUBatch *DRW_metaball_batch_cache_get_triangles_with_normals(Object *ob)
152 {
153         if (!BKE_mball_is_basis(ob)) {
154                 return NULL;
155         }
156
157         MetaBall *mb = ob->data;
158         MetaBallBatchCache *cache = metaball_batch_cache_get(mb);
159
160         if (cache->batch == NULL) {
161                 ListBase *lb = &ob->runtime.curve_cache->disp;
162                 GPUIndexBuf *ibo = MEM_callocN(sizeof(GPUIndexBuf), __func__);
163                 DRW_displist_indexbuf_create_triangles_in_order(lb, ibo);
164                 cache->batch = GPU_batch_create_ex(
165                         GPU_PRIM_TRIS,
166                         mball_batch_cache_get_pos_and_normals(ob, cache),
167                         ibo,
168                         GPU_BATCH_OWNS_INDEX);
169         }
170
171         return cache->batch;
172 }
173
174 GPUBatch **DRW_metaball_batch_cache_get_surface_shaded(Object *ob, MetaBall *mb, struct GPUMaterial **UNUSED(gpumat_array), uint gpumat_array_len)
175 {
176         if (!BKE_mball_is_basis(ob)) {
177                 return NULL;
178         }
179
180         MetaBallBatchCache *cache = metaball_batch_cache_get(mb);
181         if (cache->shaded_triangles == NULL) {
182                 cache->mat_len = gpumat_array_len;
183                 cache->shaded_triangles = MEM_callocN(sizeof(*cache->shaded_triangles) * cache->mat_len, __func__);
184                 cache->shaded_triangles[0] = DRW_metaball_batch_cache_get_triangles_with_normals(ob);
185                 for (int i = 1; i < cache->mat_len; ++i) {
186                         cache->shaded_triangles[i] = NULL;
187                 }
188         }
189         return cache->shaded_triangles;
190
191 }
192
193 GPUBatch *DRW_metaball_batch_cache_get_wireframes_face(Object *ob)
194 {
195         if (!BKE_mball_is_basis(ob)) {
196                 return NULL;
197         }
198
199         MetaBall *mb = ob->data;
200         MetaBallBatchCache *cache = metaball_batch_cache_get(mb);
201
202         if (cache->face_wire.batch == NULL) {
203                 ListBase *lb = &ob->runtime.curve_cache->disp;
204
205                 GPUVertBuf *vbo_pos_nor = MEM_callocN(sizeof(GPUVertBuf), __func__);
206                 GPUVertBuf *vbo_wireframe_data = MEM_callocN(sizeof(GPUVertBuf), __func__);
207
208                 DRW_displist_vertbuf_create_pos_and_nor_and_uv_tess(lb, vbo_pos_nor, NULL);
209                 DRW_displist_vertbuf_create_wireframe_data_tess(lb, vbo_wireframe_data);
210
211                 cache->face_wire.batch = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo_pos_nor, NULL, GPU_BATCH_OWNS_VBO);
212                 GPU_batch_vertbuf_add_ex(cache->face_wire.batch, vbo_wireframe_data, true);
213         }
214
215         return cache->face_wire.batch;
216 }