/*
 * Copyright 2016, Blender Foundation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Contributor(s): Blender Institute
 *
 */

/** \file blender/draw/intern/draw_manager_shader.c
 *  \ingroup draw
 */

#include "draw_manager.h"

#include "DNA_world_types.h"
#include "DNA_material_types.h"

#include "BLI_listbase.h"
#include "BLI_string.h"
#include "BLI_string_utils.h"
#include "BLI_threads.h"
#include "BLI_task.h"

#include "BKE_global.h"
#include "BKE_main.h"

#include "GPU_shader.h"
#include "GPU_material.h"

#include "WM_api.h"
#include "WM_types.h"

extern char datatoc_gpu_shader_2D_vert_glsl[];
extern char datatoc_gpu_shader_3D_vert_glsl[];
extern char datatoc_common_fullscreen_vert_glsl[];

/* -------------------------------------------------------------------- */

/** \name Deferred Compilation (DRW_deferred)
 *
 * Since compiling shaders can take a long time, we do it in a non-blocking
 * manner in another thread.
 *
 * \{ */

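/* Typical entry point (sketch only, for orientation): an engine calls
 * DRW_shader_create_from_material() below; when not rendering an image the
 * GPUMaterial is queued here and compiled by the "Shaders Compilation" wmJob
 * instead of blocking the viewport. */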
typedef struct DRWDeferredShader {
	struct DRWDeferredShader *prev, *next;

	GPUMaterial *mat;
	char *vert, *geom, *frag, *defs;
} DRWDeferredShader;

typedef struct DRWShaderCompiler {
	ListBase queue; /* DRWDeferredShader */
	SpinLock list_lock;

	DRWDeferredShader *mat_compiling;
	ThreadMutex compilation_lock;

	void *ogl_context;

	int shaders_done; /* To compute progress. */
} DRWShaderCompiler;

static void drw_deferred_shader_free(DRWDeferredShader *dsh)
{
	/* Make sure it is not queued before freeing. */
	MEM_SAFE_FREE(dsh->vert);
	MEM_SAFE_FREE(dsh->geom);
	MEM_SAFE_FREE(dsh->frag);
	MEM_SAFE_FREE(dsh->defs);

	MEM_freeN(dsh);
}

static void drw_deferred_shader_queue_free(ListBase *queue)
{
	DRWDeferredShader *dsh;
	while ((dsh = BLI_pophead(queue))) {
		drw_deferred_shader_free(dsh);
	}
}

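/* Main loop of the compilation job. Runs in a worker thread with the
 * job-owned OpenGL context bound: pops shaders from the tail of the queue
 * and compiles them one by one until the queue is empty or the job is
 * stopped. */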
static void drw_deferred_shader_compilation_exec(void *custom_data, short *stop, short *do_update, float *progress)
{
	DRWShaderCompiler *comp = (DRWShaderCompiler *)custom_data;
	void *ogl_context = comp->ogl_context;

	WM_opengl_context_activate(ogl_context);

	while (true) {
		BLI_spin_lock(&comp->list_lock);

		if (*stop != 0) {
			/* We don't want the user to be able to cancel the compilation,
			 * but the WM can kill the task if we are closing Blender. */
			BLI_spin_unlock(&comp->list_lock);
			break;
		}

		/* Pop the tail because it is less likely to lock the main thread
		 * if all GPUMaterials are to be freed (see DRW_deferred_shader_remove()). */
		comp->mat_compiling = BLI_poptail(&comp->queue);
		if (comp->mat_compiling == NULL) {
			/* No more shaders to compile. */
			BLI_spin_unlock(&comp->list_lock);
			break;
		}

		comp->shaders_done++;
		int total = BLI_listbase_count(&comp->queue) + comp->shaders_done;

		BLI_mutex_lock(&comp->compilation_lock);
		BLI_spin_unlock(&comp->list_lock);

		/* Do the compilation. */
		GPU_material_generate_pass(
		        comp->mat_compiling->mat,
		        comp->mat_compiling->vert,
		        comp->mat_compiling->geom,
		        comp->mat_compiling->frag,
		        comp->mat_compiling->defs);

		*progress = (float)comp->shaders_done / (float)total;
		*do_update = true;

		glFlush();
		BLI_mutex_unlock(&comp->compilation_lock);

		drw_deferred_shader_free(comp->mat_compiling);
	}

	WM_opengl_context_release(ogl_context);
}

static void drw_deferred_shader_compilation_free(void *custom_data)
{
	DRWShaderCompiler *comp = (DRWShaderCompiler *)custom_data;

	drw_deferred_shader_queue_free(&comp->queue);

	BLI_spin_end(&comp->list_lock);
	BLI_mutex_end(&comp->compilation_lock);

	if (comp->ogl_context) {
		/* Only destroy if the job owns the context. */
		WM_opengl_context_dispose(comp->ogl_context);
	}

	MEM_freeN(comp);
}

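/* Queue a GPUMaterial for background compilation, or compile it immediately
 * when rendering an image. The shader sources are duplicated, appended to
 * the (possibly already running) "Shaders Compilation" wmJob, and the OpenGL
 * context is handed over from the previous job data so only one is ever
 * created. */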
static void drw_deferred_shader_add(
        GPUMaterial *mat, const char *vert, const char *geom, const char *frag_lib, const char *defines)
{
	/* Do not defer the compilation if we are rendering an image. */
	if (DRW_state_is_image_render()) {
		/* Double checking that this GPUMaterial is not going to be
		 * compiled by another thread. */
		DRW_deferred_shader_remove(mat);
		GPU_material_generate_pass(mat, vert, geom, frag_lib, defines);
		return;
	}

	DRWDeferredShader *dsh = MEM_callocN(sizeof(DRWDeferredShader), "Deferred Shader");

	dsh->mat = mat;
	if (vert)     dsh->vert = BLI_strdup(vert);
	if (geom)     dsh->geom = BLI_strdup(geom);
	if (frag_lib) dsh->frag = BLI_strdup(frag_lib);
	if (defines)  dsh->defs = BLI_strdup(defines);

	BLI_assert(DST.draw_ctx.evil_C);
	wmWindowManager *wm = CTX_wm_manager(DST.draw_ctx.evil_C);
	wmWindow *win = CTX_wm_window(DST.draw_ctx.evil_C);
	Scene *scene = DST.draw_ctx.scene;

	/* Get the running job or a new one if none is running. Can only have one job per type & owner. */
	wmJob *wm_job = WM_jobs_get(wm, win, scene, "Shaders Compilation",
	                            WM_JOB_PROGRESS | WM_JOB_SUSPEND, WM_JOB_TYPE_SHADER_COMPILATION);

	DRWShaderCompiler *old_comp = (DRWShaderCompiler *)WM_jobs_customdata_get(wm_job);

	DRWShaderCompiler *comp = MEM_callocN(sizeof(DRWShaderCompiler), "DRWShaderCompiler");
	BLI_spin_init(&comp->list_lock);
	BLI_mutex_init(&comp->compilation_lock);

	if (old_comp) {
		BLI_spin_lock(&old_comp->list_lock);
		BLI_movelisttolist(&comp->queue, &old_comp->queue);
		BLI_spin_unlock(&old_comp->list_lock);
		/* Do not recreate the context, just pass ownership. */
		comp->ogl_context = old_comp->ogl_context;
		old_comp->ogl_context = NULL;
	}

	BLI_addtail(&comp->queue, dsh);

	/* Create only one context. */
	if (comp->ogl_context == NULL) {
		comp->ogl_context = WM_opengl_context_create();
		WM_opengl_context_activate(DST.ogl_context);
	}

	WM_jobs_customdata_set(wm_job, comp, drw_deferred_shader_compilation_free);
	WM_jobs_timer(wm_job, 0.1, NC_MATERIAL | ND_SHADING_DRAW, 0);
	WM_jobs_callbacks(wm_job, drw_deferred_shader_compilation_exec, NULL, NULL, NULL);
	WM_jobs_start(wm, wm_job);
}

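/* Remove a GPUMaterial from every running compilation job queue. If the
 * material is currently being compiled, block until that compilation is
 * done (by acquiring the compilation mutex) so the caller can safely free
 * the material. */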
void DRW_deferred_shader_remove(GPUMaterial *mat)
{
	Scene *scene = GPU_material_scene(mat);

	for (wmWindowManager *wm = G.main->wm.first; wm; wm = wm->id.next) {
		if (WM_jobs_test(wm, scene, WM_JOB_TYPE_SHADER_COMPILATION) == false) {
			/* No job running, do not create a new one by calling WM_jobs_get. */
			continue;
		}
		for (wmWindow *win = wm->windows.first; win; win = win->next) {
			wmJob *wm_job = WM_jobs_get(wm, win, scene, "Shaders Compilation",
			                            WM_JOB_PROGRESS | WM_JOB_SUSPEND, WM_JOB_TYPE_SHADER_COMPILATION);

			DRWShaderCompiler *comp = (DRWShaderCompiler *)WM_jobs_customdata_get(wm_job);
			if (comp != NULL) {
				BLI_spin_lock(&comp->list_lock);
				DRWDeferredShader *dsh;
				dsh = (DRWDeferredShader *)BLI_findptr(&comp->queue, mat, offsetof(DRWDeferredShader, mat));
				if (dsh) {
					BLI_remlink(&comp->queue, dsh);
				}

				/* Wait for compilation to finish. */
				if (comp->mat_compiling != NULL) {
					if (comp->mat_compiling->mat == mat) {
						BLI_mutex_lock(&comp->compilation_lock);
						BLI_mutex_unlock(&comp->compilation_lock);
					}
				}
				BLI_spin_unlock(&comp->list_lock);

				if (dsh) {
					drw_deferred_shader_free(dsh);
				}
			}
		}
	}
}

/** \} */

/* -------------------------------------------------------------------- */

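/* Thin wrappers around GPU_shader_create(), optionally prepending a library
 * string or using one of the bundled vertex shaders declared above. */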
GPUShader *DRW_shader_create(const char *vert, const char *geom, const char *frag, const char *defines)
{
	return GPU_shader_create(vert, frag, geom, NULL, defines);
}

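/* Example (sketch only; the datatoc_* names here are hypothetical):
 *
 *   GPUShader *sh = DRW_shader_create_with_lib(
 *           datatoc_my_engine_vert_glsl, NULL, datatoc_my_engine_frag_glsl,
 *           datatoc_my_engine_lib_glsl, "#define USE_MY_OPTION\n");
 *
 * The library string is prepended to both the vertex and fragment sources
 * (and to the geometry source when one is given). */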
GPUShader *DRW_shader_create_with_lib(
        const char *vert, const char *geom, const char *frag, const char *lib, const char *defines)
{
	GPUShader *sh;
	char *vert_with_lib = NULL;
	char *frag_with_lib = NULL;
	char *geom_with_lib = NULL;

	vert_with_lib = BLI_string_joinN(lib, vert);
	frag_with_lib = BLI_string_joinN(lib, frag);
	if (geom) {
		geom_with_lib = BLI_string_joinN(lib, geom);
	}

	sh = GPU_shader_create(vert_with_lib, frag_with_lib, geom_with_lib, NULL, defines);

	MEM_freeN(vert_with_lib);
	MEM_freeN(frag_with_lib);
	if (geom) {
		MEM_freeN(geom_with_lib);
	}

	return sh;
}

GPUShader *DRW_shader_create_2D(const char *frag, const char *defines)
{
	return GPU_shader_create(datatoc_gpu_shader_2D_vert_glsl, frag, NULL, NULL, defines);
}

GPUShader *DRW_shader_create_3D(const char *frag, const char *defines)
{
	return GPU_shader_create(datatoc_gpu_shader_3D_vert_glsl, frag, NULL, NULL, defines);
}

GPUShader *DRW_shader_create_fullscreen(const char *frag, const char *defines)
{
	return GPU_shader_create(datatoc_common_fullscreen_vert_glsl, frag, NULL, NULL, defines);
}

GPUShader *DRW_shader_create_3D_depth_only(void)
{
	return GPU_shader_get_builtin_shader(GPU_SHADER_3D_DEPTH_ONLY);
}

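/* Look up an already compiled (or queued) GPUMaterial for the given engine
 * type and options. During image rendering a queued material is reported as
 * missing (NULL) on purpose, see the comment inside. */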
GPUMaterial *DRW_shader_find_from_world(World *wo, const void *engine_type, int options)
{
	GPUMaterial *mat = GPU_material_from_nodetree_find(&wo->gpumaterial, engine_type, options);
	if (DRW_state_is_image_render()) {
		if (mat != NULL && GPU_material_status(mat) == GPU_MAT_QUEUED) {
			/* XXX Hack: we return NULL so that the engine will call DRW_shader_create_from_XXX
			 * with the shader code and we will resume the compilation from there. */
			return NULL;
		}
	}
	return mat;
}

GPUMaterial *DRW_shader_find_from_material(Material *ma, const void *engine_type, int options)
{
	GPUMaterial *mat = GPU_material_from_nodetree_find(&ma->gpumaterial, engine_type, options);
	if (DRW_state_is_image_render()) {
		if (mat != NULL && GPU_material_status(mat) == GPU_MAT_QUEUED) {
			/* XXX Hack: we return NULL so that the engine will call DRW_shader_create_from_XXX
			 * with the shader code and we will resume the compilation from there. */
			return NULL;
		}
	}
	return mat;
}

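/* Create (or reuse, when rendering an image) a GPUMaterial from a world or
 * material node tree and schedule it for deferred compilation. */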
GPUMaterial *DRW_shader_create_from_world(
        struct Scene *scene, World *wo, const void *engine_type, int options,
        const char *vert, const char *geom, const char *frag_lib, const char *defines)
{
	GPUMaterial *mat = NULL;
	if (DRW_state_is_image_render()) {
		mat = GPU_material_from_nodetree_find(&wo->gpumaterial, engine_type, options);
	}

	if (mat == NULL) {
		mat = GPU_material_from_nodetree(
		        scene, wo->nodetree, &wo->gpumaterial, engine_type, options,
		        vert, geom, frag_lib, defines, true);
	}

	drw_deferred_shader_add(mat, vert, geom, frag_lib, defines);

	return mat;
}

GPUMaterial *DRW_shader_create_from_material(
        struct Scene *scene, Material *ma, const void *engine_type, int options,
        const char *vert, const char *geom, const char *frag_lib, const char *defines)
{
	GPUMaterial *mat = NULL;
	if (DRW_state_is_image_render()) {
		mat = GPU_material_from_nodetree_find(&ma->gpumaterial, engine_type, options);
	}

	if (mat == NULL) {
		mat = GPU_material_from_nodetree(
		        scene, ma->nodetree, &ma->gpumaterial, engine_type, options,
		        vert, geom, frag_lib, defines, true);
	}

	drw_deferred_shader_add(mat, vert, geom, frag_lib, defines);

	return mat;
}

void DRW_shader_free(GPUShader *shader)
{
	GPU_shader_free(shader);
}