Cleanup: remove redundant doxygen \file argument
source/blender/draw/intern/draw_manager_shader.c
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2016, Blender Foundation.
 */

/** \file \ingroup draw
 */

#include "DNA_object_types.h"
#include "DNA_world_types.h"
#include "DNA_material_types.h"

#include "BLI_listbase.h"
#include "BLI_string_utils.h"
#include "BLI_threads.h"

#include "BKE_global.h"
#include "BKE_main.h"

#include "DEG_depsgraph_query.h"

#include "GPU_shader.h"
#include "GPU_material.h"

#include "WM_api.h"
#include "WM_types.h"

#include "draw_manager.h"

extern char datatoc_gpu_shader_2D_vert_glsl[];
extern char datatoc_gpu_shader_3D_vert_glsl[];
extern char datatoc_gpu_shader_depth_only_frag_glsl[];
extern char datatoc_common_fullscreen_vert_glsl[];

#define USE_DEFERRED_COMPILATION 1

/* -------------------------------------------------------------------- */
/** \name Deferred Compilation (DRW_deferred)
 *
 * Since compiling shaders can take a long time, we do it in a non-blocking
 * manner in another thread.
 *
 * \{ */
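
/* Typical flow (illustrative sketch; the engine side lives outside this file):
 * an engine requests a node-tree material with deferred compilation enabled,
 * for example
 *
 *   GPUMaterial *mat = DRW_shader_create_from_material(
 *           scene, ma, engine_type, options, vert, geom, frag_lib, defines, true);
 *
 * If the returned material is still GPU_MAT_QUEUED, drw_deferred_shader_add()
 * has appended it to the queue below, and a "Shaders Compilation" wmJob worker
 * compiles it in its own GL context in drw_deferred_shader_compilation_exec(). */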

typedef struct DRWDeferredShader {
	struct DRWDeferredShader *prev, *next;

	GPUMaterial *mat;
} DRWDeferredShader;

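/* Shared job data. The spin lock guards 'queue' and 'mat_compiling'; the mutex
 * is held by the worker for the whole duration of GPU_material_compile() so
 * that DRW_deferred_shader_remove() can wait for an in-flight compile to end. */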
typedef struct DRWShaderCompiler {
	ListBase queue; /* DRWDeferredShader */
	SpinLock list_lock;

	DRWDeferredShader *mat_compiling;
	ThreadMutex compilation_lock;

	void *gl_context;
	bool own_context;

	int shaders_done; /* To compute progress. */
} DRWShaderCompiler;

static void drw_deferred_shader_free(DRWDeferredShader *dsh)
{
	/* Make sure it is not queued before freeing. */
	MEM_freeN(dsh);
}

static void drw_deferred_shader_queue_free(ListBase *queue)
{
	DRWDeferredShader *dsh;
	while ((dsh = BLI_pophead(queue))) {
		drw_deferred_shader_free(dsh);
	}
}

static void drw_deferred_shader_compilation_exec(void *custom_data, short *stop, short *do_update, float *progress)
{
	DRWShaderCompiler *comp = (DRWShaderCompiler *)custom_data;
	void *gl_context = comp->gl_context;

	WM_opengl_context_activate(gl_context);

	while (true) {
		BLI_spin_lock(&comp->list_lock);

		if (*stop != 0) {
			/* We don't want the user to be able to cancel the compilation,
			 * but the WM can kill the task if we are closing Blender. */
			BLI_spin_unlock(&comp->list_lock);
			break;
		}

		/* Pop from the tail because it is less likely to lock the main thread
		 * if all GPUMaterials are to be freed (see DRW_deferred_shader_remove()). */
		comp->mat_compiling = BLI_poptail(&comp->queue);
		if (comp->mat_compiling == NULL) {
			/* No more shaders to compile. */
			BLI_spin_unlock(&comp->list_lock);
			break;
		}

		comp->shaders_done++;
		int total = BLI_listbase_count(&comp->queue) + comp->shaders_done;

		BLI_mutex_lock(&comp->compilation_lock);
		BLI_spin_unlock(&comp->list_lock);

		/* Do the compilation. */
		GPU_material_compile(comp->mat_compiling->mat);

		*progress = (float)comp->shaders_done / (float)total;
		*do_update = true;

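		/* Flush before releasing the lock; presumably this submits the GL work
		 * from this context before the main context starts using the shader
		 * (an assumption, the original code does not state the reason). */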
		GPU_flush();
		BLI_mutex_unlock(&comp->compilation_lock);

		BLI_spin_lock(&comp->list_lock);
		drw_deferred_shader_free(comp->mat_compiling);
		comp->mat_compiling = NULL;
		BLI_spin_unlock(&comp->list_lock);
	}

	WM_opengl_context_release(gl_context);
}

static void drw_deferred_shader_compilation_free(void *custom_data)
{
	DRWShaderCompiler *comp = (DRWShaderCompiler *)custom_data;

	drw_deferred_shader_queue_free(&comp->queue);

	BLI_spin_end(&comp->list_lock);
	BLI_mutex_end(&comp->compilation_lock);

	if (comp->own_context) {
		/* Only destroy if the job owns the context. */
		WM_opengl_context_dispose(comp->gl_context);
	}

	MEM_freeN(comp);
}

static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)
{
	/* Do not defer the compilation if we are rendering an image. */
	if (DRW_state_is_image_render() || !USE_DEFERRED_COMPILATION || !deferred) {
		/* Double check that this GPUMaterial is not going to be
		 * compiled by another thread. */
		DRW_deferred_shader_remove(mat);
		GPU_material_compile(mat);
		return;
	}

	DRWDeferredShader *dsh = MEM_callocN(sizeof(DRWDeferredShader), "Deferred Shader");

	dsh->mat = mat;

	BLI_assert(DST.draw_ctx.evil_C);
	wmWindowManager *wm = CTX_wm_manager(DST.draw_ctx.evil_C);
	wmWindow *win = CTX_wm_window(DST.draw_ctx.evil_C);

	/* Use original scene ID since this is what the jobs template tests for. */
	Scene *scene = (Scene *)DEG_get_original_id(&DST.draw_ctx.scene->id);

	/* Get the running job or a new one if none is running. Can only have one job per type & owner. */
	wmJob *wm_job = WM_jobs_get(wm, win, scene, "Shaders Compilation",
	                            WM_JOB_PROGRESS | WM_JOB_SUSPEND, WM_JOB_TYPE_SHADER_COMPILATION);

	DRWShaderCompiler *old_comp = (DRWShaderCompiler *)WM_jobs_customdata_get(wm_job);

	DRWShaderCompiler *comp = MEM_callocN(sizeof(DRWShaderCompiler), "DRWShaderCompiler");
	BLI_spin_init(&comp->list_lock);
	BLI_mutex_init(&comp->compilation_lock);

	if (old_comp) {
		BLI_spin_lock(&old_comp->list_lock);
		BLI_movelisttolist(&comp->queue, &old_comp->queue);
		BLI_spin_unlock(&old_comp->list_lock);
		/* Do not recreate the context, just pass ownership. */
		if (old_comp->gl_context) {
			comp->gl_context = old_comp->gl_context;
			old_comp->own_context = false;
			comp->own_context = true;
		}
	}

	BLI_addtail(&comp->queue, dsh);

	/* Create only one context. */
	if (comp->gl_context == NULL) {
		comp->gl_context = WM_opengl_context_create();
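		/* Note: the newly created context is presumably left current after
		 * creation, so re-activate the main DRW context before drawing
		 * continues (this reading of WM_opengl_context_create() is an
		 * assumption, not stated in this file). */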
		WM_opengl_context_activate(DST.gl_context);
		comp->own_context = true;
	}

	WM_jobs_customdata_set(wm_job, comp, drw_deferred_shader_compilation_free);
	WM_jobs_timer(wm_job, 0.1, NC_MATERIAL | ND_SHADING_DRAW, 0);
	WM_jobs_callbacks(wm_job, drw_deferred_shader_compilation_exec, NULL, NULL, NULL);
	WM_jobs_start(wm, wm_job);
}

void DRW_deferred_shader_remove(GPUMaterial *mat)
{
	Scene *scene = GPU_material_scene(mat);

	for (wmWindowManager *wm = G_MAIN->wm.first; wm; wm = wm->id.next) {
		if (WM_jobs_test(wm, scene, WM_JOB_TYPE_SHADER_COMPILATION) == false) {
			/* No job running, do not create a new one by calling WM_jobs_get. */
			continue;
		}
		for (wmWindow *win = wm->windows.first; win; win = win->next) {
			wmJob *wm_job = WM_jobs_get(wm, win, scene, "Shaders Compilation",
			                            WM_JOB_PROGRESS | WM_JOB_SUSPEND, WM_JOB_TYPE_SHADER_COMPILATION);

			DRWShaderCompiler *comp = (DRWShaderCompiler *)WM_jobs_customdata_get(wm_job);
			if (comp != NULL) {
				BLI_spin_lock(&comp->list_lock);
				DRWDeferredShader *dsh;
				dsh = (DRWDeferredShader *)BLI_findptr(&comp->queue, mat, offsetof(DRWDeferredShader, mat));
				if (dsh) {
					BLI_remlink(&comp->queue, dsh);
				}

				/* Wait for compilation to finish. */
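				/* (The worker holds compilation_lock for the whole compile, so a
				 * lock/unlock pair here simply blocks until that compile is done.) */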
				if ((comp->mat_compiling != NULL) && (comp->mat_compiling->mat == mat)) {
					BLI_mutex_lock(&comp->compilation_lock);
					BLI_mutex_unlock(&comp->compilation_lock);
				}

				BLI_spin_unlock(&comp->list_lock);

				if (dsh) {
					drw_deferred_shader_free(dsh);
				}
			}
		}
	}
}

/** \} */

/* -------------------------------------------------------------------- */

GPUShader *DRW_shader_create(const char *vert, const char *geom, const char *frag, const char *defines)
{
	return GPU_shader_create(vert, frag, geom, NULL, defines, __func__);
}

GPUShader *DRW_shader_create_with_lib(
	const char *vert, const char *geom, const char *frag, const char *lib, const char *defines)
{
	GPUShader *sh;
	char *vert_with_lib = NULL;
	char *frag_with_lib = NULL;
	char *geom_with_lib = NULL;

	vert_with_lib = BLI_string_joinN(lib, vert);
	frag_with_lib = BLI_string_joinN(lib, frag);
	if (geom) {
		geom_with_lib = BLI_string_joinN(lib, geom);
	}

	sh = GPU_shader_create(vert_with_lib, frag_with_lib, geom_with_lib, NULL, defines, __func__);

	MEM_freeN(vert_with_lib);
	MEM_freeN(frag_with_lib);
	if (geom) {
		MEM_freeN(geom_with_lib);
	}

	return sh;
}

GPUShader *DRW_shader_create_with_transform_feedback(
	const char *vert, const char *geom, const char *defines,
	const eGPUShaderTFBType prim_type, const char **varying_names, const int varying_count)
{
	return GPU_shader_create_ex(vert,
	                            datatoc_gpu_shader_depth_only_frag_glsl,
	                            geom, NULL, defines,
	                            prim_type, varying_names, varying_count, __func__);
}

GPUShader *DRW_shader_create_2D(const char *frag, const char *defines)
{
	return GPU_shader_create(datatoc_gpu_shader_2D_vert_glsl, frag, NULL, NULL, defines, __func__);
}

GPUShader *DRW_shader_create_3D(const char *frag, const char *defines)
{
	return GPU_shader_create(datatoc_gpu_shader_3D_vert_glsl, frag, NULL, NULL, defines, __func__);
}

GPUShader *DRW_shader_create_fullscreen(const char *frag, const char *defines)
{
	return GPU_shader_create(datatoc_common_fullscreen_vert_glsl, frag, NULL, NULL, defines, __func__);
}

GPUShader *DRW_shader_create_3D_depth_only(eGPUShaderConfig shader_cfg)
{
	return GPU_shader_get_builtin_shader_with_config(GPU_SHADER_3D_DEPTH_ONLY, shader_cfg);
}

GPUMaterial *DRW_shader_find_from_world(World *wo, const void *engine_type, int options, bool deferred)
{
	GPUMaterial *mat = GPU_material_from_nodetree_find(&wo->gpumaterial, engine_type, options);
	if (DRW_state_is_image_render() || !deferred) {
		if (mat != NULL && GPU_material_status(mat) == GPU_MAT_QUEUED) {
			/* XXX Hack: we return NULL so that the engine will call DRW_shader_create_from_XXX
			 * with the shader code and we will resume the compilation from there. */
			return NULL;
		}
	}
	return mat;
}

GPUMaterial *DRW_shader_find_from_material(Material *ma, const void *engine_type, int options, bool deferred)
{
	GPUMaterial *mat = GPU_material_from_nodetree_find(&ma->gpumaterial, engine_type, options);
	if (DRW_state_is_image_render() || !deferred) {
		if (mat != NULL && GPU_material_status(mat) == GPU_MAT_QUEUED) {
			/* XXX Hack: we return NULL so that the engine will call DRW_shader_create_from_XXX
			 * with the shader code and we will resume the compilation from there. */
			return NULL;
		}
	}
	return mat;
}

GPUMaterial *DRW_shader_create_from_world(
	struct Scene *scene, World *wo, const void *engine_type, int options,
	const char *vert, const char *geom, const char *frag_lib, const char *defines, bool deferred)
{
	GPUMaterial *mat = NULL;
	if (DRW_state_is_image_render()) {
		mat = GPU_material_from_nodetree_find(&wo->gpumaterial, engine_type, options);
	}

	if (mat == NULL) {
		scene = (Scene *)DEG_get_original_id(&DST.draw_ctx.scene->id);
		mat = GPU_material_from_nodetree(
			scene, wo->nodetree, &wo->gpumaterial, engine_type, options,
			vert, geom, frag_lib, defines, wo->id.name);
	}

	if (GPU_material_status(mat) == GPU_MAT_QUEUED) {
		drw_deferred_shader_add(mat, deferred);
	}

	return mat;
}

GPUMaterial *DRW_shader_create_from_material(
	struct Scene *scene, Material *ma, const void *engine_type, int options,
	const char *vert, const char *geom, const char *frag_lib, const char *defines, bool deferred)
{
	GPUMaterial *mat = NULL;
	if (DRW_state_is_image_render()) {
		mat = GPU_material_from_nodetree_find(&ma->gpumaterial, engine_type, options);
	}

	if (mat == NULL) {
		scene = (Scene *)DEG_get_original_id(&DST.draw_ctx.scene->id);
		mat = GPU_material_from_nodetree(
			scene, ma->nodetree, &ma->gpumaterial, engine_type, options,
			vert, geom, frag_lib, defines, ma->id.name);
	}

	if (GPU_material_status(mat) == GPU_MAT_QUEUED) {
		drw_deferred_shader_add(mat, deferred);
	}

	return mat;
}

void DRW_shader_free(GPUShader *shader)
{
	GPU_shader_free(shader);
}