/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2016, Blender Foundation.
 */
/** \file blender/draw/intern/draw_manager_shader.c
 *  \ingroup draw
 */
#include "MEM_guardedalloc.h"

#include "DNA_object_types.h"
#include "DNA_world_types.h"
#include "DNA_material_types.h"

#include "BLI_listbase.h"
#include "BLI_string_utils.h"
#include "BLI_threads.h"

#include "BKE_context.h"
#include "BKE_global.h"
#include "BKE_main.h"

#include "DEG_depsgraph_query.h"

#include "GPU_shader.h"
#include "GPU_material.h"

#include "WM_api.h"
#include "WM_types.h"

#include "draw_manager.h"
extern char datatoc_gpu_shader_2D_vert_glsl[];
extern char datatoc_gpu_shader_3D_vert_glsl[];
extern char datatoc_gpu_shader_depth_only_frag_glsl[];
extern char datatoc_common_fullscreen_vert_glsl[];
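
/* When nonzero, material shaders are compiled on a background thread via a
 * wmJob (see drw_deferred_shader_add() below) instead of blocking drawing. */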
#define USE_DEFERRED_COMPILATION 1
/* -------------------------------------------------------------------- */
/** \name Deferred Compilation (DRW_deferred)
 *
 * Since compiling shaders can take a long time, we compile them in a
 * non-blocking manner on another thread.
 * \{ */
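
/* Synchronization overview (a summary of the code below):
 * - `list_lock` (spin lock) protects `queue` and `mat_compiling`.
 * - `compilation_lock` (mutex) is held by the worker for the whole duration
 *   of one GPU_material_compile() call, so DRW_deferred_shader_remove() can
 *   lock/unlock it to wait for an in-flight compilation to finish. */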
typedef struct DRWDeferredShader {
    struct DRWDeferredShader *prev, *next;
    GPUMaterial *mat;
} DRWDeferredShader;
typedef struct DRWShaderCompiler {
    ListBase queue;      /* DRWDeferredShader */
    SpinLock list_lock;  /* Protects queue and mat_compiling. */
    DRWDeferredShader *mat_compiling;
    ThreadMutex compilation_lock; /* Held while one material is being compiled. */
    void *gl_context;
    bool own_context;    /* True if this job created gl_context and must dispose it. */
    int shaders_done;    /* To compute progress. */
} DRWShaderCompiler;
static void drw_deferred_shader_free(DRWDeferredShader *dsh)
{
    /* Make sure it is not queued before freeing. */
    MEM_freeN(dsh);
}

static void drw_deferred_shader_queue_free(ListBase *queue)
{
    DRWDeferredShader *dsh;
    while ((dsh = BLI_pophead(queue))) {
        drw_deferred_shader_free(dsh);
    }
}
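
/* wmJob "run" callback: executes on a worker thread with its own GL context,
 * popping queued materials until the queue is empty or the job is stopped
 * (registered via WM_jobs_callbacks() below). */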
static void drw_deferred_shader_compilation_exec(void *custom_data, short *stop, short *do_update, float *progress)
{
    DRWShaderCompiler *comp = (DRWShaderCompiler *)custom_data;
    void *gl_context = comp->gl_context;

    WM_opengl_context_activate(gl_context);

    while (true) {
        BLI_spin_lock(&comp->list_lock);

        if (*stop != 0) {
            /* We don't want the user to be able to cancel the compilation,
             * but the WM can kill the task when Blender is closing. */
            BLI_spin_unlock(&comp->list_lock);
            break;
        }

        /* Pop from the tail, because it is less likely to lock the main thread
         * if all GPUMaterials are to be freed (see DRW_deferred_shader_remove()). */
        comp->mat_compiling = BLI_poptail(&comp->queue);
        if (comp->mat_compiling == NULL) {
            /* No more shaders to compile. */
            BLI_spin_unlock(&comp->list_lock);
            break;
        }

        comp->shaders_done++;
        int total = BLI_listbase_count(&comp->queue) + comp->shaders_done;

        /* Hold the compilation mutex so DRW_deferred_shader_remove() can wait
         * for this compilation to finish before freeing the material. */
        BLI_mutex_lock(&comp->compilation_lock);
        BLI_spin_unlock(&comp->list_lock);

        /* Do the compilation. */
        GPU_material_compile(comp->mat_compiling->mat);

        *progress = (float)comp->shaders_done / (float)total;
        *do_update = true;

        BLI_mutex_unlock(&comp->compilation_lock);

        BLI_spin_lock(&comp->list_lock);
        drw_deferred_shader_free(comp->mat_compiling);
        comp->mat_compiling = NULL;
        BLI_spin_unlock(&comp->list_lock);
    }

    WM_opengl_context_release(gl_context);
}
static void drw_deferred_shader_compilation_free(void *custom_data)
{
    DRWShaderCompiler *comp = (DRWShaderCompiler *)custom_data;

    drw_deferred_shader_queue_free(&comp->queue);

    BLI_spin_end(&comp->list_lock);
    BLI_mutex_end(&comp->compilation_lock);

    if (comp->own_context) {
        /* Only destroy if the job owns the context. */
        WM_opengl_context_dispose(comp->gl_context);
    }

    MEM_freeN(comp);
}
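
/* Queue a material for background compilation, or compile it immediately
 * when deferred compilation is disabled or we are rendering an image. */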
static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)
{
    /* Do not defer the compilation if we are rendering an image. */
    if (DRW_state_is_image_render() || !USE_DEFERRED_COMPILATION || !deferred) {
        /* Double check that this GPUMaterial is not going to be
         * compiled by another thread. */
        DRW_deferred_shader_remove(mat);
        GPU_material_compile(mat);
        return;
    }

    DRWDeferredShader *dsh = MEM_callocN(sizeof(DRWDeferredShader), "Deferred Shader");
    dsh->mat = mat;

    BLI_assert(DST.draw_ctx.evil_C);
    wmWindowManager *wm = CTX_wm_manager(DST.draw_ctx.evil_C);
    wmWindow *win = CTX_wm_window(DST.draw_ctx.evil_C);

    /* Use the original scene ID, since this is what the jobs template tests for. */
    Scene *scene = (Scene *)DEG_get_original_id(&DST.draw_ctx.scene->id);

    /* Get the running job, or a new one if none is running.
     * There can only be one job per type & owner. */
    wmJob *wm_job = WM_jobs_get(wm, win, scene, "Shaders Compilation",
                                WM_JOB_PROGRESS | WM_JOB_SUSPEND, WM_JOB_TYPE_SHADER_COMPILATION);

    DRWShaderCompiler *old_comp = (DRWShaderCompiler *)WM_jobs_customdata_get(wm_job);

    DRWShaderCompiler *comp = MEM_callocN(sizeof(DRWShaderCompiler), "DRWShaderCompiler");
    BLI_spin_init(&comp->list_lock);
    BLI_mutex_init(&comp->compilation_lock);

    if (old_comp) {
        /* Carry over the pending queue from the previous job data. */
        BLI_spin_lock(&old_comp->list_lock);
        BLI_movelisttolist(&comp->queue, &old_comp->queue);
        BLI_spin_unlock(&old_comp->list_lock);
        /* Do not recreate the context, just pass ownership. */
        if (old_comp->gl_context) {
            comp->gl_context = old_comp->gl_context;
            old_comp->own_context = false;
            comp->own_context = true;
        }
    }

    BLI_addtail(&comp->queue, dsh);

    /* Create only one context. */
    if (comp->gl_context == NULL) {
        comp->gl_context = WM_opengl_context_create();
        /* Creating a context activates it; switch this thread back to the
         * main drawing context. */
        WM_opengl_context_activate(DST.gl_context);
        comp->own_context = true;
    }

    WM_jobs_customdata_set(wm_job, comp, drw_deferred_shader_compilation_free);
    WM_jobs_timer(wm_job, 0.1, NC_MATERIAL | ND_SHADING_DRAW, 0);
    WM_jobs_callbacks(wm_job, drw_deferred_shader_compilation_exec, NULL, NULL, NULL);
    WM_jobs_start(wm, wm_job);
}
void DRW_deferred_shader_remove(GPUMaterial *mat)
{
    Scene *scene = GPU_material_scene(mat);

    for (wmWindowManager *wm = G_MAIN->wm.first; wm; wm = wm->id.next) {
        if (WM_jobs_test(wm, scene, WM_JOB_TYPE_SHADER_COMPILATION) == false) {
            /* No job running, do not create a new one by calling WM_jobs_get. */
            continue;
        }
        for (wmWindow *win = wm->windows.first; win; win = win->next) {
            wmJob *wm_job = WM_jobs_get(wm, win, scene, "Shaders Compilation",
                                        WM_JOB_PROGRESS | WM_JOB_SUSPEND, WM_JOB_TYPE_SHADER_COMPILATION);

            DRWShaderCompiler *comp = (DRWShaderCompiler *)WM_jobs_customdata_get(wm_job);
            if (comp != NULL) {
                BLI_spin_lock(&comp->list_lock);
                DRWDeferredShader *dsh;
                dsh = (DRWDeferredShader *)BLI_findptr(&comp->queue, mat, offsetof(DRWDeferredShader, mat));
                if (dsh) {
                    BLI_remlink(&comp->queue, dsh);
                }

                /* Wait for the compilation to finish: the worker holds
                 * compilation_lock while compiling this material. */
                if ((comp->mat_compiling != NULL) && (comp->mat_compiling->mat == mat)) {
                    BLI_mutex_lock(&comp->compilation_lock);
                    BLI_mutex_unlock(&comp->compilation_lock);
                }

                BLI_spin_unlock(&comp->list_lock);

                if (dsh) {
                    drw_deferred_shader_free(dsh);
                }
            }
        }
    }
}
/** \} */

/* -------------------------------------------------------------------- */
GPUShader *DRW_shader_create(const char *vert, const char *geom, const char *frag, const char *defines)
{
    return GPU_shader_create(vert, frag, geom, NULL, defines, __func__);
}
GPUShader *DRW_shader_create_with_lib(
        const char *vert, const char *geom, const char *frag, const char *lib, const char *defines)
{
    GPUShader *sh;
    char *vert_with_lib = NULL;
    char *frag_with_lib = NULL;
    char *geom_with_lib = NULL;

    vert_with_lib = BLI_string_joinN(lib, vert);
    frag_with_lib = BLI_string_joinN(lib, frag);
    if (geom) {
        geom_with_lib = BLI_string_joinN(lib, geom);
    }

    sh = GPU_shader_create(vert_with_lib, frag_with_lib, geom_with_lib, NULL, defines, __func__);

    MEM_freeN(vert_with_lib);
    MEM_freeN(frag_with_lib);
    if (geom) {
        MEM_freeN(geom_with_lib);
    }

    return sh;
}
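
/* Example usage (hypothetical shader sources, for illustration only):
 *   GPUShader *sh = DRW_shader_create_with_lib(
 *           my_vert_glsl, NULL, my_frag_glsl, my_lib_glsl, "#define MY_OPT\n");
 *   ...
 *   DRW_shader_free(sh);
 */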
GPUShader *DRW_shader_create_with_transform_feedback(
        const char *vert, const char *geom, const char *defines,
        const eGPUShaderTFBType prim_type, const char **varying_names, const int varying_count)
{
    return GPU_shader_create_ex(vert,
                                datatoc_gpu_shader_depth_only_frag_glsl,
                                geom, NULL, defines,
                                prim_type, varying_names, varying_count, __func__);
}
GPUShader *DRW_shader_create_2D(const char *frag, const char *defines)
{
    return GPU_shader_create(datatoc_gpu_shader_2D_vert_glsl, frag, NULL, NULL, defines, __func__);
}
GPUShader *DRW_shader_create_3D(const char *frag, const char *defines)
{
    return GPU_shader_create(datatoc_gpu_shader_3D_vert_glsl, frag, NULL, NULL, defines, __func__);
}
GPUShader *DRW_shader_create_fullscreen(const char *frag, const char *defines)
{
    return GPU_shader_create(datatoc_common_fullscreen_vert_glsl, frag, NULL, NULL, defines, __func__);
}
GPUShader *DRW_shader_create_3D_depth_only(eGPUShaderConfig shader_cfg)
{
    return GPU_shader_get_builtin_shader_with_config(GPU_SHADER_3D_DEPTH_ONLY, shader_cfg);
}
GPUMaterial *DRW_shader_find_from_world(World *wo, const void *engine_type, int options, bool deferred)
{
    GPUMaterial *mat = GPU_material_from_nodetree_find(&wo->gpumaterial, engine_type, options);
    if (DRW_state_is_image_render() || !deferred) {
        if (mat != NULL && GPU_material_status(mat) == GPU_MAT_QUEUED) {
            /* XXX Hack: we return NULL so that the engine will call DRW_shader_create_from_XXX
             * with the shader code, and we will resume the compilation from there. */
            return NULL;
        }
    }
    return mat;
}
GPUMaterial *DRW_shader_find_from_material(Material *ma, const void *engine_type, int options, bool deferred)
{
    GPUMaterial *mat = GPU_material_from_nodetree_find(&ma->gpumaterial, engine_type, options);
    if (DRW_state_is_image_render() || !deferred) {
        if (mat != NULL && GPU_material_status(mat) == GPU_MAT_QUEUED) {
            /* XXX Hack: we return NULL so that the engine will call DRW_shader_create_from_XXX
             * with the shader code, and we will resume the compilation from there. */
            return NULL;
        }
    }
    return mat;
}
GPUMaterial *DRW_shader_create_from_world(
        struct Scene *scene, World *wo, const void *engine_type, int options,
        const char *vert, const char *geom, const char *frag_lib, const char *defines, bool deferred)
{
    GPUMaterial *mat = NULL;
    if (DRW_state_is_image_render()) {
        mat = GPU_material_from_nodetree_find(&wo->gpumaterial, engine_type, options);
    }

    if (mat == NULL) {
        scene = (Scene *)DEG_get_original_id(&DST.draw_ctx.scene->id);
        mat = GPU_material_from_nodetree(
                scene, wo->nodetree, &wo->gpumaterial, engine_type, options,
                vert, geom, frag_lib, defines, wo->id.name);
    }

    if (GPU_material_status(mat) == GPU_MAT_QUEUED) {
        drw_deferred_shader_add(mat, deferred);
    }

    return mat;
}
GPUMaterial *DRW_shader_create_from_material(
        struct Scene *scene, Material *ma, const void *engine_type, int options,
        const char *vert, const char *geom, const char *frag_lib, const char *defines, bool deferred)
{
    GPUMaterial *mat = NULL;
    if (DRW_state_is_image_render()) {
        mat = GPU_material_from_nodetree_find(&ma->gpumaterial, engine_type, options);
    }

    if (mat == NULL) {
        scene = (Scene *)DEG_get_original_id(&DST.draw_ctx.scene->id);
        mat = GPU_material_from_nodetree(
                scene, ma->nodetree, &ma->gpumaterial, engine_type, options,
                vert, geom, frag_lib, defines, ma->id.name);
    }

    if (GPU_material_status(mat) == GPU_MAT_QUEUED) {
        drw_deferred_shader_add(mat, deferred);
    }

    return mat;
}
void DRW_shader_free(GPUShader *shader)
{
    GPU_shader_free(shader);
}