/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2016, Blender Foundation.
 */

/** \file
 * \ingroup draw
 */
22 #include "DNA_object_types.h"
23 #include "DNA_world_types.h"
24 #include "DNA_material_types.h"
26 #include "BLI_listbase.h"
27 #include "BLI_string_utils.h"
28 #include "BLI_threads.h"
30 #include "BKE_global.h"
33 #include "DEG_depsgraph_query.h"
35 #include "GPU_shader.h"
36 #include "GPU_material.h"
41 #include "draw_manager.h"
extern char datatoc_gpu_shader_2D_vert_glsl[];
extern char datatoc_gpu_shader_3D_vert_glsl[];
extern char datatoc_gpu_shader_depth_only_frag_glsl[];
extern char datatoc_common_fullscreen_vert_glsl[];
#define USE_DEFERRED_COMPILATION 1
/* -------------------------------------------------------------------- */
/** \name Deferred Compilation (DRW_deferred)
 *
 * Since compiling shaders can take a long time, we do it in a non-blocking
 * manner in another thread.
 * \{ */
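/* Overview of the threading model implemented below:
 * - The main thread (drw_deferred_shader_add) queues GPUMaterials on a wmJob
 *   and returns immediately, so the viewport stays interactive.
 * - The job thread (drw_deferred_shader_compilation_exec) pops materials off
 *   the queue and compiles them one at a time, in a second OpenGL context.
 * - DRW_deferred_shader_remove() lets the main thread cancel a pending
 *   compilation (e.g. when the material is freed), or wait for an in-flight
 *   one to finish. */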
typedef struct DRWDeferredShader {
  struct DRWDeferredShader *prev, *next;
  GPUMaterial *mat;
} DRWDeferredShader;
typedef struct DRWShaderCompiler {
  ListBase queue; /* DRWDeferredShader */
  SpinLock list_lock;
  DRWDeferredShader *mat_compiling;
  ThreadMutex compilation_lock;
  void *gl_context;
  bool own_context;
  int shaders_done; /* To compute progress. */
} DRWShaderCompiler;
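/* Locking discipline, as used by the functions below:
 * - `list_lock` (spin lock) protects `queue` and `mat_compiling`; it is only
 *   held for short list operations, never across a compilation.
 * - `compilation_lock` (mutex) is held for the whole duration of
 *   GPU_material_compile(), so DRW_deferred_shader_remove() can block on it
 *   to wait for an in-flight compilation to finish. */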
static void drw_deferred_shader_free(DRWDeferredShader *dsh)
{
  /* Make sure it is not queued before freeing. */
  MEM_freeN(dsh);
}

static void drw_deferred_shader_queue_free(ListBase *queue)
{
  DRWDeferredShader *dsh;
  while ((dsh = BLI_pophead(queue))) {
    drw_deferred_shader_free(dsh);
  }
}
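/* Job worker callback (matches the wm_jobs `exec` signature). Runs in a
 * separate thread with its own OpenGL context, and drains the queue until it
 * is empty or the job is stopped. */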
static void drw_deferred_shader_compilation_exec(
    void *custom_data, short *stop, short *do_update, float *progress)
{
  DRWShaderCompiler *comp = (DRWShaderCompiler *)custom_data;
  void *gl_context = comp->gl_context;

  WM_opengl_context_activate(gl_context);

  while (true) {
    BLI_spin_lock(&comp->list_lock);

    if (*stop != 0) {
      /* We don't want the user to be able to cancel the compilation,
       * but the wm can kill the task if we are closing Blender. */
      BLI_spin_unlock(&comp->list_lock);
      break;
    }

    /* Pop the tail because it will be less likely to lock the main thread
     * if all GPUMaterials are to be freed (see DRW_deferred_shader_remove()). */
    comp->mat_compiling = BLI_poptail(&comp->queue);
    if (comp->mat_compiling == NULL) {
      /* No more shaders to compile. */
      BLI_spin_unlock(&comp->list_lock);
      break;
    }

    comp->shaders_done++;
    int total = BLI_listbase_count(&comp->queue) + comp->shaders_done;

    BLI_mutex_lock(&comp->compilation_lock);
    BLI_spin_unlock(&comp->list_lock);

    /* Do the compilation. */
    GPU_material_compile(comp->mat_compiling->mat);

    *progress = (float)comp->shaders_done / (float)total;
    *do_update = true;

    BLI_mutex_unlock(&comp->compilation_lock);

    BLI_spin_lock(&comp->list_lock);
    drw_deferred_shader_free(comp->mat_compiling);
    comp->mat_compiling = NULL;
    BLI_spin_unlock(&comp->list_lock);
  }

  WM_opengl_context_release(gl_context);
}
static void drw_deferred_shader_compilation_free(void *custom_data)
{
  DRWShaderCompiler *comp = (DRWShaderCompiler *)custom_data;

  drw_deferred_shader_queue_free(&comp->queue);

  BLI_spin_end(&comp->list_lock);
  BLI_mutex_end(&comp->compilation_lock);

  if (comp->own_context) {
    /* Only destroy the context if the job owns it. */
    WM_opengl_context_dispose(comp->gl_context);
  }

  MEM_freeN(comp);
}
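/* Queue `mat` for background compilation, or compile it immediately when
 * deferred compilation is disabled, not requested, or when we are rendering
 * an image (where all shaders must be ready before the frame is drawn). */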
static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)
{
  /* Do not defer the compilation if we are rendering an image. */
  if (DRW_state_is_image_render() || !USE_DEFERRED_COMPILATION || !deferred) {
    /* Double check that this GPUMaterial is not going to be
     * compiled by another thread. */
    DRW_deferred_shader_remove(mat);
    GPU_material_compile(mat);
    return;
  }

  DRWDeferredShader *dsh = MEM_callocN(sizeof(DRWDeferredShader), "Deferred Shader");
  dsh->mat = mat;

  BLI_assert(DST.draw_ctx.evil_C);
  wmWindowManager *wm = CTX_wm_manager(DST.draw_ctx.evil_C);
  wmWindow *win = CTX_wm_window(DST.draw_ctx.evil_C);

  /* Use the original scene ID since this is what the jobs template tests for. */
  Scene *scene = (Scene *)DEG_get_original_id(&DST.draw_ctx.scene->id);

  /* Get the running job, or a new one if none is running.
   * There can only be one job per type & owner. */
  wmJob *wm_job = WM_jobs_get(wm, win, scene, "Shaders Compilation",
                              WM_JOB_PROGRESS | WM_JOB_SUSPEND, WM_JOB_TYPE_SHADER_COMPILATION);

  DRWShaderCompiler *old_comp = (DRWShaderCompiler *)WM_jobs_customdata_get(wm_job);

  DRWShaderCompiler *comp = MEM_callocN(sizeof(DRWShaderCompiler), "DRWShaderCompiler");
  BLI_spin_init(&comp->list_lock);
  BLI_mutex_init(&comp->compilation_lock);

  if (old_comp) {
    BLI_spin_lock(&old_comp->list_lock);
    BLI_movelisttolist(&comp->queue, &old_comp->queue);
    BLI_spin_unlock(&old_comp->list_lock);
    /* Do not recreate the context, just pass ownership. */
    if (old_comp->gl_context) {
      comp->gl_context = old_comp->gl_context;
      old_comp->own_context = false;
      comp->own_context = true;
    }
  }

  BLI_addtail(&comp->queue, dsh);

  /* Create only one context. */
  if (comp->gl_context == NULL) {
    comp->gl_context = WM_opengl_context_create();
    WM_opengl_context_activate(DST.gl_context);
    comp->own_context = true;
  }

  WM_jobs_customdata_set(wm_job, comp, drw_deferred_shader_compilation_free);
  WM_jobs_timer(wm_job, 0.1, NC_MATERIAL | ND_SHADING_DRAW, 0);
  WM_jobs_callbacks(wm_job, drw_deferred_shader_compilation_exec, NULL, NULL, NULL);
  WM_jobs_start(wm, wm_job);
}
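/* Remove `mat` from every pending compilation queue. If the job thread is
 * currently compiling `mat`, block until that compilation has finished, so
 * the caller can safely free the material afterwards. */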
void DRW_deferred_shader_remove(GPUMaterial *mat)
{
  Scene *scene = GPU_material_scene(mat);

  for (wmWindowManager *wm = G_MAIN->wm.first; wm; wm = wm->id.next) {
    if (WM_jobs_test(wm, scene, WM_JOB_TYPE_SHADER_COMPILATION) == false) {
      /* No job running, do not create a new one by calling WM_jobs_get. */
      continue;
    }
    for (wmWindow *win = wm->windows.first; win; win = win->next) {
      wmJob *wm_job = WM_jobs_get(wm, win, scene, "Shaders Compilation",
                                  WM_JOB_PROGRESS | WM_JOB_SUSPEND, WM_JOB_TYPE_SHADER_COMPILATION);

      DRWShaderCompiler *comp = (DRWShaderCompiler *)WM_jobs_customdata_get(wm_job);
      if (comp != NULL) {
        BLI_spin_lock(&comp->list_lock);
        DRWDeferredShader *dsh;
        dsh = (DRWDeferredShader *)BLI_findptr(&comp->queue, mat, offsetof(DRWDeferredShader, mat));
        if (dsh) {
          BLI_remlink(&comp->queue, dsh);
        }

        /* Wait for the compilation to finish. */
        if ((comp->mat_compiling != NULL) && (comp->mat_compiling->mat == mat)) {
          BLI_mutex_lock(&comp->compilation_lock);
          BLI_mutex_unlock(&comp->compilation_lock);
        }
        BLI_spin_unlock(&comp->list_lock);

        if (dsh) {
          drw_deferred_shader_free(dsh);
        }
      }
    }
  }
}
/** \} */

/* -------------------------------------------------------------------- */
GPUShader *DRW_shader_create(const char *vert, const char *geom, const char *frag, const char *defines)
{
  return GPU_shader_create(vert, frag, geom, NULL, defines, __func__);
}
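/* Example usage (a sketch: the datatoc sources declared above stand in for
 * real engine shaders, and the caller owns the returned shader):
 *
 *   GPUShader *sh = DRW_shader_create(
 *       datatoc_gpu_shader_3D_vert_glsl, NULL,
 *       datatoc_gpu_shader_depth_only_frag_glsl, NULL);
 *   ...
 *   DRW_shader_free(sh);
 */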
GPUShader *DRW_shader_create_with_lib(
    const char *vert, const char *geom, const char *frag, const char *lib, const char *defines)
{
  GPUShader *sh;
  char *vert_with_lib = NULL;
  char *frag_with_lib = NULL;
  char *geom_with_lib = NULL;

  vert_with_lib = BLI_string_joinN(lib, vert);
  frag_with_lib = BLI_string_joinN(lib, frag);
  if (geom) {
    geom_with_lib = BLI_string_joinN(lib, geom);
  }

  sh = GPU_shader_create(vert_with_lib, frag_with_lib, geom_with_lib, NULL, defines, __func__);

  MEM_freeN(vert_with_lib);
  MEM_freeN(frag_with_lib);
  if (geom) {
    MEM_freeN(geom_with_lib);
  }

  return sh;
}
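/* Note: BLI_string_joinN() allocates a new concatenated string, which is why
 * every *_with_lib buffer above must be released with MEM_freeN() once the
 * shader has been created. The geometry string is optional, and is only
 * allocated (and freed) when a geometry shader is given. */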
GPUShader *DRW_shader_create_with_transform_feedback(
    const char *vert, const char *geom, const char *defines,
    const eGPUShaderTFBType prim_type, const char **varying_names, const int varying_count)
{
  return GPU_shader_create_ex(vert,
                              datatoc_gpu_shader_depth_only_frag_glsl,
                              geom, NULL, defines,
                              prim_type, varying_names, varying_count, __func__);
}
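/* Example usage (a sketch: "pos" is a hypothetical varying captured from the
 * vertex stage, and `vert_src` stands for the caller's vertex shader source):
 *
 *   const char *names[] = {"pos"};
 *   GPUShader *sh = DRW_shader_create_with_transform_feedback(
 *       vert_src, NULL, NULL, GPU_SHADER_TFB_POINTS, names, 1);
 */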
GPUShader *DRW_shader_create_2D(const char *frag, const char *defines)
{
  return GPU_shader_create(datatoc_gpu_shader_2D_vert_glsl, frag, NULL, NULL, defines, __func__);
}
GPUShader *DRW_shader_create_3D(const char *frag, const char *defines)
{
  return GPU_shader_create(datatoc_gpu_shader_3D_vert_glsl, frag, NULL, NULL, defines, __func__);
}
GPUShader *DRW_shader_create_fullscreen(const char *frag, const char *defines)
{
  return GPU_shader_create(datatoc_common_fullscreen_vert_glsl, frag, NULL, NULL, defines, __func__);
}
GPUShader *DRW_shader_create_3D_depth_only(eGPUShaderConfig sh_cfg)
{
  return GPU_shader_get_builtin_shader_with_config(GPU_SHADER_3D_DEPTH_ONLY, sh_cfg);
}
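/* The two finders below return an already-built GPUMaterial for the given
 * engine/options combination, or NULL when the caller should (re)create it
 * through DRW_shader_create_from_world/material(). */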
GPUMaterial *DRW_shader_find_from_world(World *wo, const void *engine_type, int options, bool deferred)
{
  GPUMaterial *mat = GPU_material_from_nodetree_find(&wo->gpumaterial, engine_type, options);
  if (DRW_state_is_image_render() || !deferred) {
    if (mat != NULL && GPU_material_status(mat) == GPU_MAT_QUEUED) {
      /* XXX Hack: we return NULL so that the engine will call DRW_shader_create_from_XXX
       * with the shader code, and we will resume the compilation from there. */
      return NULL;
    }
  }
  return mat;
}
GPUMaterial *DRW_shader_find_from_material(Material *ma, const void *engine_type, int options, bool deferred)
{
  GPUMaterial *mat = GPU_material_from_nodetree_find(&ma->gpumaterial, engine_type, options);
  if (DRW_state_is_image_render() || !deferred) {
    if (mat != NULL && GPU_material_status(mat) == GPU_MAT_QUEUED) {
      /* XXX Hack: we return NULL so that the engine will call DRW_shader_create_from_XXX
       * with the shader code, and we will resume the compilation from there. */
      return NULL;
    }
  }
  return mat;
}
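/* Typical engine-side pattern implied by the XXX hack above (a sketch;
 * `engine`, `opts` and the shader strings are placeholders):
 *
 *   GPUMaterial *mat = DRW_shader_find_from_material(ma, engine, opts, true);
 *   if (mat == NULL) {
 *     mat = DRW_shader_create_from_material(
 *         scene, ma, engine, opts, vert, geom, frag_lib, defines, true);
 *   }
 */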
GPUMaterial *DRW_shader_create_from_world(
    struct Scene *scene, World *wo, const void *engine_type, int options,
    const char *vert, const char *geom, const char *frag_lib, const char *defines, bool deferred)
{
  GPUMaterial *mat = NULL;
  if (DRW_state_is_image_render()) {
    mat = GPU_material_from_nodetree_find(&wo->gpumaterial, engine_type, options);
  }

  if (mat == NULL) {
    scene = (Scene *)DEG_get_original_id(&DST.draw_ctx.scene->id);
    mat = GPU_material_from_nodetree(
        scene, wo->nodetree, &wo->gpumaterial, engine_type, options,
        vert, geom, frag_lib, defines, wo->id.name);
  }

  if (GPU_material_status(mat) == GPU_MAT_QUEUED) {
    drw_deferred_shader_add(mat, deferred);
  }

  return mat;
}
GPUMaterial *DRW_shader_create_from_material(
    struct Scene *scene, Material *ma, const void *engine_type, int options,
    const char *vert, const char *geom, const char *frag_lib, const char *defines, bool deferred)
{
  GPUMaterial *mat = NULL;
  if (DRW_state_is_image_render()) {
    mat = GPU_material_from_nodetree_find(&ma->gpumaterial, engine_type, options);
  }

  if (mat == NULL) {
    scene = (Scene *)DEG_get_original_id(&DST.draw_ctx.scene->id);
    mat = GPU_material_from_nodetree(
        scene, ma->nodetree, &ma->gpumaterial, engine_type, options,
        vert, geom, frag_lib, defines, ma->id.name);
  }

  if (GPU_material_status(mat) == GPU_MAT_QUEUED) {
    drw_deferred_shader_add(mat, deferred);
  }

  return mat;
}
void DRW_shader_free(GPUShader *shader)
{
  GPU_shader_free(shader);
}