/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2016, Blender Foundation.
 */

/** \file blender/draw/intern/draw_manager_shader.c
 *  \ingroup draw
 */

#include "DNA_object_types.h"
#include "DNA_world_types.h"
#include "DNA_material_types.h"

#include "BLI_listbase.h"
#include "BLI_string_utils.h"
#include "BLI_threads.h"

#include "BKE_global.h"
#include "BKE_main.h"

#include "DEG_depsgraph_query.h"

#include "GPU_shader.h"
#include "GPU_material.h"

#include "WM_api.h"
#include "WM_types.h"

#include "draw_manager.h"

extern char datatoc_gpu_shader_2D_vert_glsl[];
extern char datatoc_gpu_shader_3D_vert_glsl[];
extern char datatoc_gpu_shader_depth_only_frag_glsl[];
extern char datatoc_common_fullscreen_vert_glsl[];

#define USE_DEFERRED_COMPILATION 1

/* -------------------------------------------------------------------- */
/** \name Deferred Compilation (DRW_deferred)
 *
 * Since compiling shaders can take a long time, we do it in a non-blocking
 * manner in another thread.
 *
 * \{ */

typedef struct DRWDeferredShader {
        struct DRWDeferredShader *prev, *next;

        GPUMaterial *mat;
} DRWDeferredShader;

typedef struct DRWShaderCompiler {
        ListBase queue; /* DRWDeferredShader */
        SpinLock list_lock;

        DRWDeferredShader *mat_compiling;
        ThreadMutex compilation_lock;

        void *gl_context;
        bool own_context;

        int shaders_done; /* To compute progress. */
} DRWShaderCompiler;

static void drw_deferred_shader_free(DRWDeferredShader *dsh)
{
        /* Make sure it is not queued before freeing. */
        MEM_freeN(dsh);
}

static void drw_deferred_shader_queue_free(ListBase *queue)
{
        DRWDeferredShader *dsh;
        while ((dsh = BLI_pophead(queue))) {
                drw_deferred_shader_free(dsh);
        }
}

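/* wmJob execution callback. Runs in a worker thread with the compiler's own GL
 * context active, popping GPUMaterials from the tail of the queue and compiling
 * them one by one until the queue is empty or the job is stopped. */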
static void drw_deferred_shader_compilation_exec(void *custom_data, short *stop, short *do_update, float *progress)
{
        DRWShaderCompiler *comp = (DRWShaderCompiler *)custom_data;
        void *gl_context = comp->gl_context;

        WM_opengl_context_activate(gl_context);

        while (true) {
                BLI_spin_lock(&comp->list_lock);

                if (*stop != 0) {
                        /* We don't want the user to be able to cancel the compilation,
                         * but the WM can kill the task if we are closing Blender. */
                        BLI_spin_unlock(&comp->list_lock);
                        break;
                }

                /* Pop from the tail because it is less likely to lock the main thread
                 * if all GPUMaterials are to be freed (see DRW_deferred_shader_remove()). */
                comp->mat_compiling = BLI_poptail(&comp->queue);
                if (comp->mat_compiling == NULL) {
                        /* No more shaders to compile. */
                        BLI_spin_unlock(&comp->list_lock);
                        break;
                }

                comp->shaders_done++;
                int total = BLI_listbase_count(&comp->queue) + comp->shaders_done;

                BLI_mutex_lock(&comp->compilation_lock);
                BLI_spin_unlock(&comp->list_lock);

                /* Do the compilation. */
                GPU_material_compile(comp->mat_compiling->mat);

                *progress = (float)comp->shaders_done / (float)total;
                *do_update = true;

                GPU_flush();
                BLI_mutex_unlock(&comp->compilation_lock);

                BLI_spin_lock(&comp->list_lock);
                drw_deferred_shader_free(comp->mat_compiling);
                comp->mat_compiling = NULL;
                BLI_spin_unlock(&comp->list_lock);
        }

        WM_opengl_context_release(gl_context);
}

static void drw_deferred_shader_compilation_free(void *custom_data)
{
        DRWShaderCompiler *comp = (DRWShaderCompiler *)custom_data;

        drw_deferred_shader_queue_free(&comp->queue);

        BLI_spin_end(&comp->list_lock);
        BLI_mutex_end(&comp->compilation_lock);

        if (comp->own_context) {
                /* Only destroy if the job owns the context. */
                WM_opengl_context_dispose(comp->gl_context);
        }

        MEM_freeN(comp);
}

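/* Queue the material for compilation by the "Shaders Compilation" wmJob, or compile
 * it immediately if deferred compilation is disabled, not requested, or if we are
 * rendering an image. The queue and GL context of any previously running job are
 * taken over by the new job. */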
static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)
{
        /* Do not defer the compilation if we are rendering an image. */
        if (DRW_state_is_image_render() || !USE_DEFERRED_COMPILATION || !deferred) {
                /* Double-check that this GPUMaterial is not going to be
                 * compiled by another thread. */
                DRW_deferred_shader_remove(mat);
                GPU_material_compile(mat);
                return;
        }

        DRWDeferredShader *dsh = MEM_callocN(sizeof(DRWDeferredShader), "Deferred Shader");

        dsh->mat = mat;

        BLI_assert(DST.draw_ctx.evil_C);
        wmWindowManager *wm = CTX_wm_manager(DST.draw_ctx.evil_C);
        wmWindow *win = CTX_wm_window(DST.draw_ctx.evil_C);

        /* Use the original scene ID since this is what the jobs template tests for. */
        Scene *scene = (Scene *)DEG_get_original_id(&DST.draw_ctx.scene->id);

        /* Get the running job, or a new one if none is running. There can only be one job per type & owner. */
        wmJob *wm_job = WM_jobs_get(wm, win, scene, "Shaders Compilation",
                                    WM_JOB_PROGRESS | WM_JOB_SUSPEND, WM_JOB_TYPE_SHADER_COMPILATION);

        DRWShaderCompiler *old_comp = (DRWShaderCompiler *)WM_jobs_customdata_get(wm_job);

        DRWShaderCompiler *comp = MEM_callocN(sizeof(DRWShaderCompiler), "DRWShaderCompiler");
        BLI_spin_init(&comp->list_lock);
        BLI_mutex_init(&comp->compilation_lock);

        if (old_comp) {
                BLI_spin_lock(&old_comp->list_lock);
                BLI_movelisttolist(&comp->queue, &old_comp->queue);
                BLI_spin_unlock(&old_comp->list_lock);
                /* Do not recreate the context, just pass ownership. */
                if (old_comp->gl_context) {
                        comp->gl_context = old_comp->gl_context;
                        old_comp->own_context = false;
                        comp->own_context = true;
                }
        }

        BLI_addtail(&comp->queue, dsh);

        /* Create only one context. */
        if (comp->gl_context == NULL) {
                comp->gl_context = WM_opengl_context_create();
                WM_opengl_context_activate(DST.gl_context);
                comp->own_context = true;
        }

        WM_jobs_customdata_set(wm_job, comp, drw_deferred_shader_compilation_free);
        WM_jobs_timer(wm_job, 0.1, NC_MATERIAL | ND_SHADING_DRAW, 0);
        WM_jobs_callbacks(wm_job, drw_deferred_shader_compilation_exec, NULL, NULL, NULL);
        WM_jobs_start(wm, wm_job);
}

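/* Remove a material from any pending compilation queue. If it is currently being
 * compiled, wait for that compilation to finish so the caller can safely free the
 * GPUMaterial afterwards. */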
void DRW_deferred_shader_remove(GPUMaterial *mat)
{
        Scene *scene = GPU_material_scene(mat);

        for (wmWindowManager *wm = G_MAIN->wm.first; wm; wm = wm->id.next) {
                if (WM_jobs_test(wm, scene, WM_JOB_TYPE_SHADER_COMPILATION) == false) {
                        /* No job running, do not create a new one by calling WM_jobs_get. */
                        continue;
                }
                for (wmWindow *win = wm->windows.first; win; win = win->next) {
                        wmJob *wm_job = WM_jobs_get(wm, win, scene, "Shaders Compilation",
                                                    WM_JOB_PROGRESS | WM_JOB_SUSPEND, WM_JOB_TYPE_SHADER_COMPILATION);

                        DRWShaderCompiler *comp = (DRWShaderCompiler *)WM_jobs_customdata_get(wm_job);
                        if (comp != NULL) {
                                BLI_spin_lock(&comp->list_lock);
                                DRWDeferredShader *dsh;
                                dsh = (DRWDeferredShader *)BLI_findptr(&comp->queue, mat, offsetof(DRWDeferredShader, mat));
                                if (dsh) {
                                        BLI_remlink(&comp->queue, dsh);
                                }

                                /* Wait for compilation to finish. */
                                if ((comp->mat_compiling != NULL) && (comp->mat_compiling->mat == mat)) {
                                        BLI_mutex_lock(&comp->compilation_lock);
                                        BLI_mutex_unlock(&comp->compilation_lock);
                                }

                                BLI_spin_unlock(&comp->list_lock);

                                if (dsh) {
                                        drw_deferred_shader_free(dsh);
                                }
                        }
                }
        }
}

/** \} */

/* -------------------------------------------------------------------- */

GPUShader *DRW_shader_create(const char *vert, const char *geom, const char *frag, const char *defines)
{
        return GPU_shader_create(vert, frag, geom, NULL, defines, __func__);
}

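/* Same as DRW_shader_create() but prepends the given library code to the vertex,
 * geometry and fragment sources before compiling. */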
GPUShader *DRW_shader_create_with_lib(
        const char *vert, const char *geom, const char *frag, const char *lib, const char *defines)
{
        GPUShader *sh;
        char *vert_with_lib = NULL;
        char *frag_with_lib = NULL;
        char *geom_with_lib = NULL;

        vert_with_lib = BLI_string_joinN(lib, vert);
        frag_with_lib = BLI_string_joinN(lib, frag);
        if (geom) {
                geom_with_lib = BLI_string_joinN(lib, geom);
        }

        sh = GPU_shader_create(vert_with_lib, frag_with_lib, geom_with_lib, NULL, defines, __func__);

        MEM_freeN(vert_with_lib);
        MEM_freeN(frag_with_lib);
        if (geom) {
                MEM_freeN(geom_with_lib);
        }

        return sh;
}

GPUShader *DRW_shader_create_with_transform_feedback(
        const char *vert, const char *geom, const char *defines,
        const eGPUShaderTFBType prim_type, const char **varying_names, const int varying_count)
{
        return GPU_shader_create_ex(vert,
                                    datatoc_gpu_shader_depth_only_frag_glsl,
                                    geom, NULL, defines,
                                    prim_type, varying_names, varying_count, __func__);
}

GPUShader *DRW_shader_create_2D(const char *frag, const char *defines)
{
        return GPU_shader_create(datatoc_gpu_shader_2D_vert_glsl, frag, NULL, NULL, defines, __func__);
}

GPUShader *DRW_shader_create_3D(const char *frag, const char *defines)
{
        return GPU_shader_create(datatoc_gpu_shader_3D_vert_glsl, frag, NULL, NULL, defines, __func__);
}

GPUShader *DRW_shader_create_fullscreen(const char *frag, const char *defines)
{
        return GPU_shader_create(datatoc_common_fullscreen_vert_glsl, frag, NULL, NULL, defines, __func__);
}

GPUShader *DRW_shader_create_3D_depth_only(eGPUShaderConfig shader_cfg)
{
        return GPU_shader_get_builtin_shader_with_config(GPU_SHADER_3D_DEPTH_ONLY, shader_cfg);
}

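/* Find an already requested GPUMaterial for this world/material. Return NULL when the
 * material is still queued but is needed right away (image render or non-deferred),
 * so that the engine calls DRW_shader_create_from_world/material() with the full
 * shader code and the compilation can proceed from there. */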
GPUMaterial *DRW_shader_find_from_world(World *wo, const void *engine_type, int options, bool deferred)
{
        GPUMaterial *mat = GPU_material_from_nodetree_find(&wo->gpumaterial, engine_type, options);
        if (DRW_state_is_image_render() || !deferred) {
                if (mat != NULL && GPU_material_status(mat) == GPU_MAT_QUEUED) {
                        /* XXX Hack: we return NULL so that the engine will call DRW_shader_create_from_XXX
                         * with the shader code and we will resume the compilation from there. */
                        return NULL;
                }
        }
        return mat;
}

GPUMaterial *DRW_shader_find_from_material(Material *ma, const void *engine_type, int options, bool deferred)
{
        GPUMaterial *mat = GPU_material_from_nodetree_find(&ma->gpumaterial, engine_type, options);
        if (DRW_state_is_image_render() || !deferred) {
                if (mat != NULL && GPU_material_status(mat) == GPU_MAT_QUEUED) {
                        /* XXX Hack: we return NULL so that the engine will call DRW_shader_create_from_XXX
                         * with the shader code and we will resume the compilation from there. */
                        return NULL;
                }
        }
        return mat;
}

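/* Get a GPUMaterial for this engine and option combination: reuse an existing one
 * during image render, otherwise build it from the world's node tree. If the result
 * still needs compiling it is either queued or compiled immediately, see
 * drw_deferred_shader_add(). The material variant below behaves the same way. */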
GPUMaterial *DRW_shader_create_from_world(
        struct Scene *scene, World *wo, const void *engine_type, int options,
        const char *vert, const char *geom, const char *frag_lib, const char *defines, bool deferred)
{
        GPUMaterial *mat = NULL;
        if (DRW_state_is_image_render()) {
                mat = GPU_material_from_nodetree_find(&wo->gpumaterial, engine_type, options);
        }

        if (mat == NULL) {
                scene = (Scene *)DEG_get_original_id(&DST.draw_ctx.scene->id);
                mat = GPU_material_from_nodetree(
                        scene, wo->nodetree, &wo->gpumaterial, engine_type, options,
                        vert, geom, frag_lib, defines, wo->id.name);
        }

        if (GPU_material_status(mat) == GPU_MAT_QUEUED) {
                drw_deferred_shader_add(mat, deferred);
        }

        return mat;
}

GPUMaterial *DRW_shader_create_from_material(
        struct Scene *scene, Material *ma, const void *engine_type, int options,
        const char *vert, const char *geom, const char *frag_lib, const char *defines, bool deferred)
{
        GPUMaterial *mat = NULL;
        if (DRW_state_is_image_render()) {
                mat = GPU_material_from_nodetree_find(&ma->gpumaterial, engine_type, options);
        }

        if (mat == NULL) {
                scene = (Scene *)DEG_get_original_id(&DST.draw_ctx.scene->id);
                mat = GPU_material_from_nodetree(
                        scene, ma->nodetree, &ma->gpumaterial, engine_type, options,
                        vert, geom, frag_lib, defines, ma->id.name);
        }

        if (GPU_material_status(mat) == GPU_MAT_QUEUED) {
                drw_deferred_shader_add(mat, deferred);
        }

        return mat;
}
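
/* A minimal usage sketch (hypothetical engine code, all "my_*" names are placeholders):
 * an engine would typically look up a cached material first and only build it, with
 * deferred compilation enabled, on a cache miss:
 *
 *   GPUMaterial *gpumat = DRW_shader_find_from_material(ma, my_engine, my_options, true);
 *   if (gpumat == NULL) {
 *           gpumat = DRW_shader_create_from_material(
 *                   scene, ma, my_engine, my_options,
 *                   my_vert_glsl, NULL, my_frag_lib_glsl, my_defines, true);
 *   }
 */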

void DRW_shader_free(GPUShader *shader)
{
        GPU_shader_free(shader);
}