Cycles: Fixed uninitialized memory
[blender.git] / intern / cycles / device / device_cpu.cpp
/*
 * Copyright 2011-2013 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdlib.h>
#include <string.h>

/* So ImathMath is included before our kernel_cpu_compat. */
#ifdef WITH_OSL
/* So no context pollution happens from indirectly included windows.h */
#  include "util/util_windows.h"
#  include <OSL/oslexec.h>
#endif

#include "device/device.h"
#include "device/device_denoising.h"
#include "device/device_intern.h"
#include "device/device_split_kernel.h"

#include "kernel/kernel.h"
#include "kernel/kernel_compat_cpu.h"
#include "kernel/kernel_types.h"
#include "kernel/split/kernel_split_data.h"
#include "kernel/kernel_globals.h"

#include "kernel/filter/filter.h"

#include "kernel/osl/osl_shader.h"
#include "kernel/osl/osl_globals.h"

#include "render/buffers.h"
#include "render/coverage.h"

#include "util/util_debug.h"
#include "util/util_foreach.h"
#include "util/util_function.h"
#include "util/util_logging.h"
#include "util/util_map.h"
#include "util/util_opengl.h"
#include "util/util_optimization.h"
#include "util/util_progress.h"
#include "util/util_system.h"
#include "util/util_thread.h"

CCL_NAMESPACE_BEGIN

class CPUDevice;

/* Has to be outside of the class to be shared across template instantiations. */
static const char *logged_architecture = "";

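/* Helper that wraps all micro-architecture variants of a kernel entry point.
 * The constructor picks the most capable variant that is both compiled in and
 * supported by the CPU (and not disabled via debug flags), falling back to the
 * generic kernel otherwise. */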
template<typename F>
class KernelFunctions {
public:
        KernelFunctions()
        {
                kernel = (F)NULL;
        }

        KernelFunctions(F kernel_default,
                        F kernel_sse2,
                        F kernel_sse3,
                        F kernel_sse41,
                        F kernel_avx,
                        F kernel_avx2)
        {
                const char *architecture_name = "default";
                kernel = kernel_default;

                /* Silence potential warnings about unused variables
                 * when compiling without some architectures. */
                (void) kernel_sse2;
                (void) kernel_sse3;
                (void) kernel_sse41;
                (void) kernel_avx;
                (void) kernel_avx2;
#ifdef WITH_CYCLES_OPTIMIZED_KERNEL_AVX2
                if(DebugFlags().cpu.has_avx2() && system_cpu_support_avx2()) {
                        architecture_name = "AVX2";
                        kernel = kernel_avx2;
                }
                else
#endif
#ifdef WITH_CYCLES_OPTIMIZED_KERNEL_AVX
                if(DebugFlags().cpu.has_avx() && system_cpu_support_avx()) {
                        architecture_name = "AVX";
                        kernel = kernel_avx;
                }
                else
#endif
#ifdef WITH_CYCLES_OPTIMIZED_KERNEL_SSE41
                if(DebugFlags().cpu.has_sse41() && system_cpu_support_sse41()) {
                        architecture_name = "SSE4.1";
                        kernel = kernel_sse41;
                }
                else
#endif
#ifdef WITH_CYCLES_OPTIMIZED_KERNEL_SSE3
                if(DebugFlags().cpu.has_sse3() && system_cpu_support_sse3()) {
                        architecture_name = "SSE3";
                        kernel = kernel_sse3;
                }
                else
#endif
#ifdef WITH_CYCLES_OPTIMIZED_KERNEL_SSE2
                if(DebugFlags().cpu.has_sse2() && system_cpu_support_sse2()) {
                        architecture_name = "SSE2";
                        kernel = kernel_sse2;
                }
#endif

                if(strcmp(architecture_name, logged_architecture) != 0) {
                        VLOG(1) << "Will be using " << architecture_name << " kernels.";
                        logged_architecture = architecture_name;
                }
        }

        inline F operator()() const {
                assert(kernel);
                return kernel;
        }
protected:
        F kernel;
};

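/* CPU implementation of the split kernel. Individual split kernels are looked
 * up by name in CPUDevice::split_kernels and executed serially over the
 * requested work size. */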
class CPUSplitKernel : public DeviceSplitKernel {
        CPUDevice *device;
public:
        explicit CPUSplitKernel(CPUDevice *device);

        virtual bool enqueue_split_kernel_data_init(const KernelDimensions& dim,
                                                    RenderTile& rtile,
                                                    int num_global_elements,
                                                    device_memory& kernel_globals,
                                                    device_memory& kernel_data_,
                                                    device_memory& split_data,
                                                    device_memory& ray_state,
                                                    device_memory& queue_index,
                                                    device_memory& use_queues_flag,
                                                    device_memory& work_pool_wgs);

        virtual SplitKernelFunction* get_split_kernel_function(const string& kernel_name,
                                                               const DeviceRequestedFeatures&);
        virtual int2 split_kernel_local_size();
        virtual int2 split_kernel_global_size(device_memory& kg, device_memory& data, DeviceTask *task);
        virtual uint64_t state_buffer_size(device_memory& kg, device_memory& data, size_t num_threads);
};

class CPUDevice : public Device
{
public:
        TaskPool task_pool;
        KernelGlobals kernel_globals;

        device_vector<TextureInfo> texture_info;
        bool need_texture_info;

#ifdef WITH_OSL
        OSLGlobals osl_globals;
#endif

        bool use_split_kernel;

        DeviceRequestedFeatures requested_features;

        KernelFunctions<void(*)(KernelGlobals *, float *, int, int, int, int, int)>             path_trace_kernel;
        KernelFunctions<void(*)(KernelGlobals *, uchar4 *, float *, float, int, int, int, int)> convert_to_half_float_kernel;
        KernelFunctions<void(*)(KernelGlobals *, uchar4 *, float *, float, int, int, int, int)> convert_to_byte_kernel;
        KernelFunctions<void(*)(KernelGlobals *, uint4 *, float4 *, int, int, int, int, int)>   shader_kernel;

        KernelFunctions<void(*)(int, TileInfo*, int, int, float*, float*, float*, float*, float*, int*, int, int)> filter_divide_shadow_kernel;
        KernelFunctions<void(*)(int, TileInfo*, int, int, int, int, float*, float*, int*, int, int)>               filter_get_feature_kernel;
        KernelFunctions<void(*)(int, int, float*, float*, float*, float*, int*, int)>                              filter_detect_outliers_kernel;
        KernelFunctions<void(*)(int, int, float*, float*, float*, float*, int*, int)>                              filter_combine_halves_kernel;

        KernelFunctions<void(*)(int, int, float*, float*, float*, int*, int, int, float, float)>   filter_nlm_calc_difference_kernel;
        KernelFunctions<void(*)(float*, float*, int*, int, int)>                                   filter_nlm_blur_kernel;
        KernelFunctions<void(*)(float*, float*, int*, int, int)>                                   filter_nlm_calc_weight_kernel;
        KernelFunctions<void(*)(int, int, float*, float*, float*, float*, float*, int*, int, int)> filter_nlm_update_output_kernel;
        KernelFunctions<void(*)(float*, float*, int*, int)>                                        filter_nlm_normalize_kernel;

        KernelFunctions<void(*)(float*, int, int, int, float*, int*, int*, int, int, float)>                         filter_construct_transform_kernel;
        KernelFunctions<void(*)(int, int, float*, float*, float*, int*, float*, float3*, int*, int*, int, int, int)> filter_nlm_construct_gramian_kernel;
        KernelFunctions<void(*)(int, int, int, float*, int*, float*, float3*, int*, int)>                            filter_finalize_kernel;

        KernelFunctions<void(*)(KernelGlobals *, ccl_constant KernelData*, ccl_global void*, int, ccl_global char*,
                                int, int, int, int, int, int, int, int, ccl_global int*, int,
                                ccl_global char*, ccl_global unsigned int*, unsigned int, ccl_global float*)>        data_init_kernel;
        unordered_map<string, KernelFunctions<void(*)(KernelGlobals*, KernelData*)> > split_kernels;

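/* Expands to the full list of per-ISA entry points for a kernel, in the order
 * expected by the KernelFunctions constructor. */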
#define KERNEL_FUNCTIONS(name) \
              KERNEL_NAME_EVAL(cpu, name), \
              KERNEL_NAME_EVAL(cpu_sse2, name), \
              KERNEL_NAME_EVAL(cpu_sse3, name), \
              KERNEL_NAME_EVAL(cpu_sse41, name), \
              KERNEL_NAME_EVAL(cpu_avx, name), \
              KERNEL_NAME_EVAL(cpu_avx2, name)

        CPUDevice(DeviceInfo& info_, Stats &stats_, Profiler &profiler_, bool background_)
        : Device(info_, stats_, profiler_, background_),
          texture_info(this, "__texture_info", MEM_TEXTURE),
#define REGISTER_KERNEL(name) name ## _kernel(KERNEL_FUNCTIONS(name))
          REGISTER_KERNEL(path_trace),
          REGISTER_KERNEL(convert_to_half_float),
          REGISTER_KERNEL(convert_to_byte),
          REGISTER_KERNEL(shader),
          REGISTER_KERNEL(filter_divide_shadow),
          REGISTER_KERNEL(filter_get_feature),
          REGISTER_KERNEL(filter_detect_outliers),
          REGISTER_KERNEL(filter_combine_halves),
          REGISTER_KERNEL(filter_nlm_calc_difference),
          REGISTER_KERNEL(filter_nlm_blur),
          REGISTER_KERNEL(filter_nlm_calc_weight),
          REGISTER_KERNEL(filter_nlm_update_output),
          REGISTER_KERNEL(filter_nlm_normalize),
          REGISTER_KERNEL(filter_construct_transform),
          REGISTER_KERNEL(filter_nlm_construct_gramian),
          REGISTER_KERNEL(filter_finalize),
          REGISTER_KERNEL(data_init)
#undef REGISTER_KERNEL
        {
                if(info.cpu_threads == 0) {
                        info.cpu_threads = TaskScheduler::num_threads();
                }

#ifdef WITH_OSL
                kernel_globals.osl = &osl_globals;
#endif
                use_split_kernel = DebugFlags().cpu.split_kernel;
                if(use_split_kernel) {
                        VLOG(1) << "Will be using split kernel.";
                }
                need_texture_info = false;

#define REGISTER_SPLIT_KERNEL(name) split_kernels[#name] = KernelFunctions<void(*)(KernelGlobals*, KernelData*)>(KERNEL_FUNCTIONS(name))
                REGISTER_SPLIT_KERNEL(path_init);
                REGISTER_SPLIT_KERNEL(scene_intersect);
                REGISTER_SPLIT_KERNEL(lamp_emission);
                REGISTER_SPLIT_KERNEL(do_volume);
                REGISTER_SPLIT_KERNEL(queue_enqueue);
                REGISTER_SPLIT_KERNEL(indirect_background);
                REGISTER_SPLIT_KERNEL(shader_setup);
                REGISTER_SPLIT_KERNEL(shader_sort);
                REGISTER_SPLIT_KERNEL(shader_eval);
                REGISTER_SPLIT_KERNEL(holdout_emission_blurring_pathtermination_ao);
                REGISTER_SPLIT_KERNEL(subsurface_scatter);
                REGISTER_SPLIT_KERNEL(direct_lighting);
                REGISTER_SPLIT_KERNEL(shadow_blocked_ao);
                REGISTER_SPLIT_KERNEL(shadow_blocked_dl);
                REGISTER_SPLIT_KERNEL(enqueue_inactive);
                REGISTER_SPLIT_KERNEL(next_iteration_setup);
                REGISTER_SPLIT_KERNEL(indirect_subsurface);
                REGISTER_SPLIT_KERNEL(buffer_update);
#undef REGISTER_SPLIT_KERNEL
#undef KERNEL_FUNCTIONS
        }

        ~CPUDevice()
        {
                task_pool.stop();
                texture_info.free();
        }

        virtual bool show_samples() const
        {
                return (info.cpu_threads == 1);
        }

        virtual BVHLayoutMask get_bvh_layout_mask() const {
                BVHLayoutMask bvh_layout_mask = BVH_LAYOUT_BVH2;
                if(DebugFlags().cpu.has_sse2() && system_cpu_support_sse2()) {
                        bvh_layout_mask |= BVH_LAYOUT_BVH4;
                }
                if(DebugFlags().cpu.has_avx2() && system_cpu_support_avx2()) {
                        bvh_layout_mask |= BVH_LAYOUT_BVH8;
                }
#ifdef WITH_EMBREE
                bvh_layout_mask |= BVH_LAYOUT_EMBREE;
#endif  /* WITH_EMBREE */
                return bvh_layout_mask;
        }

        void load_texture_info()
        {
                if(need_texture_info) {
                        texture_info.copy_to_device();
                        need_texture_info = false;
                }
        }

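        /* Memory management. On the CPU device, "device" memory is simply host
         * memory: the device pointer aliases the host pointer, except for
         * MEM_DEVICE_ONLY allocations which get their own aligned buffer. */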
        void mem_alloc(device_memory& mem)
        {
                if(mem.type == MEM_TEXTURE) {
                        assert(!"mem_alloc not supported for textures.");
                }
                else {
                        if(mem.name) {
                                VLOG(1) << "Buffer allocate: " << mem.name << ", "
                                        << string_human_readable_number(mem.memory_size()) << " bytes. ("
                                        << string_human_readable_size(mem.memory_size()) << ")";
                        }

                        if(mem.type == MEM_DEVICE_ONLY) {
                                assert(!mem.host_pointer);
                                size_t alignment = MIN_ALIGNMENT_CPU_DATA_TYPES;
                                void *data = util_aligned_malloc(mem.memory_size(), alignment);
                                mem.device_pointer = (device_ptr)data;
                        }
                        else {
                                mem.device_pointer = (device_ptr)mem.host_pointer;
                        }

                        mem.device_size = mem.memory_size();
                        stats.mem_alloc(mem.device_size);
                }
        }

        void mem_copy_to(device_memory& mem)
        {
                if(mem.type == MEM_TEXTURE) {
                        tex_free(mem);
                        tex_alloc(mem);
                }
                else if(mem.type == MEM_PIXELS) {
                        assert(!"mem_copy_to not supported for pixels.");
                }
                else {
                        if(!mem.device_pointer) {
                                mem_alloc(mem);
                        }

                        /* copy is no-op */
                }
        }

        void mem_copy_from(device_memory& /*mem*/,
                           int /*y*/, int /*w*/, int /*h*/,
                           int /*elem*/)
        {
                /* no-op */
        }

        void mem_zero(device_memory& mem)
        {
                if(!mem.device_pointer) {
                        mem_alloc(mem);
                }

                if(mem.device_pointer) {
                        memset((void*)mem.device_pointer, 0, mem.memory_size());
                }
        }

        void mem_free(device_memory& mem)
        {
                if(mem.type == MEM_TEXTURE) {
                        tex_free(mem);
                }
                else if(mem.device_pointer) {
                        if(mem.type == MEM_DEVICE_ONLY) {
                                util_aligned_free((void*)mem.device_pointer);
                        }
                        mem.device_pointer = 0;
                        stats.mem_free(mem.device_size);
                        mem.device_size = 0;
                }
        }

        virtual device_ptr mem_alloc_sub_ptr(device_memory& mem, int offset, int /*size*/)
        {
                return (device_ptr) (((char*) mem.device_pointer) + mem.memory_elements_size(offset));
        }

        void const_copy_to(const char *name, void *host, size_t size)
        {
                kernel_const_copy(&kernel_globals, name, host, size);
        }

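        /* Textures. Data textures are handed to the kernel globals directly; image
         * textures get a slot in the texture_info array, which is uploaded lazily
         * by load_texture_info() before a task runs. */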
        void tex_alloc(device_memory& mem)
        {
                VLOG(1) << "Texture allocate: " << mem.name << ", "
                        << string_human_readable_number(mem.memory_size()) << " bytes. ("
                        << string_human_readable_size(mem.memory_size()) << ")";

                if(mem.interpolation == INTERPOLATION_NONE) {
                        /* Data texture. */
                        kernel_tex_copy(&kernel_globals,
                                        mem.name,
                                        mem.host_pointer,
                                        mem.data_size);
                }
                else {
                        /* Image Texture. */
                        int flat_slot = 0;
                        if(string_startswith(mem.name, "__tex_image")) {
                                int pos = string(mem.name).rfind("_");
                                flat_slot = atoi(mem.name + pos + 1);
                        }
                        else {
                                assert(0);
                        }

                        if(flat_slot >= texture_info.size()) {
                                /* Allocate some slots in advance, to reduce amount
                                 * of re-allocations. */
                                texture_info.resize(flat_slot + 128);
                        }

                        TextureInfo& info = texture_info[flat_slot];
                        info.data = (uint64_t)mem.host_pointer;
                        info.cl_buffer = 0;
                        info.interpolation = mem.interpolation;
                        info.extension = mem.extension;
                        info.width = mem.data_width;
                        info.height = mem.data_height;
                        info.depth = mem.data_depth;

                        need_texture_info = true;
                }

                mem.device_pointer = (device_ptr)mem.host_pointer;
                mem.device_size = mem.memory_size();
                stats.mem_alloc(mem.device_size);
        }

        void tex_free(device_memory& mem)
        {
                if(mem.device_pointer) {
                        mem.device_pointer = 0;
                        stats.mem_free(mem.device_size);
                        mem.device_size = 0;
                        need_texture_info = true;
                }
        }

        void *osl_memory()
        {
#ifdef WITH_OSL
                return &osl_globals;
#else
                return NULL;
#endif
        }

        void thread_run(DeviceTask *task)
        {
                if(task->type == DeviceTask::RENDER) {
                        thread_render(*task);
                }
                else if(task->type == DeviceTask::FILM_CONVERT) {
                        thread_film_convert(*task);
                }
                else if(task->type == DeviceTask::SHADER) {
                        thread_shader(*task);
                }
        }

        class CPUDeviceTask : public DeviceTask {
        public:
                CPUDeviceTask(CPUDevice *device, DeviceTask& task)
                : DeviceTask(task)
                {
                        run = function_bind(&CPUDevice::thread_run, device, this);
                }
        };

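        /* Denoising kernels, run per tile. Non-local means filtering evaluates the
         * difference, blur, weight and output-update kernels for every pixel offset
         * in the (2*r+1)^2 search window, then normalizes the accumulated result. */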
        bool denoising_non_local_means(device_ptr image_ptr, device_ptr guide_ptr, device_ptr variance_ptr, device_ptr out_ptr,
                                       DenoisingTask *task)
        {
                ProfilingHelper profiling(task->profiler, PROFILING_DENOISING_NON_LOCAL_MEANS);

                int4 rect = task->rect;
                int   r   = task->nlm_state.r;
                int   f   = task->nlm_state.f;
                float a   = task->nlm_state.a;
                float k_2 = task->nlm_state.k_2;

                int w = align_up(rect.z-rect.x, 4);
                int h = rect.w-rect.y;

                float *temporary_mem = (float*) task->buffer.temporary_mem.device_pointer;
                float *blurDifference = temporary_mem;
                float *difference     = temporary_mem + task->buffer.pass_stride;
                float *weightAccum    = temporary_mem + 2*task->buffer.pass_stride;

                memset(weightAccum, 0, sizeof(float)*w*h);
                memset((float*) out_ptr, 0, sizeof(float)*w*h);

                for(int i = 0; i < (2*r+1)*(2*r+1); i++) {
                        int dy = i / (2*r+1) - r;
                        int dx = i % (2*r+1) - r;

                        int local_rect[4] = {max(0, -dx), max(0, -dy), rect.z-rect.x - max(0, dx), rect.w-rect.y - max(0, dy)};
                        filter_nlm_calc_difference_kernel()(dx, dy,
                                                            (float*) guide_ptr,
                                                            (float*) variance_ptr,
                                                            difference,
                                                            local_rect,
                                                            w, 0,
                                                            a, k_2);

                        filter_nlm_blur_kernel()       (difference, blurDifference, local_rect, w, f);
                        filter_nlm_calc_weight_kernel()(blurDifference, difference, local_rect, w, f);
                        filter_nlm_blur_kernel()       (difference, blurDifference, local_rect, w, f);

                        filter_nlm_update_output_kernel()(dx, dy,
                                                          blurDifference,
                                                          (float*) image_ptr,
                                                          difference,
                                                          (float*) out_ptr,
                                                          weightAccum,
                                                          local_rect,
                                                          w, f);
                }

                int local_rect[4] = {0, 0, rect.z-rect.x, rect.w-rect.y};
                filter_nlm_normalize_kernel()((float*) out_ptr, weightAccum, local_rect, w);

                return true;
        }

        bool denoising_construct_transform(DenoisingTask *task)
        {
                ProfilingHelper profiling(task->profiler, PROFILING_DENOISING_CONSTRUCT_TRANSFORM);

                for(int y = 0; y < task->filter_area.w; y++) {
                        for(int x = 0; x < task->filter_area.z; x++) {
                                filter_construct_transform_kernel()((float*) task->buffer.mem.device_pointer,
                                                                    x + task->filter_area.x,
                                                                    y + task->filter_area.y,
                                                                    y*task->filter_area.z + x,
                                                                    (float*) task->storage.transform.device_pointer,
                                                                    (int*)   task->storage.rank.device_pointer,
                                                                    &task->rect.x,
                                                                    task->buffer.pass_stride,
                                                                    task->radius,
                                                                    task->pca_threshold);
                        }
                }
                return true;
        }

        bool denoising_reconstruct(device_ptr color_ptr,
                                   device_ptr color_variance_ptr,
                                   device_ptr output_ptr,
                                   DenoisingTask *task)
        {
                ProfilingHelper profiling(task->profiler, PROFILING_DENOISING_RECONSTRUCT);

                mem_zero(task->storage.XtWX);
                mem_zero(task->storage.XtWY);

                float *temporary_mem = (float*) task->buffer.temporary_mem.device_pointer;
                float *difference     = temporary_mem;
                float *blurDifference = temporary_mem + task->buffer.pass_stride;

                int r = task->radius;
                for(int i = 0; i < (2*r+1)*(2*r+1); i++) {
                        int dy = i / (2*r+1) - r;
                        int dx = i % (2*r+1) - r;

                        int local_rect[4] = {max(0, -dx), max(0, -dy),
                                             task->reconstruction_state.source_w - max(0, dx),
                                             task->reconstruction_state.source_h - max(0, dy)};
                        filter_nlm_calc_difference_kernel()(dx, dy,
                                                            (float*) color_ptr,
                                                            (float*) color_variance_ptr,
                                                            difference,
                                                            local_rect,
                                                            task->buffer.stride,
                                                            task->buffer.pass_stride,
                                                            1.0f,
                                                            task->nlm_k_2);
                        filter_nlm_blur_kernel()(difference, blurDifference, local_rect, task->buffer.stride, 4);
                        filter_nlm_calc_weight_kernel()(blurDifference, difference, local_rect, task->buffer.stride, 4);
                        filter_nlm_blur_kernel()(difference, blurDifference, local_rect, task->buffer.stride, 4);
                        filter_nlm_construct_gramian_kernel()(dx, dy,
                                                              blurDifference,
                                                              (float*)  task->buffer.mem.device_pointer,
                                                              (float*)  task->storage.transform.device_pointer,
                                                              (int*)    task->storage.rank.device_pointer,
                                                              (float*)  task->storage.XtWX.device_pointer,
                                                              (float3*) task->storage.XtWY.device_pointer,
                                                              local_rect,
                                                              &task->reconstruction_state.filter_window.x,
                                                              task->buffer.stride,
                                                              4,
                                                              task->buffer.pass_stride);
                }
                for(int y = 0; y < task->filter_area.w; y++) {
                        for(int x = 0; x < task->filter_area.z; x++) {
                                filter_finalize_kernel()(x,
                                                         y,
                                                         y*task->filter_area.z + x,
                                                         (float*)  output_ptr,
                                                         (int*)    task->storage.rank.device_pointer,
                                                         (float*)  task->storage.XtWX.device_pointer,
                                                         (float3*) task->storage.XtWY.device_pointer,
                                                         &task->reconstruction_state.buffer_params.x,
                                                         task->render_buffer.samples);
                        }
                }
                return true;
        }

        bool denoising_combine_halves(device_ptr a_ptr, device_ptr b_ptr,
                                      device_ptr mean_ptr, device_ptr variance_ptr,
                                      int r, int4 rect, DenoisingTask *task)
        {
                ProfilingHelper profiling(task->profiler, PROFILING_DENOISING_COMBINE_HALVES);

                for(int y = rect.y; y < rect.w; y++) {
                        for(int x = rect.x; x < rect.z; x++) {
                                filter_combine_halves_kernel()(x, y,
                                                               (float*) mean_ptr,
                                                               (float*) variance_ptr,
                                                               (float*) a_ptr,
                                                               (float*) b_ptr,
                                                               &rect.x,
                                                               r);
                        }
                }
                return true;
        }

        bool denoising_divide_shadow(device_ptr a_ptr, device_ptr b_ptr,
                                     device_ptr sample_variance_ptr, device_ptr sv_variance_ptr,
                                     device_ptr buffer_variance_ptr, DenoisingTask *task)
        {
                ProfilingHelper profiling(task->profiler, PROFILING_DENOISING_DIVIDE_SHADOW);

                for(int y = task->rect.y; y < task->rect.w; y++) {
                        for(int x = task->rect.x; x < task->rect.z; x++) {
                                filter_divide_shadow_kernel()(task->render_buffer.samples,
                                                              task->tile_info,
                                                              x, y,
                                                              (float*) a_ptr,
                                                              (float*) b_ptr,
                                                              (float*) sample_variance_ptr,
                                                              (float*) sv_variance_ptr,
                                                              (float*) buffer_variance_ptr,
                                                              &task->rect.x,
                                                              task->render_buffer.pass_stride,
                                                              task->render_buffer.offset);
                        }
                }
                return true;
        }

        bool denoising_get_feature(int mean_offset,
                                   int variance_offset,
                                   device_ptr mean_ptr,
                                   device_ptr variance_ptr,
                                   DenoisingTask *task)
        {
                ProfilingHelper profiling(task->profiler, PROFILING_DENOISING_GET_FEATURE);

                for(int y = task->rect.y; y < task->rect.w; y++) {
                        for(int x = task->rect.x; x < task->rect.z; x++) {
                                filter_get_feature_kernel()(task->render_buffer.samples,
                                                            task->tile_info,
                                                            mean_offset,
                                                            variance_offset,
                                                            x, y,
                                                            (float*) mean_ptr,
                                                            (float*) variance_ptr,
                                                            &task->rect.x,
                                                            task->render_buffer.pass_stride,
                                                            task->render_buffer.offset);
                        }
                }
                return true;
        }

        bool denoising_detect_outliers(device_ptr image_ptr,
                                       device_ptr variance_ptr,
                                       device_ptr depth_ptr,
                                       device_ptr output_ptr,
                                       DenoisingTask *task)
        {
                ProfilingHelper profiling(task->profiler, PROFILING_DENOISING_DETECT_OUTLIERS);

                for(int y = task->rect.y; y < task->rect.w; y++) {
                        for(int x = task->rect.x; x < task->rect.z; x++) {
                                filter_detect_outliers_kernel()(x, y,
                                                                (float*) image_ptr,
                                                                (float*) variance_ptr,
                                                                (float*) depth_ptr,
                                                                (float*) output_ptr,
                                                                &task->rect.x,
                                                                task->buffer.pass_stride);
                        }
                }
                return true;
        }

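        /* Render a tile with the path tracing kernel, one sample at a time over all
         * pixels. Denormals are flushed to zero to avoid large slowdowns inside the
         * kernel math. */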
        void path_trace(DeviceTask &task, RenderTile &tile, KernelGlobals *kg)
        {
                const bool use_coverage = kernel_data.film.cryptomatte_passes & CRYPT_ACCURATE;

                scoped_timer timer(&tile.buffers->render_time);

                Coverage coverage(kg, tile);
                if(use_coverage) {
                        coverage.init_path_trace();
                }

                float *render_buffer = (float*)tile.buffer;
                int start_sample = tile.start_sample;
                int end_sample = tile.start_sample + tile.num_samples;

                _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
                _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);

                for(int sample = start_sample; sample < end_sample; sample++) {
                        if(task.get_cancel() || task_pool.canceled()) {
                                if(task.need_finish_queue == false)
                                        break;
                        }

                        for(int y = tile.y; y < tile.y + tile.h; y++) {
                                for(int x = tile.x; x < tile.x + tile.w; x++) {
                                        if(use_coverage) {
                                                coverage.init_pixel(x, y);
                                        }
                                        path_trace_kernel()(kg, render_buffer,
                                                            sample, x, y, tile.offset, tile.stride);
                                }
                        }

                        tile.sample = sample + 1;

                        task.update_progress(&tile, tile.w*tile.h);
                }
                if(use_coverage) {
                        coverage.finalize();
                }
        }

        void denoise(DenoisingTask& denoising, RenderTile &tile)
        {
                ProfilingHelper profiling(denoising.profiler, PROFILING_DENOISING);

                tile.sample = tile.start_sample + tile.num_samples;

                denoising.functions.construct_transform = function_bind(&CPUDevice::denoising_construct_transform, this, &denoising);
                denoising.functions.reconstruct = function_bind(&CPUDevice::denoising_reconstruct, this, _1, _2, _3, &denoising);
                denoising.functions.divide_shadow = function_bind(&CPUDevice::denoising_divide_shadow, this, _1, _2, _3, _4, _5, &denoising);
                denoising.functions.non_local_means = function_bind(&CPUDevice::denoising_non_local_means, this, _1, _2, _3, _4, &denoising);
                denoising.functions.combine_halves = function_bind(&CPUDevice::denoising_combine_halves, this, _1, _2, _3, _4, _5, _6, &denoising);
                denoising.functions.get_feature = function_bind(&CPUDevice::denoising_get_feature, this, _1, _2, _3, _4, &denoising);
                denoising.functions.detect_outliers = function_bind(&CPUDevice::denoising_detect_outliers, this, _1, _2, _3, _4, &denoising);

                denoising.filter_area = make_int4(tile.x, tile.y, tile.w, tile.h);
                denoising.render_buffer.samples = tile.sample;
                denoising.buffer.gpu_temporary_mem = false;

                denoising.run_denoising(&tile);
        }

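        /* Main per-thread render entry point: build thread-local kernel globals,
         * then keep acquiring tiles and path tracing or denoising them until the
         * task runs out of work or is cancelled. */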
        void thread_render(DeviceTask& task)
        {
                if(task_pool.canceled()) {
                        if(task.need_finish_queue == false)
                                return;
                }

                /* allocate buffer for kernel globals */
                device_only_memory<KernelGlobals> kgbuffer(this, "kernel_globals");
                kgbuffer.alloc_to_device(1);

                KernelGlobals *kg = new ((void*) kgbuffer.device_pointer) KernelGlobals(thread_kernel_globals_init());

                profiler.add_state(&kg->profiler);

                CPUSplitKernel *split_kernel = NULL;
                if(use_split_kernel) {
                        split_kernel = new CPUSplitKernel(this);
                        if(!split_kernel->load_kernels(requested_features)) {
                                thread_kernel_globals_free((KernelGlobals*)kgbuffer.device_pointer);
                                kgbuffer.free();
                                delete split_kernel;
                                return;
                        }
                }

                RenderTile tile;
                DenoisingTask denoising(this, task);
                denoising.profiler = &kg->profiler;

                while(task.acquire_tile(this, tile)) {
                        if(tile.task == RenderTile::PATH_TRACE) {
                                if(use_split_kernel) {
                                        device_only_memory<uchar> void_buffer(this, "void_buffer");
                                        split_kernel->path_trace(&task, tile, kgbuffer, void_buffer);
                                }
                                else {
                                        path_trace(task, tile, kg);
                                }
                        }
                        else if(tile.task == RenderTile::DENOISE) {
                                denoise(denoising, tile);
                                task.update_progress(&tile, tile.w*tile.h);
                        }

                        task.release_tile(tile);

                        if(task_pool.canceled()) {
                                if(task.need_finish_queue == false)
                                        break;
                        }
                }

                profiler.remove_state(&kg->profiler);

                thread_kernel_globals_free((KernelGlobals*)kgbuffer.device_pointer);
                kg->~KernelGlobals();
                kgbuffer.free();
                delete split_kernel;
        }

        void thread_film_convert(DeviceTask& task)
        {
                float sample_scale = 1.0f/(task.sample + 1);

                if(task.rgba_half) {
                        for(int y = task.y; y < task.y + task.h; y++)
                                for(int x = task.x; x < task.x + task.w; x++)
                                        convert_to_half_float_kernel()(&kernel_globals, (uchar4*)task.rgba_half, (float*)task.buffer,
                                                                       sample_scale, x, y, task.offset, task.stride);
                }
                else {
                        for(int y = task.y; y < task.y + task.h; y++)
                                for(int x = task.x; x < task.x + task.w; x++)
                                        convert_to_byte_kernel()(&kernel_globals, (uchar4*)task.rgba_byte, (float*)task.buffer,
                                                                 sample_scale, x, y, task.offset, task.stride);
                }
        }

        void thread_shader(DeviceTask& task)
        {
                KernelGlobals kg = kernel_globals;

#ifdef WITH_OSL
                OSLShader::thread_init(&kg, &kernel_globals, &osl_globals);
#endif
                for(int sample = 0; sample < task.num_samples; sample++) {
                        for(int x = task.shader_x; x < task.shader_x + task.shader_w; x++)
                                shader_kernel()(&kg,
                                                (uint4*)task.shader_input,
                                                (float4*)task.shader_output,
                                                task.shader_eval_type,
                                                task.shader_filter,
                                                x,
                                                task.offset,
                                                sample);

                        if(task.get_cancel() || task_pool.canceled())
                                break;

                        task.update_progress(NULL);
                }

#ifdef WITH_OSL
                OSLShader::thread_free(&kg);
#endif
        }

        int get_split_task_count(DeviceTask& task)
        {
                if(task.type == DeviceTask::SHADER)
                        return task.get_subtask_count(info.cpu_threads, 256);
                else
                        return task.get_subtask_count(info.cpu_threads);
        }

        void task_add(DeviceTask& task)
        {
                /* Load texture info. */
                load_texture_info();

                /* split task into smaller ones */
                list<DeviceTask> tasks;

                if(task.type == DeviceTask::SHADER)
                        task.split(tasks, info.cpu_threads, 256);
                else
                        task.split(tasks, info.cpu_threads);

                foreach(DeviceTask& task, tasks)
                        task_pool.push(new CPUDeviceTask(this, task));
        }

        void task_wait()
        {
                task_pool.wait_work();
        }

        void task_cancel()
        {
                task_pool.cancel();
        }

protected:
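        /* Make a per-thread copy of the kernel globals and explicitly clear all
         * lazily allocated per-thread pointers, so that thread_kernel_globals_free()
         * never frees uninitialized memory. */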
        inline KernelGlobals thread_kernel_globals_init()
        {
                KernelGlobals kg = kernel_globals;
                kg.transparent_shadow_intersections = NULL;
                const int decoupled_count = sizeof(kg.decoupled_volume_steps) /
                                            sizeof(*kg.decoupled_volume_steps);
                for(int i = 0; i < decoupled_count; ++i) {
                        kg.decoupled_volume_steps[i] = NULL;
                }
                kg.decoupled_volume_steps_index = 0;
                kg.coverage_asset = kg.coverage_object = kg.coverage_material = NULL;
#ifdef WITH_OSL
                OSLShader::thread_init(&kg, &kernel_globals, &osl_globals);
#endif
                return kg;
        }

        inline void thread_kernel_globals_free(KernelGlobals *kg)
        {
                if(kg == NULL) {
                        return;
                }

                if(kg->transparent_shadow_intersections != NULL) {
                        free(kg->transparent_shadow_intersections);
                }
                const int decoupled_count = sizeof(kg->decoupled_volume_steps) /
                                            sizeof(*kg->decoupled_volume_steps);
                for(int i = 0; i < decoupled_count; ++i) {
                        if(kg->decoupled_volume_steps[i] != NULL) {
                                free(kg->decoupled_volume_steps[i]);
                        }
                }
#ifdef WITH_OSL
                OSLShader::thread_free(kg);
#endif
        }

        virtual bool load_kernels(const DeviceRequestedFeatures& requested_features_) {
                requested_features = requested_features_;

                return true;
        }
};

/* split kernel */

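/* On the CPU, "enqueueing" a split kernel simply loops over the requested
 * global work size and calls the kernel function once per work item. */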
class CPUSplitKernelFunction : public SplitKernelFunction {
public:
        CPUDevice* device;
        void (*func)(KernelGlobals *kg, KernelData *data);

        CPUSplitKernelFunction(CPUDevice* device) : device(device), func(NULL) {}
        ~CPUSplitKernelFunction() {}

        virtual bool enqueue(const KernelDimensions& dim, device_memory& kernel_globals, device_memory& data)
        {
                if(!func) {
                        return false;
                }

                KernelGlobals *kg = (KernelGlobals*)kernel_globals.device_pointer;
                kg->global_size = make_int2(dim.global_size[0], dim.global_size[1]);

                for(int y = 0; y < dim.global_size[1]; y++) {
                        for(int x = 0; x < dim.global_size[0]; x++) {
                                kg->global_id = make_int2(x, y);

                                func(kg, (KernelData*)data.device_pointer);
                        }
                }

                return true;
        }
};

CPUSplitKernel::CPUSplitKernel(CPUDevice *device) : DeviceSplitKernel(device), device(device)
{
}

bool CPUSplitKernel::enqueue_split_kernel_data_init(const KernelDimensions& dim,
                                                    RenderTile& rtile,
                                                    int num_global_elements,
                                                    device_memory& kernel_globals,
                                                    device_memory& data,
                                                    device_memory& split_data,
                                                    device_memory& ray_state,
                                                    device_memory& queue_index,
                                                    device_memory& use_queues_flags,
                                                    device_memory& work_pool_wgs)
{
        KernelGlobals *kg = (KernelGlobals*)kernel_globals.device_pointer;
        kg->global_size = make_int2(dim.global_size[0], dim.global_size[1]);

        for(int y = 0; y < dim.global_size[1]; y++) {
                for(int x = 0; x < dim.global_size[0]; x++) {
                        kg->global_id = make_int2(x, y);

                        device->data_init_kernel()((KernelGlobals*)kernel_globals.device_pointer,
                                                   (KernelData*)data.device_pointer,
                                                   (void*)split_data.device_pointer,
                                                   num_global_elements,
                                                   (char*)ray_state.device_pointer,
                                                   rtile.start_sample,
                                                   rtile.start_sample + rtile.num_samples,
                                                   rtile.x,
                                                   rtile.y,
                                                   rtile.w,
                                                   rtile.h,
                                                   rtile.offset,
                                                   rtile.stride,
                                                   (int*)queue_index.device_pointer,
                                                   dim.global_size[0] * dim.global_size[1],
                                                   (char*)use_queues_flags.device_pointer,
                                                   (uint*)work_pool_wgs.device_pointer,
                                                   rtile.num_samples,
                                                   (float*)rtile.buffer);
                }
        }

        return true;
}

SplitKernelFunction* CPUSplitKernel::get_split_kernel_function(const string& kernel_name,
                                                               const DeviceRequestedFeatures&)
{
        CPUSplitKernelFunction *kernel = new CPUSplitKernelFunction(device);

        kernel->func = device->split_kernels[kernel_name]();
        if(!kernel->func) {
                delete kernel;
                return NULL;
        }

        return kernel;
}

int2 CPUSplitKernel::split_kernel_local_size()
{
        return make_int2(1, 1);
}

int2 CPUSplitKernel::split_kernel_global_size(device_memory& /*kg*/, device_memory& /*data*/, DeviceTask * /*task*/) {
        return make_int2(1, 1);
}

uint64_t CPUSplitKernel::state_buffer_size(device_memory& kernel_globals, device_memory& /*data*/, size_t num_threads) {
        KernelGlobals *kg = (KernelGlobals*)kernel_globals.device_pointer;

        return split_data_buffer_size(kg, num_threads);
}

Device *device_cpu_create(DeviceInfo& info, Stats &stats, Profiler &profiler, bool background)
{
        return new CPUDevice(info, stats, profiler, background);
}

void device_cpu_info(vector<DeviceInfo>& devices)
{
        DeviceInfo info;

        info.type = DEVICE_CPU;
        info.description = system_cpu_brand_string();
        info.id = "CPU";
        info.num = 0;
        info.advanced_shading = true;
        info.has_volume_decoupled = true;
        info.has_osl = true;
        info.has_half_images = true;
        info.has_profiling = true;

        devices.insert(devices.begin(), info);
}

string device_cpu_capabilities()
{
        string capabilities = "";
        capabilities += system_cpu_support_sse2() ? "SSE2 " : "";
        capabilities += system_cpu_support_sse3() ? "SSE3 " : "";
        capabilities += system_cpu_support_sse41() ? "SSE41 " : "";
        capabilities += system_cpu_support_avx() ? "AVX " : "";
        capabilities += system_cpu_support_avx2() ? "AVX2" : "";
        /* Guard against an empty string, which would otherwise index out of range. */
        if(!capabilities.empty() && capabilities[capabilities.size() - 1] == ' ')
                capabilities.resize(capabilities.size() - 1);
        return capabilities;
}

CCL_NAMESPACE_END