Cleanup: simplify transform orientation cycling
[blender.git] / intern / cycles / device / device_cpu.cpp
1 /*
2  * Copyright 2011-2013 Blender Foundation
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16
17 #include <stdlib.h>
18 #include <string.h>
19
20 /* So ImathMath is included before our kernel_cpu_compat. */
21 #ifdef WITH_OSL
22 /* So no context pollution happens from indirectly included windows.h */
23 #  include "util/util_windows.h"
24 #  include <OSL/oslexec.h>
25 #endif
26
27 #include "device/device.h"
28 #include "device/device_denoising.h"
29 #include "device/device_intern.h"
30 #include "device/device_split_kernel.h"
31
32 #include "kernel/kernel.h"
33 #include "kernel/kernel_compat_cpu.h"
34 #include "kernel/kernel_types.h"
35 #include "kernel/split/kernel_split_data.h"
36 #include "kernel/kernel_globals.h"
37
38 #include "kernel/filter/filter.h"
39
40 #include "kernel/osl/osl_shader.h"
41 #include "kernel/osl/osl_globals.h"
42
43 #include "render/buffers.h"
44 #include "render/coverage.h"
45
46 #include "util/util_debug.h"
47 #include "util/util_foreach.h"
48 #include "util/util_function.h"
49 #include "util/util_logging.h"
50 #include "util/util_map.h"
51 #include "util/util_opengl.h"
52 #include "util/util_optimization.h"
53 #include "util/util_progress.h"
54 #include "util/util_system.h"
55 #include "util/util_thread.h"
56
57 CCL_NAMESPACE_BEGIN
58
/* Forward declaration; CPUSplitKernel below holds a pointer to it. */
class CPUDevice;

/* Has to be outside of the class to be shared across template instantiations. */
static const char *logged_architecture = "";
63
/* Holds a kernel entry point of function-pointer type F and, at construction
 * time, selects the most specialized variant that is both compiled in and
 * supported by the CPU at runtime (and not disabled via debug flags). */
template<typename F>
class KernelFunctions {
public:
	/* Default constructor leaves the kernel unset; calling operator()
	 * before assigning a real instance trips the assert below. */
	KernelFunctions()
	{
		kernel = (F)NULL;
	}

	/* Pick the best kernel variant, checking from newest (AVX2) down to
	 * oldest (SSE2) instruction set; falls back to the default kernel. */
	KernelFunctions(F kernel_default,
	                F kernel_sse2,
	                F kernel_sse3,
	                F kernel_sse41,
	                F kernel_avx,
	                F kernel_avx2)
	{
		const char *architecture_name = "default";
		kernel = kernel_default;

		/* Silence potential warnings about unused variables
		 * when compiling without some architectures. */
		(void) kernel_sse2;
		(void) kernel_sse3;
		(void) kernel_sse41;
		(void) kernel_avx;
		(void) kernel_avx2;
#ifdef WITH_CYCLES_OPTIMIZED_KERNEL_AVX2
		if(DebugFlags().cpu.has_avx2() && system_cpu_support_avx2()) {
			architecture_name = "AVX2";
			kernel = kernel_avx2;
		}
		else
#endif
#ifdef WITH_CYCLES_OPTIMIZED_KERNEL_AVX
		if(DebugFlags().cpu.has_avx() && system_cpu_support_avx()) {
			architecture_name = "AVX";
			kernel = kernel_avx;
		}
		else
#endif
#ifdef WITH_CYCLES_OPTIMIZED_KERNEL_SSE41
		if(DebugFlags().cpu.has_sse41() && system_cpu_support_sse41()) {
			architecture_name = "SSE4.1";
			kernel = kernel_sse41;
		}
		else
#endif
#ifdef WITH_CYCLES_OPTIMIZED_KERNEL_SSE3
		if(DebugFlags().cpu.has_sse3() && system_cpu_support_sse3()) {
			architecture_name = "SSE3";
			kernel = kernel_sse3;
		}
		else
#endif
#ifdef WITH_CYCLES_OPTIMIZED_KERNEL_SSE2
		if(DebugFlags().cpu.has_sse2() && system_cpu_support_sse2()) {
			architecture_name = "SSE2";
			kernel = kernel_sse2;
		}
#endif

		/* Log only when the architecture changes: many instances of this
		 * template are constructed per device, one log line is enough. */
		if(strcmp(architecture_name, logged_architecture) != 0) {
			VLOG(1) << "Will be using " << architecture_name << " kernels.";
			logged_architecture = architecture_name;
		}
	}

	/* Return the selected kernel function pointer for calling. */
	inline F operator()() const {
		assert(kernel);
		return kernel;
	}
protected:
	F kernel;  /* Selected kernel entry point, NULL if default-constructed. */
};
137
/* CPU implementation of the split kernel: provides the device-specific
 * hooks DeviceSplitKernel needs for kernel lookup, sizing and state setup. */
class CPUSplitKernel : public DeviceSplitKernel {
	CPUDevice *device;  /* Owning device, gives access to its kernel tables. */
public:
	explicit CPUSplitKernel(CPUDevice *device);

	/* Initialize the per-tile split kernel state buffers before rendering. */
	virtual bool enqueue_split_kernel_data_init(const KernelDimensions& dim,
	                                            RenderTile& rtile,
	                                            int num_global_elements,
	                                            device_memory& kernel_globals,
	                                            device_memory& kernel_data_,
	                                            device_memory& split_data,
	                                            device_memory& ray_state,
	                                            device_memory& queue_index,
	                                            device_memory& use_queues_flag,
	                                            device_memory& work_pool_wgs);

	/* Look up one split kernel stage by its name. */
	virtual SplitKernelFunction* get_split_kernel_function(const string& kernel_name,
	                                                       const DeviceRequestedFeatures&);
	virtual int2 split_kernel_local_size();
	virtual int2 split_kernel_global_size(device_memory& kg, device_memory& data, DeviceTask *task);
	virtual uint64_t state_buffer_size(device_memory& kg, device_memory& data, size_t num_threads);
};
160
/* CPU rendering device. Host memory doubles as device memory, so most of
 * the memory management below is bookkeeping rather than actual copying. */
class CPUDevice : public Device
{
public:
	TaskPool task_pool;
	/* Globals shared by all kernel invocations on this device. */
	KernelGlobals kernel_globals;

	/* Table of image texture slots, uploaded on demand (see load_texture_info). */
	device_vector<TextureInfo> texture_info;
	bool need_texture_info;  /* Set when texture_info must be re-uploaded. */

#ifdef WITH_OSL
	OSLGlobals osl_globals;
#endif

	bool use_split_kernel;  /* Debug option: render with the split kernel. */

	DeviceRequestedFeatures requested_features;

	/* Architecture-dispatched entry points for path tracing, film
	 * conversion and shader evaluation. */
	KernelFunctions<void(*)(KernelGlobals *, float *, int, int, int, int, int)>             path_trace_kernel;
	KernelFunctions<void(*)(KernelGlobals *, uchar4 *, float *, float, int, int, int, int)> convert_to_half_float_kernel;
	KernelFunctions<void(*)(KernelGlobals *, uchar4 *, float *, float, int, int, int, int)> convert_to_byte_kernel;
	KernelFunctions<void(*)(KernelGlobals *, uint4 *, float4 *, int, int, int, int, int)>   shader_kernel;

	/* Denoising feature extraction and preprocessing kernels. */
	KernelFunctions<void(*)(int, TileInfo*, int, int, float*, float*, float*, float*, float*, int*, int, int)> filter_divide_shadow_kernel;
	KernelFunctions<void(*)(int, TileInfo*, int, int, int, int, float*, float*, int*, int, int)>               filter_get_feature_kernel;
	KernelFunctions<void(*)(int, int, float*, float*, float*, float*, int*, int)>                               filter_detect_outliers_kernel;
	KernelFunctions<void(*)(int, int, float*, float*, float*, float*, int*, int)>                               filter_combine_halves_kernel;

	/* Non-local-means denoising kernels. */
	KernelFunctions<void(*)(int, int, float*, float*, float*, int*, int, int, float, float)>   filter_nlm_calc_difference_kernel;
	KernelFunctions<void(*)(float*, float*, int*, int, int)>                                   filter_nlm_blur_kernel;
	KernelFunctions<void(*)(float*, float*, int*, int, int)>                                   filter_nlm_calc_weight_kernel;
	KernelFunctions<void(*)(int, int, float*, float*, float*, float*, float*, int*, int, int)> filter_nlm_update_output_kernel;
	KernelFunctions<void(*)(float*, float*, int*, int)>                                        filter_nlm_normalize_kernel;

	/* Denoising reconstruction kernels. */
	KernelFunctions<void(*)(float*, int, int, int, float*, int*, int*, int, int, float)>                         filter_construct_transform_kernel;
	KernelFunctions<void(*)(int, int, float*, float*, float*, int*, float*, float3*, int*, int*, int, int, int)> filter_nlm_construct_gramian_kernel;
	KernelFunctions<void(*)(int, int, int, float*, int*, float*, float3*, int*, int)>                            filter_finalize_kernel;

	/* Split kernel data-init entry point, plus the per-stage split kernels
	 * which are looked up by name at runtime. */
	KernelFunctions<void(*)(KernelGlobals *, ccl_constant KernelData*, ccl_global void*, int, ccl_global char*,
	                       int, int, int, int, int, int, int, int, ccl_global int*, int,
	                       ccl_global char*, ccl_global unsigned int*, unsigned int, ccl_global float*)>        data_init_kernel;
	unordered_map<string, KernelFunctions<void(*)(KernelGlobals*, KernelData*)> > split_kernels;

/* Expands to the per-architecture entry points for one kernel name, in the
 * exact order the KernelFunctions constructor expects them. */
#define KERNEL_FUNCTIONS(name) \
	      KERNEL_NAME_EVAL(cpu, name), \
	      KERNEL_NAME_EVAL(cpu_sse2, name), \
	      KERNEL_NAME_EVAL(cpu_sse3, name), \
	      KERNEL_NAME_EVAL(cpu_sse41, name), \
	      KERNEL_NAME_EVAL(cpu_avx, name), \
	      KERNEL_NAME_EVAL(cpu_avx2, name)
210
	/* Constructor registers every kernel entry point (architecture dispatch
	 * happens inside KernelFunctions) and reads the relevant debug flags. */
	CPUDevice(DeviceInfo& info_, Stats &stats_, bool background_)
	: Device(info_, stats_, background_),
	  texture_info(this, "__texture_info", MEM_TEXTURE),
/* Initialize one KernelFunctions member from all architecture variants. */
#define REGISTER_KERNEL(name) name ## _kernel(KERNEL_FUNCTIONS(name))
	  REGISTER_KERNEL(path_trace),
	  REGISTER_KERNEL(convert_to_half_float),
	  REGISTER_KERNEL(convert_to_byte),
	  REGISTER_KERNEL(shader),
	  REGISTER_KERNEL(filter_divide_shadow),
	  REGISTER_KERNEL(filter_get_feature),
	  REGISTER_KERNEL(filter_detect_outliers),
	  REGISTER_KERNEL(filter_combine_halves),
	  REGISTER_KERNEL(filter_nlm_calc_difference),
	  REGISTER_KERNEL(filter_nlm_blur),
	  REGISTER_KERNEL(filter_nlm_calc_weight),
	  REGISTER_KERNEL(filter_nlm_update_output),
	  REGISTER_KERNEL(filter_nlm_normalize),
	  REGISTER_KERNEL(filter_construct_transform),
	  REGISTER_KERNEL(filter_nlm_construct_gramian),
	  REGISTER_KERNEL(filter_finalize),
	  REGISTER_KERNEL(data_init)
#undef REGISTER_KERNEL
	{
		/* Zero means automatic thread count from the task scheduler. */
		if(info.cpu_threads == 0) {
			info.cpu_threads = TaskScheduler::num_threads();
		}

#ifdef WITH_OSL
		kernel_globals.osl = &osl_globals;
#endif
		use_split_kernel = DebugFlags().cpu.split_kernel;
		if(use_split_kernel) {
			VLOG(1) << "Will be using split kernel.";
		}
		need_texture_info = false;

/* Split kernel stages are looked up by name at runtime, so they are kept
 * in a map rather than as individual members. */
#define REGISTER_SPLIT_KERNEL(name) split_kernels[#name] = KernelFunctions<void(*)(KernelGlobals*, KernelData*)>(KERNEL_FUNCTIONS(name))
		REGISTER_SPLIT_KERNEL(path_init);
		REGISTER_SPLIT_KERNEL(scene_intersect);
		REGISTER_SPLIT_KERNEL(lamp_emission);
		REGISTER_SPLIT_KERNEL(do_volume);
		REGISTER_SPLIT_KERNEL(queue_enqueue);
		REGISTER_SPLIT_KERNEL(indirect_background);
		REGISTER_SPLIT_KERNEL(shader_setup);
		REGISTER_SPLIT_KERNEL(shader_sort);
		REGISTER_SPLIT_KERNEL(shader_eval);
		REGISTER_SPLIT_KERNEL(holdout_emission_blurring_pathtermination_ao);
		REGISTER_SPLIT_KERNEL(subsurface_scatter);
		REGISTER_SPLIT_KERNEL(direct_lighting);
		REGISTER_SPLIT_KERNEL(shadow_blocked_ao);
		REGISTER_SPLIT_KERNEL(shadow_blocked_dl);
		REGISTER_SPLIT_KERNEL(enqueue_inactive);
		REGISTER_SPLIT_KERNEL(next_iteration_setup);
		REGISTER_SPLIT_KERNEL(indirect_subsurface);
		REGISTER_SPLIT_KERNEL(buffer_update);
#undef REGISTER_SPLIT_KERNEL
#undef KERNEL_FUNCTIONS
	}
269
	~CPUDevice()
	{
		/* Stop worker threads first so nothing is still touching device
		 * memory when it is freed. */
		task_pool.stop();
		texture_info.free();
	}
275
276         virtual bool show_samples() const
277         {
278                 return (info.cpu_threads == 1);
279         }
280
281         virtual BVHLayoutMask get_bvh_layout_mask() const {
282                 BVHLayoutMask bvh_layout_mask = BVH_LAYOUT_BVH2;
283                 if(DebugFlags().cpu.has_sse2() && system_cpu_support_sse2()) {
284                         bvh_layout_mask |= BVH_LAYOUT_BVH4;
285                 }
286                 if(DebugFlags().cpu.has_avx2() && system_cpu_support_avx2()) {
287                         bvh_layout_mask |= BVH_LAYOUT_BVH8;
288                 }
289 #ifdef WITH_EMBREE
290                 bvh_layout_mask |= BVH_LAYOUT_EMBREE;
291 #endif  /* WITH_EMBREE */
292                 return bvh_layout_mask;
293         }
294
295         void load_texture_info()
296         {
297                 if(need_texture_info) {
298                         texture_info.copy_to_device();
299                         need_texture_info = false;
300                 }
301         }
302
303         void mem_alloc(device_memory& mem)
304         {
305                 if(mem.type == MEM_TEXTURE) {
306                         assert(!"mem_alloc not supported for textures.");
307                 }
308                 else {
309                         if(mem.name) {
310                                 VLOG(1) << "Buffer allocate: " << mem.name << ", "
311                                                 << string_human_readable_number(mem.memory_size()) << " bytes. ("
312                                                 << string_human_readable_size(mem.memory_size()) << ")";
313                         }
314
315                         if(mem.type == MEM_DEVICE_ONLY) {
316                                 assert(!mem.host_pointer);
317                                 size_t alignment = MIN_ALIGNMENT_CPU_DATA_TYPES;
318                                 void *data = util_aligned_malloc(mem.memory_size(), alignment);
319                                 mem.device_pointer = (device_ptr)data;
320                         }
321                         else {
322                                 mem.device_pointer = (device_ptr)mem.host_pointer;
323                         }
324
325                         mem.device_size = mem.memory_size();
326                         stats.mem_alloc(mem.device_size);
327                 }
328         }
329
330         void mem_copy_to(device_memory& mem)
331         {
332                 if(mem.type == MEM_TEXTURE) {
333                         tex_free(mem);
334                         tex_alloc(mem);
335                 }
336                 else if(mem.type == MEM_PIXELS) {
337                         assert(!"mem_copy_to not supported for pixels.");
338                 }
339                 else {
340                         if(!mem.device_pointer) {
341                                 mem_alloc(mem);
342                         }
343
344                         /* copy is no-op */
345                 }
346         }
347
	/* Host memory is device memory on the CPU, so reading back is a no-op. */
	void mem_copy_from(device_memory& /*mem*/,
	                   int /*y*/, int /*w*/, int /*h*/,
	                   int /*elem*/)
	{
		/* no-op */
	}
354
355         void mem_zero(device_memory& mem)
356         {
357                 if(!mem.device_pointer) {
358                         mem_alloc(mem);
359                 }
360
361                 if(mem.device_pointer) {
362                         memset((void*)mem.device_pointer, 0, mem.memory_size());
363                 }
364         }
365
366         void mem_free(device_memory& mem)
367         {
368                 if(mem.type == MEM_TEXTURE) {
369                         tex_free(mem);
370                 }
371                 else if(mem.device_pointer) {
372                         if(mem.type == MEM_DEVICE_ONLY) {
373                                 util_aligned_free((void*)mem.device_pointer);
374                         }
375                         mem.device_pointer = 0;
376                         stats.mem_free(mem.device_size);
377                         mem.device_size = 0;
378                 }
379         }
380
381         virtual device_ptr mem_alloc_sub_ptr(device_memory& mem, int offset, int /*size*/)
382         {
383                 return (device_ptr) (((char*) mem.device_pointer) + mem.memory_elements_size(offset));
384         }
385
	/* Copy named constant data (e.g. KernelData) into the kernel globals. */
	void const_copy_to(const char *name, void *host, size_t size)
	{
		kernel_const_copy(&kernel_globals, name, host, size);
	}
390
391         void tex_alloc(device_memory& mem)
392         {
393                 VLOG(1) << "Texture allocate: " << mem.name << ", "
394                         << string_human_readable_number(mem.memory_size()) << " bytes. ("
395                         << string_human_readable_size(mem.memory_size()) << ")";
396
397                 if(mem.interpolation == INTERPOLATION_NONE) {
398                         /* Data texture. */
399                         kernel_tex_copy(&kernel_globals,
400                                                         mem.name,
401                                                         mem.host_pointer,
402                                                         mem.data_size);
403                 }
404                 else {
405                         /* Image Texture. */
406                         int flat_slot = 0;
407                         if(string_startswith(mem.name, "__tex_image")) {
408                                 int pos =  string(mem.name).rfind("_");
409                                 flat_slot = atoi(mem.name + pos + 1);
410                         }
411                         else {
412                                 assert(0);
413                         }
414
415                         if(flat_slot >= texture_info.size()) {
416                                 /* Allocate some slots in advance, to reduce amount
417                                  * of re-allocations. */
418                                 texture_info.resize(flat_slot + 128);
419                         }
420
421                         TextureInfo& info = texture_info[flat_slot];
422                         info.data = (uint64_t)mem.host_pointer;
423                         info.cl_buffer = 0;
424                         info.interpolation = mem.interpolation;
425                         info.extension = mem.extension;
426                         info.width = mem.data_width;
427                         info.height = mem.data_height;
428                         info.depth = mem.data_depth;
429
430                         need_texture_info = true;
431                 }
432
433                 mem.device_pointer = (device_ptr)mem.host_pointer;
434                 mem.device_size = mem.memory_size();
435                 stats.mem_alloc(mem.device_size);
436         }
437
438         void tex_free(device_memory& mem)
439         {
440                 if(mem.device_pointer) {
441                         mem.device_pointer = 0;
442                         stats.mem_free(mem.device_size);
443                         mem.device_size = 0;
444                         need_texture_info = true;
445                 }
446         }
447
	/* Return the OSL globals for shader setup, or NULL when Cycles was
	 * built without OSL support. */
	void *osl_memory()
	{
#ifdef WITH_OSL
		return &osl_globals;
#else
		return NULL;
#endif
	}
456
457         void thread_run(DeviceTask *task)
458         {
459                 if(task->type == DeviceTask::RENDER) {
460                         thread_render(*task);
461                 }
462                 else if(task->type == DeviceTask::FILM_CONVERT)
463                         thread_film_convert(*task);
464                 else if(task->type == DeviceTask::SHADER)
465                         thread_shader(*task);
466         }
467
	/* Wrapper that binds a DeviceTask to this device's thread_run so the
	 * task pool can execute it. */
	class CPUDeviceTask : public DeviceTask {
	public:
		CPUDeviceTask(CPUDevice *device, DeviceTask& task)
		: DeviceTask(task)
		{
			run = function_bind(&CPUDevice::thread_run, device, this);
		}
	};
476
	/* Non-local-means filter image_ptr using guide_ptr and its variance,
	 * writing the result to out_ptr. Scratch space comes from the
	 * preallocated task->buffer.temporary_mem. */
	bool denoising_non_local_means(device_ptr image_ptr, device_ptr guide_ptr, device_ptr variance_ptr, device_ptr out_ptr,
	                               DenoisingTask *task)
	{
		int4 rect = task->rect;
		int   r   = task->nlm_state.r;    /* Search window radius (offsets looped below). */
		int   f   = task->nlm_state.f;    /* Radius passed to the blur kernels. */
		float a   = task->nlm_state.a;
		float k_2 = task->nlm_state.k_2;

		/* Row stride is padded to a multiple of 4 floats. */
		int w = align_up(rect.z-rect.x, 4);
		int h = rect.w-rect.y;

		/* Carve three scratch planes out of the temporary memory, each
		 * pass_stride floats apart. */
		float *temporary_mem = (float*) task->buffer.temporary_mem.device_pointer;
		float *blurDifference = temporary_mem;
		float *difference     = temporary_mem + task->buffer.pass_stride;
		float *weightAccum    = temporary_mem + 2*task->buffer.pass_stride;

		memset(weightAccum, 0, sizeof(float)*w*h);
		memset((float*) out_ptr, 0, sizeof(float)*w*h);

		/* Loop over every offset (dx, dy) in the search window. */
		for(int i = 0; i < (2*r+1)*(2*r+1); i++) {
			int dy = i / (2*r+1) - r;
			int dx = i % (2*r+1) - r;

			/* Clip the processed rect so both the pixel and its offset
			 * neighbor stay inside the image. */
			int local_rect[4] = {max(0, -dx), max(0, -dy), rect.z-rect.x - max(0, dx), rect.w-rect.y - max(0, dy)};
			filter_nlm_calc_difference_kernel()(dx, dy,
			                                    (float*) guide_ptr,
			                                    (float*) variance_ptr,
			                                    difference,
			                                    local_rect,
			                                    w, 0,
			                                    a, k_2);

			/* Blur the difference, convert it to a weight, blur again. */
			filter_nlm_blur_kernel()       (difference, blurDifference, local_rect, w, f);
			filter_nlm_calc_weight_kernel()(blurDifference, difference, local_rect, w, f);
			filter_nlm_blur_kernel()       (difference, blurDifference, local_rect, w, f);

			/* Accumulate the weighted neighbor into the output and the
			 * per-pixel weight sum. */
			filter_nlm_update_output_kernel()(dx, dy,
			                                  blurDifference,
			                                  (float*) image_ptr,
			                                  difference,
			                                  (float*) out_ptr,
			                                  weightAccum,
			                                  local_rect,
			                                  w, f);
		}

		/* Divide the accumulated output by the accumulated weights. */
		int local_rect[4] = {0, 0, rect.z-rect.x, rect.w-rect.y};
		filter_nlm_normalize_kernel()((float*) out_ptr, weightAccum, local_rect, w);

		return true;
	}
529
530         bool denoising_construct_transform(DenoisingTask *task)
531         {
532                 for(int y = 0; y < task->filter_area.w; y++) {
533                         for(int x = 0; x < task->filter_area.z; x++) {
534                                 filter_construct_transform_kernel()((float*) task->buffer.mem.device_pointer,
535                                                                     x + task->filter_area.x,
536                                                                     y + task->filter_area.y,
537                                                                     y*task->filter_area.z + x,
538                                                                     (float*) task->storage.transform.device_pointer,
539                                                                     (int*)   task->storage.rank.device_pointer,
540                                                                     &task->rect.x,
541                                                                     task->buffer.pass_stride,
542                                                                     task->radius,
543                                                                     task->pca_threshold);
544                         }
545                 }
546                 return true;
547         }
548
	/* Reconstruct the denoised image: accumulate per-pixel weighted
	 * least-squares systems (XtWX / XtWY) over the NLM search window,
	 * then solve them and write the result into the output buffer. */
	bool denoising_reconstruct(device_ptr color_ptr,
	                           device_ptr color_variance_ptr,
	                           device_ptr output_ptr,
	                           DenoisingTask *task)
	{
		/* Clear the normal-equation accumulators. */
		mem_zero(task->storage.XtWX);
		mem_zero(task->storage.XtWY);

		/* Two scratch planes inside the preallocated temporary memory. */
		float *temporary_mem = (float*) task->buffer.temporary_mem.device_pointer;
		float *difference     = temporary_mem;
		float *blurDifference = temporary_mem + task->buffer.pass_stride;

		/* Accumulate the gramian over every offset in the search window. */
		int r = task->radius;
		for(int i = 0; i < (2*r+1)*(2*r+1); i++) {
			int dy = i / (2*r+1) - r;
			int dx = i % (2*r+1) - r;

			/* Clip so both the pixel and its offset neighbor are in range. */
			int local_rect[4] = {max(0, -dx), max(0, -dy),
			                     task->reconstruction_state.source_w - max(0, dx),
			                     task->reconstruction_state.source_h - max(0, dy)};
			filter_nlm_calc_difference_kernel()(dx, dy,
			                                    (float*) color_ptr,
			                                    (float*) color_variance_ptr,
			                                    difference,
			                                    local_rect,
			                                    task->buffer.stride,
			                                    task->buffer.pass_stride,
			                                    1.0f,
			                                    task->nlm_k_2);
			/* Blur, weight, blur — same sequence as denoising_non_local_means. */
			filter_nlm_blur_kernel()(difference, blurDifference, local_rect, task->buffer.stride, 4);
			filter_nlm_calc_weight_kernel()(blurDifference, difference, local_rect, task->buffer.stride, 4);
			filter_nlm_blur_kernel()(difference, blurDifference, local_rect, task->buffer.stride, 4);
			filter_nlm_construct_gramian_kernel()(dx, dy,
			                                      blurDifference,
			                                      (float*)  task->buffer.mem.device_pointer,
			                                      (float*)  task->storage.transform.device_pointer,
			                                      (int*)    task->storage.rank.device_pointer,
			                                      (float*)  task->storage.XtWX.device_pointer,
			                                      (float3*) task->storage.XtWY.device_pointer,
			                                      local_rect,
			                                      &task->reconstruction_state.filter_window.x,
			                                      task->buffer.stride,
			                                      4,
			                                      task->buffer.pass_stride);
		}
		/* Solve the accumulated system for each pixel of the filter area. */
		for(int y = 0; y < task->filter_area.w; y++) {
			for(int x = 0; x < task->filter_area.z; x++) {
				filter_finalize_kernel()(x,
				                         y,
				                         y*task->filter_area.z + x,
				                         (float*)  output_ptr,
				                         (int*)    task->storage.rank.device_pointer,
				                         (float*)  task->storage.XtWX.device_pointer,
				                         (float3*) task->storage.XtWY.device_pointer,
				                         &task->reconstruction_state.buffer_params.x,
				                         task->render_buffer.samples);
			}
		}
		return true;
	}
609
610         bool denoising_combine_halves(device_ptr a_ptr, device_ptr b_ptr,
611                                       device_ptr mean_ptr, device_ptr variance_ptr,
612                                       int r, int4 rect, DenoisingTask * /*task*/)
613         {
614                 for(int y = rect.y; y < rect.w; y++) {
615                         for(int x = rect.x; x < rect.z; x++) {
616                                 filter_combine_halves_kernel()(x, y,
617                                                                (float*) mean_ptr,
618                                                                (float*) variance_ptr,
619                                                                (float*) a_ptr,
620                                                                (float*) b_ptr,
621                                                                &rect.x,
622                                                                r);
623                         }
624                 }
625                 return true;
626         }
627
628         bool denoising_divide_shadow(device_ptr a_ptr, device_ptr b_ptr,
629                                      device_ptr sample_variance_ptr, device_ptr sv_variance_ptr,
630                                      device_ptr buffer_variance_ptr, DenoisingTask *task)
631         {
632                 for(int y = task->rect.y; y < task->rect.w; y++) {
633                         for(int x = task->rect.x; x < task->rect.z; x++) {
634                                 filter_divide_shadow_kernel()(task->render_buffer.samples,
635                                                               task->tile_info,
636                                                               x, y,
637                                                               (float*) a_ptr,
638                                                               (float*) b_ptr,
639                                                               (float*) sample_variance_ptr,
640                                                               (float*) sv_variance_ptr,
641                                                               (float*) buffer_variance_ptr,
642                                                               &task->rect.x,
643                                                               task->render_buffer.pass_stride,
644                                                               task->render_buffer.offset);
645                         }
646                 }
647                 return true;
648         }
649
650         bool denoising_get_feature(int mean_offset,
651                                    int variance_offset,
652                                    device_ptr mean_ptr,
653                                    device_ptr variance_ptr,
654                                    DenoisingTask *task)
655         {
656                 for(int y = task->rect.y; y < task->rect.w; y++) {
657                         for(int x = task->rect.x; x < task->rect.z; x++) {
658                                 filter_get_feature_kernel()(task->render_buffer.samples,
659                                                             task->tile_info,
660                                                             mean_offset,
661                                                             variance_offset,
662                                                             x, y,
663                                                             (float*) mean_ptr,
664                                                             (float*) variance_ptr,
665                                                             &task->rect.x,
666                                                             task->render_buffer.pass_stride,
667                                                             task->render_buffer.offset);
668                         }
669                 }
670                 return true;
671         }
672
673         bool denoising_detect_outliers(device_ptr image_ptr,
674                                        device_ptr variance_ptr,
675                                        device_ptr depth_ptr,
676                                        device_ptr output_ptr,
677                                        DenoisingTask *task)
678         {
679                 for(int y = task->rect.y; y < task->rect.w; y++) {
680                         for(int x = task->rect.x; x < task->rect.z; x++) {
681                                 filter_detect_outliers_kernel()(x, y,
682                                                                 (float*) image_ptr,
683                                                                 (float*) variance_ptr,
684                                                                 (float*) depth_ptr,
685                                                                 (float*) output_ptr,
686                                                                 &task->rect.x,
687                                                                 task->buffer.pass_stride);
688                         }
689                 }
690                 return true;
691         }
692
	/* Path trace the given tile with the mega-kernel: iterate samples, and
	 * for each sample every pixel of the tile, accumulating into the tile's
	 * render buffer. Progress is reported after each full sample. */
	void path_trace(DeviceTask &task, RenderTile &tile, KernelGlobals *kg)
	{
		/* Accurate cryptomatte requires per-pixel coverage bookkeeping. */
		const bool use_coverage = kernel_data.film.cryptomatte_passes & CRYPT_ACCURATE;

		/* Accumulates elapsed time into the tile's render_time on destruction. */
		scoped_timer timer(&tile.buffers->render_time);

		Coverage coverage(kg, tile);
		if(use_coverage) {
			coverage.init_path_trace();
		}

		float *render_buffer = (float*)tile.buffer;
		int start_sample = tile.start_sample;
		int end_sample = tile.start_sample + tile.num_samples;

		/* Enable flush-to-zero and denormals-are-zero for this thread's FPU;
		 * NOTE(review): the previous mode is not saved/restored here —
		 * presumably acceptable for worker threads, confirm. */
		_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
		_MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);

		for(int sample = start_sample; sample < end_sample; sample++) {
			/* Stop early on cancel, unless the task asks to finish the queue. */
			if(task.get_cancel() || task_pool.canceled()) {
				if(task.need_finish_queue == false)
					break;
			}

			for(int y = tile.y; y < tile.y + tile.h; y++) {
				for(int x = tile.x; x < tile.x + tile.w; x++) {
					if(use_coverage) {
						coverage.init_pixel(x, y);
					}
					path_trace_kernel()(kg, render_buffer,
					                    sample, x, y, tile.offset, tile.stride);
				}
			}

			/* Record how many samples are completed in the tile. */
			tile.sample = sample + 1;

			task.update_progress(&tile, tile.w*tile.h);
		}
		if(use_coverage) {
			coverage.finalize();
		}
	}
735
	/* Denoise the given tile: wire the CPU filter kernel wrappers into the
	 * generic DenoisingTask callback table, then run the denoiser. */
	void denoise(DenoisingTask& denoising, RenderTile &tile)
	{
		/* The tile is fully sampled by the time it is denoised. */
		tile.sample = tile.start_sample + tile.num_samples;

		/* Bind the per-device filter implementations; the _N placeholders
		 * forward the per-call arguments supplied by DenoisingTask. */
		denoising.functions.construct_transform = function_bind(&CPUDevice::denoising_construct_transform, this, &denoising);
		denoising.functions.reconstruct = function_bind(&CPUDevice::denoising_reconstruct, this, _1, _2, _3, &denoising);
		denoising.functions.divide_shadow = function_bind(&CPUDevice::denoising_divide_shadow, this, _1, _2, _3, _4, _5, &denoising);
		denoising.functions.non_local_means = function_bind(&CPUDevice::denoising_non_local_means, this, _1, _2, _3, _4, &denoising);
		denoising.functions.combine_halves = function_bind(&CPUDevice::denoising_combine_halves, this, _1, _2, _3, _4, _5, _6, &denoising);
		denoising.functions.get_feature = function_bind(&CPUDevice::denoising_get_feature, this, _1, _2, _3, _4, &denoising);
		denoising.functions.detect_outliers = function_bind(&CPUDevice::denoising_detect_outliers, this, _1, _2, _3, _4, &denoising);

		denoising.filter_area = make_int4(tile.x, tile.y, tile.w, tile.h);
		denoising.render_buffer.samples = tile.sample;
		/* CPU buffers are plain host memory, no GPU temporaries. */
		denoising.buffer.gpu_temporary_mem = false;

		denoising.run_denoising(&tile);
	}
754
755         void thread_render(DeviceTask& task)
756         {
757                 if(task_pool.canceled()) {
758                         if(task.need_finish_queue == false)
759                                 return;
760                 }
761
762                 /* allocate buffer for kernel globals */
763                 device_only_memory<KernelGlobals> kgbuffer(this, "kernel_globals");
764                 kgbuffer.alloc_to_device(1);
765
766                 KernelGlobals *kg = new ((void*) kgbuffer.device_pointer) KernelGlobals(thread_kernel_globals_init());
767
768                 CPUSplitKernel *split_kernel = NULL;
769                 if(use_split_kernel) {
770                         split_kernel = new CPUSplitKernel(this);
771                         if(!split_kernel->load_kernels(requested_features)) {
772                                 thread_kernel_globals_free((KernelGlobals*)kgbuffer.device_pointer);
773                                 kgbuffer.free();
774                                 delete split_kernel;
775                                 return;
776                         }
777                 }
778
779                 RenderTile tile;
780                 DenoisingTask denoising(this, task);
781
782                 while(task.acquire_tile(this, tile)) {
783                         if(tile.task == RenderTile::PATH_TRACE) {
784                                 if(use_split_kernel) {
785                                         device_only_memory<uchar> void_buffer(this, "void_buffer");
786                                         split_kernel->path_trace(&task, tile, kgbuffer, void_buffer);
787                                 }
788                                 else {
789                                         path_trace(task, tile, kg);
790                                 }
791                         }
792                         else if(tile.task == RenderTile::DENOISE) {
793                                 denoise(denoising, tile);
794                                 task.update_progress(&tile, tile.w*tile.h);
795                         }
796
797                         task.release_tile(tile);
798
799                         if(task_pool.canceled()) {
800                                 if(task.need_finish_queue == false)
801                                         break;
802                         }
803                 }
804
805                 thread_kernel_globals_free((KernelGlobals*)kgbuffer.device_pointer);
806                 kg->~KernelGlobals();
807                 kgbuffer.free();
808                 delete split_kernel;
809         }
810
811         void thread_film_convert(DeviceTask& task)
812         {
813                 float sample_scale = 1.0f/(task.sample + 1);
814
815                 if(task.rgba_half) {
816                         for(int y = task.y; y < task.y + task.h; y++)
817                                 for(int x = task.x; x < task.x + task.w; x++)
818                                         convert_to_half_float_kernel()(&kernel_globals, (uchar4*)task.rgba_half, (float*)task.buffer,
819                                                                        sample_scale, x, y, task.offset, task.stride);
820                 }
821                 else {
822                         for(int y = task.y; y < task.y + task.h; y++)
823                                 for(int x = task.x; x < task.x + task.w; x++)
824                                         convert_to_byte_kernel()(&kernel_globals, (uchar4*)task.rgba_byte, (float*)task.buffer,
825                                                                  sample_scale, x, y, task.offset, task.stride);
826
827                 }
828         }
829
830         void thread_shader(DeviceTask& task)
831         {
832                 KernelGlobals kg = kernel_globals;
833
834 #ifdef WITH_OSL
835                 OSLShader::thread_init(&kg, &kernel_globals, &osl_globals);
836 #endif
837                 for(int sample = 0; sample < task.num_samples; sample++) {
838                         for(int x = task.shader_x; x < task.shader_x + task.shader_w; x++)
839                                 shader_kernel()(&kg,
840                                                 (uint4*)task.shader_input,
841                                                 (float4*)task.shader_output,
842                                                 task.shader_eval_type,
843                                                 task.shader_filter,
844                                                 x,
845                                                 task.offset,
846                                                 sample);
847
848                         if(task.get_cancel() || task_pool.canceled())
849                                 break;
850
851                         task.update_progress(NULL);
852
853                 }
854
855 #ifdef WITH_OSL
856                 OSLShader::thread_free(&kg);
857 #endif
858         }
859
860         int get_split_task_count(DeviceTask& task)
861         {
862                 if(task.type == DeviceTask::SHADER)
863                         return task.get_subtask_count(info.cpu_threads, 256);
864                 else
865                         return task.get_subtask_count(info.cpu_threads);
866         }
867
868         void task_add(DeviceTask& task)
869         {
870                 /* Load texture info. */
871                 load_texture_info();
872
873                 /* split task into smaller ones */
874                 list<DeviceTask> tasks;
875
876                 if(task.type == DeviceTask::SHADER)
877                         task.split(tasks, info.cpu_threads, 256);
878                 else
879                         task.split(tasks, info.cpu_threads);
880
881                 foreach(DeviceTask& task, tasks)
882                         task_pool.push(new CPUDeviceTask(this, task));
883         }
884
	/* Wait for all queued tasks in the pool to finish. */
	void task_wait()
	{
		task_pool.wait_work();
	}
889
	/* Request cancellation of all queued and running tasks. */
	void task_cancel()
	{
		task_pool.cancel();
	}
894
895 protected:
896         inline KernelGlobals thread_kernel_globals_init()
897         {
898                 KernelGlobals kg = kernel_globals;
899                 kg.transparent_shadow_intersections = NULL;
900                 const int decoupled_count = sizeof(kg.decoupled_volume_steps) /
901                                             sizeof(*kg.decoupled_volume_steps);
902                 for(int i = 0; i < decoupled_count; ++i) {
903                         kg.decoupled_volume_steps[i] = NULL;
904                 }
905                 kg.decoupled_volume_steps_index = 0;
906 #ifdef WITH_OSL
907                 OSLShader::thread_init(&kg, &kernel_globals, &osl_globals);
908 #endif
909                 return kg;
910         }
911
912         inline void thread_kernel_globals_free(KernelGlobals *kg)
913         {
914                 if(kg == NULL) {
915                         return;
916                 }
917
918                 if(kg->transparent_shadow_intersections != NULL) {
919                         free(kg->transparent_shadow_intersections);
920                 }
921                 const int decoupled_count = sizeof(kg->decoupled_volume_steps) /
922                                             sizeof(*kg->decoupled_volume_steps);
923                 for(int i = 0; i < decoupled_count; ++i) {
924                         if(kg->decoupled_volume_steps[i] != NULL) {
925                                 free(kg->decoupled_volume_steps[i]);
926                         }
927                 }
928 #ifdef WITH_OSL
929                 OSLShader::thread_free(kg);
930 #endif
931         }
932
	/* Nothing to compile/load for the CPU backend here; just record the
	 * requested features, which are used when creating the split kernel
	 * (see thread_render). Always succeeds. */
	virtual bool load_kernels(const DeviceRequestedFeatures& requested_features_) {
		requested_features = requested_features_;

		return true;
	}
938 };
939
940 /* split kernel */
941
942 class CPUSplitKernelFunction : public SplitKernelFunction {
943 public:
944         CPUDevice* device;
945         void (*func)(KernelGlobals *kg, KernelData *data);
946
947         CPUSplitKernelFunction(CPUDevice* device) : device(device), func(NULL) {}
948         ~CPUSplitKernelFunction() {}
949
950         virtual bool enqueue(const KernelDimensions& dim, device_memory& kernel_globals, device_memory& data)
951         {
952                 if(!func) {
953                         return false;
954                 }
955
956                 KernelGlobals *kg = (KernelGlobals*)kernel_globals.device_pointer;
957                 kg->global_size = make_int2(dim.global_size[0], dim.global_size[1]);
958
959                 for(int y = 0; y < dim.global_size[1]; y++) {
960                         for(int x = 0; x < dim.global_size[0]; x++) {
961                                 kg->global_id = make_int2(x, y);
962
963                                 func(kg, (KernelData*)data.device_pointer);
964                         }
965                 }
966
967                 return true;
968         }
969 };
970
/* Keep a typed pointer to the CPU device alongside the base class' device. */
CPUSplitKernel::CPUSplitKernel(CPUDevice *device) : DeviceSplitKernel(device), device(device)
{
}
974
/* Initialize split-kernel state for a tile: runs the data_init kernel once
 * per work item of the global dispatch size, handing it the tile bounds,
 * sample range and the various state/queue buffers. Always succeeds. */
bool CPUSplitKernel::enqueue_split_kernel_data_init(const KernelDimensions& dim,
                                                    RenderTile& rtile,
                                                    int num_global_elements,
                                                    device_memory& kernel_globals,
                                                    device_memory& data,
                                                    device_memory& split_data,
                                                    device_memory& ray_state,
                                                    device_memory& queue_index,
                                                    device_memory& use_queues_flags,
                                                    device_memory& work_pool_wgs)
{
	KernelGlobals *kg = (KernelGlobals*)kernel_globals.device_pointer;
	kg->global_size = make_int2(dim.global_size[0], dim.global_size[1]);

	/* Emulate a GPU dispatch: invoke the kernel for each (x, y) work item. */
	for(int y = 0; y < dim.global_size[1]; y++) {
		for(int x = 0; x < dim.global_size[0]; x++) {
			kg->global_id = make_int2(x, y);

			device->data_init_kernel()((KernelGlobals*)kernel_globals.device_pointer,
			                           (KernelData*)data.device_pointer,
			                           (void*)split_data.device_pointer,
			                           num_global_elements,
			                           (char*)ray_state.device_pointer,
			                           rtile.start_sample,
			                           rtile.start_sample + rtile.num_samples,
			                           rtile.x,
			                           rtile.y,
			                           rtile.w,
			                           rtile.h,
			                           rtile.offset,
			                           rtile.stride,
			                           (int*)queue_index.device_pointer,
			                           dim.global_size[0] * dim.global_size[1],
			                           (char*)use_queues_flags.device_pointer,
			                           (uint*)work_pool_wgs.device_pointer,
			                           rtile.num_samples,
			                           (float*)rtile.buffer);
		}
	}

	return true;
}
1017
1018 SplitKernelFunction* CPUSplitKernel::get_split_kernel_function(const string& kernel_name,
1019                                                                const DeviceRequestedFeatures&)
1020 {
1021         CPUSplitKernelFunction *kernel = new CPUSplitKernelFunction(device);
1022
1023         kernel->func = device->split_kernels[kernel_name]();
1024         if(!kernel->func) {
1025                 delete kernel;
1026                 return NULL;
1027         }
1028
1029         return kernel;
1030 }
1031
/* Work-group size: the CPU runs work items sequentially (see enqueue()),
 * so a 1x1 group is used. */
int2 CPUSplitKernel::split_kernel_local_size()
{
	return make_int2(1, 1);
}
1036
/* Global dispatch size; fixed at a single 1x1 work item on the CPU. */
int2 CPUSplitKernel::split_kernel_global_size(device_memory& /*kg*/, device_memory& /*data*/, DeviceTask * /*task*/) {
	return make_int2(1, 1);
}
1040
/* Size in bytes of the split-kernel state buffer for the given number of
 * parallel threads, as computed by the kernel's own layout helper. */
uint64_t CPUSplitKernel::state_buffer_size(device_memory& kernel_globals, device_memory& /*data*/, size_t num_threads) {
	KernelGlobals *kg = (KernelGlobals*)kernel_globals.device_pointer;

	return split_data_buffer_size(kg, num_threads);
}
1046
/* Create a new CPU render device instance; caller takes ownership. */
Device *device_cpu_create(DeviceInfo& info, Stats &stats, bool background)
{
	return new CPUDevice(info, stats, background);
}
1051
/* Describe the CPU device and insert it at the front of the device list. */
void device_cpu_info(vector<DeviceInfo>& devices)
{
	DeviceInfo info;

	info.type = DEVICE_CPU;
	info.description = system_cpu_brand_string();
	info.id = "CPU";
	info.num = 0;
	/* Feature flags advertised by the CPU backend. */
	info.advanced_shading = true;
	info.has_volume_decoupled = true;
	info.has_osl = true;
	info.has_half_images = true;

	/* Insert at the front of the list, before any GPU devices. */
	devices.insert(devices.begin(), info);
}
1067
1068 string device_cpu_capabilities()
1069 {
1070         string capabilities = "";
1071         capabilities += system_cpu_support_sse2() ? "SSE2 " : "";
1072         capabilities += system_cpu_support_sse3() ? "SSE3 " : "";
1073         capabilities += system_cpu_support_sse41() ? "SSE41 " : "";
1074         capabilities += system_cpu_support_avx() ? "AVX " : "";
1075         capabilities += system_cpu_support_avx2() ? "AVX2" : "";
1076         if(capabilities[capabilities.size() - 1] == ' ')
1077                 capabilities.resize(capabilities.size() - 1);
1078         return capabilities;
1079 }
1080
1081 CCL_NAMESPACE_END