Cycles Denoising: Merge outlier heuristic and confidence interval test
[blender.git] / intern / cycles / device / device_cuda.cpp
1 /*
2  * Copyright 2011-2013 Blender Foundation
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16
17 #include <climits>
18 #include <limits.h>
19 #include <stdio.h>
20 #include <stdlib.h>
21 #include <string.h>
22
23 #include "device/device.h"
24 #include "device/device_denoising.h"
25 #include "device/device_intern.h"
26 #include "device/device_split_kernel.h"
27
28 #include "render/buffers.h"
29
30 #include "kernel/filter/filter_defines.h"
31
32 #ifdef WITH_CUDA_DYNLOAD
33 #  include "cuew.h"
34 #else
35 #  include "util/util_opengl.h"
36 #  include <cuda.h>
37 #  include <cudaGL.h>
38 #endif
39 #include "util/util_debug.h"
40 #include "util/util_logging.h"
41 #include "util/util_map.h"
42 #include "util/util_md5.h"
43 #include "util/util_opengl.h"
44 #include "util/util_path.h"
45 #include "util/util_string.h"
46 #include "util/util_system.h"
47 #include "util/util_types.h"
48 #include "util/util_time.h"
49
50 #include "kernel/split/kernel_split_data_types.h"
51
52 CCL_NAMESPACE_BEGIN
53
54 #ifndef WITH_CUDA_DYNLOAD
55
56 /* Transparently implement some functions, so the majority of the file does not
57  * need to worry about the difference between dynamically loaded and linked CUDA.
58  */
59
60 namespace {
61
62 const char *cuewErrorString(CUresult result)
63 {
64         /* We can only give the error code here without major code duplication,
65          * which should be enough since dynamic loading is only disabled by folks
66          * who know what they're doing anyway.
67          *
68          * NOTE: Not thread safe, avoid calling from several threads.
69          */
70         static string error;
71         error = string_printf("%d", result);
72         return error.c_str();
73 }
74
75 const char *cuewCompilerPath(void)
76 {
77         return CYCLES_CUDA_NVCC_EXECUTABLE;
78 }
79
80 int cuewCompilerVersion(void)
81 {
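        /* CUDA_VERSION encodes the toolkit version as major*1000 + minor*10
         * (e.g. 8000 for 8.0); convert it to the major*10 + minor format that
         * cuew reports (e.g. 80). */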
82         return (CUDA_VERSION / 100) + (CUDA_VERSION % 100 / 10);
83 }
84
85 }  /* namespace */
86 #endif  /* WITH_CUDA_DYNLOAD */
87
88 class CUDADevice;
89
90 class CUDASplitKernel : public DeviceSplitKernel {
91         CUDADevice *device;
92 public:
93         explicit CUDASplitKernel(CUDADevice *device);
94
95         virtual uint64_t state_buffer_size(device_memory& kg, device_memory& data, size_t num_threads);
96
97         virtual bool enqueue_split_kernel_data_init(const KernelDimensions& dim,
98                                                     RenderTile& rtile,
99                                                     int num_global_elements,
100                                                     device_memory& kernel_globals,
101                                                     device_memory& kernel_data_,
102                                                     device_memory& split_data,
103                                                     device_memory& ray_state,
104                                                     device_memory& queue_index,
105                                                     device_memory& use_queues_flag,
106                                                     device_memory& work_pool_wgs);
107
108         virtual SplitKernelFunction* get_split_kernel_function(string kernel_name, const DeviceRequestedFeatures&);
109         virtual int2 split_kernel_local_size();
110         virtual int2 split_kernel_global_size(device_memory& kg, device_memory& data, DeviceTask *task);
111 };
112
113 class CUDADevice : public Device
114 {
115 public:
116         DedicatedTaskPool task_pool;
117         CUdevice cuDevice;
118         CUcontext cuContext;
119         CUmodule cuModule, cuFilterModule;
120         map<device_ptr, bool> tex_interp_map;
121         map<device_ptr, uint> tex_bindless_map;
122         int cuDevId;
123         int cuDevArchitecture;
124         bool first_error;
125         CUDASplitKernel *split_kernel;
126
127         struct PixelMem {
128                 GLuint cuPBO;
129                 CUgraphicsResource cuPBOresource;
130                 GLuint cuTexId;
131                 int w, h;
132         };
133
134         map<device_ptr, PixelMem> pixel_mem_map;
135
136         /* Bindless Textures */
137         device_vector<uint> bindless_mapping;
138         bool need_bindless_mapping;
139
140         CUdeviceptr cuda_device_ptr(device_ptr mem)
141         {
142                 return (CUdeviceptr)mem;
143         }
144
145         static bool have_precompiled_kernels()
146         {
147                 string cubins_path = path_get("lib");
148                 return path_exists(cubins_path);
149         }
150
151         virtual bool show_samples() const
152         {
153                 /* The CUDADevice only processes one tile at a time, so showing samples is fine. */
154                 return true;
155         }
156
157 /*#ifdef NDEBUG
158 #define cuda_abort()
159 #else
160 #define cuda_abort() abort()
161 #endif*/
162         void cuda_error_documentation()
163         {
164                 if(first_error) {
165                         fprintf(stderr, "\nRefer to the Cycles GPU rendering documentation for possible solutions:\n");
166                         fprintf(stderr, "https://docs.blender.org/manual/en/dev/render/cycles/gpu_rendering.html\n\n");
167                         first_error = false;
168                 }
169         }
170
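/* Check the result of a CUDA driver API call: on failure, record the first
 * error message for the UI, print it to stderr and point the user at the GPU
 * rendering documentation, but keep going instead of aborting. */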
171 #define cuda_assert(stmt) \
172         { \
173                 CUresult result = stmt; \
174                 \
175                 if(result != CUDA_SUCCESS) { \
176                         string message = string_printf("CUDA error: %s in %s, line %d", cuewErrorString(result), #stmt, __LINE__); \
177                         if(error_msg == "") \
178                                 error_msg = message; \
179                         fprintf(stderr, "%s\n", message.c_str()); \
180                         /*cuda_abort();*/ \
181                         cuda_error_documentation(); \
182                 } \
183         } (void)0
184
185         bool cuda_error_(CUresult result, const string& stmt)
186         {
187                 if(result == CUDA_SUCCESS)
188                         return false;
189
190                 string message = string_printf("CUDA error at %s: %s", stmt.c_str(), cuewErrorString(result));
191                 if(error_msg == "")
192                         error_msg = message;
193                 fprintf(stderr, "%s\n", message.c_str());
194                 cuda_error_documentation();
195                 return true;
196         }
197
198 #define cuda_error(stmt) cuda_error_(stmt, #stmt)
199
200         void cuda_error_message(const string& message)
201         {
202                 if(error_msg == "")
203                         error_msg = message;
204                 fprintf(stderr, "%s\n", message.c_str());
205                 cuda_error_documentation();
206         }
207
208         void cuda_push_context()
209         {
210                 cuda_assert(cuCtxSetCurrent(cuContext));
211         }
212
213         void cuda_pop_context()
214         {
215                 cuda_assert(cuCtxSetCurrent(NULL));
216         }
217
218         CUDADevice(DeviceInfo& info, Stats &stats, bool background_)
219         : Device(info, stats, background_)
220         {
221                 first_error = true;
222                 background = background_;
223
224                 cuDevId = info.num;
225                 cuDevice = 0;
226                 cuContext = 0;
227
228                 cuModule = 0;
229                 cuFilterModule = 0;
230
231                 split_kernel = NULL;
232
233                 need_bindless_mapping = false;
234
235                 /* initialize */
236                 if(cuda_error(cuInit(0)))
237                         return;
238
239                 /* setup device and context */
240                 if(cuda_error(cuDeviceGet(&cuDevice, cuDevId)))
241                         return;
242
243                 CUresult result;
244
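                /* Create the CUDA context: for interactive (non-background) rendering,
                 * first try a GL interop context so results can be displayed without a
                 * round-trip through host memory; if that fails, fall back to a regular
                 * context and force background mode. */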
245                 if(background) {
246                         result = cuCtxCreate(&cuContext, 0, cuDevice);
247                 }
248                 else {
249                         result = cuGLCtxCreate(&cuContext, 0, cuDevice);
250
251                         if(result != CUDA_SUCCESS) {
252                                 result = cuCtxCreate(&cuContext, 0, cuDevice);
253                                 background = true;
254                         }
255                 }
256
257                 if(cuda_error_(result, "cuCtxCreate"))
258                         return;
259
260                 int major, minor;
261                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
262                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
263                 cuDevArchitecture = major*100 + minor*10;
264
265                 cuda_pop_context();
266         }
267
268         ~CUDADevice()
269         {
270                 task_pool.stop();
271
272                 delete split_kernel;
273
274                 if(info.has_bindless_textures) {
275                         tex_free(bindless_mapping);
276                 }
277
278                 cuda_assert(cuCtxDestroy(cuContext));
279         }
280
281         bool support_device(const DeviceRequestedFeatures& /*requested_features*/)
282         {
283                 int major, minor;
284                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
285                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
286
287                 /* We only support sm_20 and above */
288                 if(major < 2) {
289                         cuda_error_message(string_printf("CUDA device supported only with compute capability 2.0 or up, found %d.%d.", major, minor));
290                         return false;
291                 }
292
293                 return true;
294         }
295
296         bool use_adaptive_compilation()
297         {
298                 return DebugFlags().cuda.adaptive_compile;
299         }
300
301         bool use_split_kernel()
302         {
303                 return DebugFlags().cuda.split_kernel;
304         }
305
306         /* Common NVCC flags which stay the same regardless of shading model or
307          * kernel sources md5, and only depend on compiler or compilation settings.
308          */
309         string compile_kernel_get_common_cflags(
310                 const DeviceRequestedFeatures& requested_features,
311                 bool filter=false, bool split=false)
312         {
313                 const int cuda_version = cuewCompilerVersion();
314                 const int machine = system_cpu_bits();
315                 const string source_path = path_get("source");
316                 const string include_path = source_path;
317                 string cflags = string_printf("-m%d "
318                                               "--ptxas-options=\"-v\" "
319                                               "--use_fast_math "
320                                               "-DNVCC "
321                                               "-D__KERNEL_CUDA_VERSION__=%d "
322                                                "-I\"%s\"",
323                                               machine,
324                                               cuda_version,
325                                               include_path.c_str());
326                 if(!filter && use_adaptive_compilation()) {
327                         cflags += " " + requested_features.get_build_options();
328                 }
329                 const char *extra_cflags = getenv("CYCLES_CUDA_EXTRA_CFLAGS");
330                 if(extra_cflags) {
331                         cflags += string(" ") + string(extra_cflags);
332                 }
333 #ifdef WITH_CYCLES_DEBUG
334                 cflags += " -D__KERNEL_DEBUG__";
335 #endif
336
337                 if(split) {
338                         cflags += " -D__SPLIT__";
339                 }
340
341                 return cflags;
342         }
343
344         bool compile_check_compiler() {
345                 const char *nvcc = cuewCompilerPath();
346                 if(nvcc == NULL) {
347                         cuda_error_message("CUDA nvcc compiler not found. "
348                                            "Install CUDA toolkit in default location.");
349                         return false;
350                 }
351                 const int cuda_version = cuewCompilerVersion();
352                 VLOG(1) << "Found nvcc " << nvcc
353                         << ", CUDA version " << cuda_version
354                         << ".";
355                 const int major = cuda_version / 10, minor = cuda_version % 10;
356                 if(cuda_version == 0) {
357                         cuda_error_message("CUDA nvcc compiler version could not be parsed.");
358                         return false;
359                 }
360                 if(cuda_version < 80) {
361                         printf("Unsupported CUDA version %d.%d detected, "
362                                "you need CUDA 8.0 or newer.\n",
363                                major, minor);
364                         return false;
365                 }
366                 else if(cuda_version != 80) {
367                         printf("CUDA version %d.%d detected, build may succeed but only "
368                                "CUDA 8.0 is officially supported.\n",
369                                major, minor);
370                 }
371                 return true;
372         }
373
374         string compile_kernel(const DeviceRequestedFeatures& requested_features,
375                               bool filter=false, bool split=false)
376         {
377                 const char *name, *source;
378                 if(filter) {
379                         name = "filter";
380                         source = "filter.cu";
381                 }
382                 else if(split) {
383                         name = "kernel_split";
384                         source = "kernel_split.cu";
385                 }
386                 else {
387                         name = "kernel";
388                         source = "kernel.cu";
389                 }
390                 /* Compute cubin name. */
391                 int major, minor;
392                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
393                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
394
395                 /* Attempt to use kernel provided with Blender. */
396                 if(!use_adaptive_compilation()) {
397                         const string cubin = path_get(string_printf("lib/%s_sm_%d%d.cubin",
398                                                                     name, major, minor));
399                         VLOG(1) << "Testing for pre-compiled kernel " << cubin << ".";
400                         if(path_exists(cubin)) {
401                                 VLOG(1) << "Using precompiled kernel.";
402                                 return cubin;
403                         }
404                 }
405
406                 const string common_cflags =
407                         compile_kernel_get_common_cflags(requested_features, filter, split);
408
409                 /* Try to use locally compiled kernel. */
410                 const string source_path = path_get("source");
411                 const string kernel_md5 = path_files_md5_hash(source_path);
412
413                 /* We include cflags in the md5, so that changing the CUDA toolkit or other
414                  * compiler command line arguments makes sure the cubin gets re-built.
415                  */
416                 const string cubin_md5 = util_md5_string(kernel_md5 + common_cflags);
417
418                 const string cubin_file = string_printf("cycles_%s_sm%d%d_%s.cubin",
419                                                         name, major, minor,
420                                                         cubin_md5.c_str());
421                 const string cubin = path_cache_get(path_join("kernels", cubin_file));
422                 VLOG(1) << "Testing for locally compiled kernel " << cubin << ".";
423                 if(path_exists(cubin)) {
424                         VLOG(1) << "Using locally compiled kernel.";
425                         return cubin;
426                 }
427
428 #ifdef _WIN32
429                 if(have_precompiled_kernels()) {
430                         if(major < 2) {
431                                 cuda_error_message(string_printf(
432                                         "CUDA device requires compute capability 2.0 or up, "
433                                         "found %d.%d. Your GPU is not supported.",
434                                         major, minor));
435                         }
436                         else {
437                                 cuda_error_message(string_printf(
438                                         "CUDA binary kernel for this graphics card compute "
439                                         "capability (%d.%d) not found.",
440                                         major, minor));
441                         }
442                         return "";
443                 }
444 #endif
445
446                 /* Compile. */
447                 if(!compile_check_compiler()) {
448                         return "";
449                 }
450                 const char *nvcc = cuewCompilerPath();
451                 const string kernel = path_join(
452                         path_join(source_path, "kernel"),
453                         path_join("kernels",
454                                   path_join("cuda", source)));
455                 double starttime = time_dt();
456                 printf("Compiling CUDA kernel ...\n");
457
458                 path_create_directories(cubin);
459
460                 string command = string_printf("\"%s\" "
461                                                "-arch=sm_%d%d "
462                                                "--cubin \"%s\" "
463                                                "-o \"%s\" "
464                                                "%s ",
465                                                nvcc,
466                                                major, minor,
467                                                kernel.c_str(),
468                                                cubin.c_str(),
469                                                common_cflags.c_str());
470
471                 printf("%s\n", command.c_str());
472
473                 if(system(command.c_str()) == -1) {
474                         cuda_error_message("Failed to execute compilation command, "
475                                            "see console for details.");
476                         return "";
477                 }
478
479                 /* Verify if compilation succeeded */
480                 if(!path_exists(cubin)) {
481                         cuda_error_message("CUDA kernel compilation failed, "
482                                            "see console for details.");
483                         return "";
484                 }
485
486                 printf("Kernel compilation finished in %.2lfs.\n", time_dt() - starttime);
487
488                 return cubin;
489         }
490
491         bool load_kernels(const DeviceRequestedFeatures& requested_features)
492         {
493                 /* TODO(sergey): Support kernel re-load for CUDA devices.
494                  *
495                  * Currently re-loading the kernel will invalidate memory pointers,
496                  * causing problems in cuCtxSynchronize.
497                  */
498                 if(cuFilterModule && cuModule) {
499                         VLOG(1) << "Skipping kernel reload, not currently supported.";
500                         return true;
501                 }
502
503                 /* check if cuda init succeeded */
504                 if(cuContext == 0)
505                         return false;
506
507                 /* check if GPU is supported */
508                 if(!support_device(requested_features))
509                         return false;
510
511                 /* get kernel */
512                 string cubin = compile_kernel(requested_features, false, use_split_kernel());
513                 if(cubin == "")
514                         return false;
515
516                 string filter_cubin = compile_kernel(requested_features, true, false);
517                 if(filter_cubin == "")
518                         return false;
519
520                 /* open module */
521                 cuda_push_context();
522
523                 string cubin_data;
524                 CUresult result;
525
526                 if(path_read_text(cubin, cubin_data))
527                         result = cuModuleLoadData(&cuModule, cubin_data.c_str());
528                 else
529                         result = CUDA_ERROR_FILE_NOT_FOUND;
530
531                 if(cuda_error_(result, "cuModuleLoad"))
532                         cuda_error_message(string_printf("Failed loading CUDA kernel %s.", cubin.c_str()));
533
534                 if(path_read_text(filter_cubin, cubin_data))
535                         result = cuModuleLoadData(&cuFilterModule, cubin_data.c_str());
536                 else
537                         result = CUDA_ERROR_FILE_NOT_FOUND;
538
539                 if(cuda_error_(result, "cuModuleLoad"))
540                         cuda_error_message(string_printf("Failed loading CUDA kernel %s.", filter_cubin.c_str()));
541
542                 cuda_pop_context();
543
544                 return (result == CUDA_SUCCESS);
545         }
546
547         void load_bindless_mapping()
548         {
549                 if(info.has_bindless_textures && need_bindless_mapping) {
550                         tex_free(bindless_mapping);
551                         tex_alloc("__bindless_mapping", bindless_mapping, INTERPOLATION_NONE, EXTENSION_REPEAT);
552                         need_bindless_mapping = false;
553                 }
554         }
555
556         void mem_alloc(const char *name, device_memory& mem, MemoryType /*type*/)
557         {
558                 if(name) {
559                         VLOG(1) << "Buffer allocate: " << name << ", "
560                                 << string_human_readable_number(mem.memory_size()) << " bytes. ("
561                                 << string_human_readable_size(mem.memory_size()) << ")";
562                 }
563
564                 cuda_push_context();
565                 CUdeviceptr device_pointer;
566                 size_t size = mem.memory_size();
567                 cuda_assert(cuMemAlloc(&device_pointer, size));
568                 mem.device_pointer = (device_ptr)device_pointer;
569                 mem.device_size = size;
570                 stats.mem_alloc(size);
571                 cuda_pop_context();
572         }
573
574         void mem_copy_to(device_memory& mem)
575         {
576                 cuda_push_context();
577                 if(mem.device_pointer)
578                         cuda_assert(cuMemcpyHtoD(cuda_device_ptr(mem.device_pointer), (void*)mem.data_pointer, mem.memory_size()));
579                 cuda_pop_context();
580         }
581
582         void mem_copy_from(device_memory& mem, int y, int w, int h, int elem)
583         {
584                 size_t offset = elem*y*w;
585                 size_t size = elem*w*h;
586
587                 cuda_push_context();
588                 if(mem.device_pointer) {
589                         cuda_assert(cuMemcpyDtoH((uchar*)mem.data_pointer + offset,
590                                                  (CUdeviceptr)(mem.device_pointer + offset), size));
591                 }
592                 else {
593                         memset((char*)mem.data_pointer + offset, 0, size);
594                 }
595                 cuda_pop_context();
596         }
597
598         void mem_zero(device_memory& mem)
599         {
600                 if(mem.data_pointer) {
601                         memset((void*)mem.data_pointer, 0, mem.memory_size());
602                 }
603
604                 cuda_push_context();
605                 if(mem.device_pointer)
606                         cuda_assert(cuMemsetD8(cuda_device_ptr(mem.device_pointer), 0, mem.memory_size()));
607                 cuda_pop_context();
608         }
609
610         void mem_free(device_memory& mem)
611         {
612                 if(mem.device_pointer) {
613                         cuda_push_context();
614                         cuda_assert(cuMemFree(cuda_device_ptr(mem.device_pointer)));
615                         cuda_pop_context();
616
617                         mem.device_pointer = 0;
618
619                         stats.mem_free(mem.device_size);
620                         mem.device_size = 0;
621                 }
622         }
623
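        /* Return a pointer into an already allocated buffer at the given element
         * offset; no new device memory is allocated for sub-pointers. */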
624         virtual device_ptr mem_alloc_sub_ptr(device_memory& mem, int offset, int /*size*/, MemoryType /*type*/)
625         {
626                 return (device_ptr) (((char*) mem.device_pointer) + mem.memory_elements_size(offset));
627         }
628
629         void const_copy_to(const char *name, void *host, size_t size)
630         {
631                 CUdeviceptr mem;
632                 size_t bytes;
633
634                 cuda_push_context();
635                 cuda_assert(cuModuleGetGlobal(&mem, &bytes, cuModule, name));
636                 //assert(bytes == size);
637                 cuda_assert(cuMemcpyHtoD(mem, host, size));
638                 cuda_pop_context();
639         }
640
641         void tex_alloc(const char *name,
642                        device_memory& mem,
643                        InterpolationType interpolation,
644                        ExtensionType extension)
645         {
646                 VLOG(1) << "Texture allocate: " << name << ", "
647                         << string_human_readable_number(mem.memory_size()) << " bytes. ("
648                         << string_human_readable_size(mem.memory_size()) << ")";
649
650                 /* Check if we are on sm_30 or above.
651                  * We use arrays and bindless textures for storage there. */
652                 bool has_bindless_textures = info.has_bindless_textures;
653
654                 /* General variables for both architectures */
655                 string bind_name = name;
656                 size_t dsize = datatype_size(mem.data_type);
657                 size_t size = mem.memory_size();
658
659                 CUaddress_mode address_mode = CU_TR_ADDRESS_MODE_WRAP;
660                 switch(extension) {
661                         case EXTENSION_REPEAT:
662                                 address_mode = CU_TR_ADDRESS_MODE_WRAP;
663                                 break;
664                         case EXTENSION_EXTEND:
665                                 address_mode = CU_TR_ADDRESS_MODE_CLAMP;
666                                 break;
667                         case EXTENSION_CLIP:
668                                 address_mode = CU_TR_ADDRESS_MODE_BORDER;
669                                 break;
670                         default:
671                                 assert(0);
672                                 break;
673                 }
674
675                 CUfilter_mode filter_mode;
676                 if(interpolation == INTERPOLATION_CLOSEST) {
677                         filter_mode = CU_TR_FILTER_MODE_POINT;
678                 }
679                 else {
680                         filter_mode = CU_TR_FILTER_MODE_LINEAR;
681                 }
682
683                 CUarray_format_enum format;
684                 switch(mem.data_type) {
685                         case TYPE_UCHAR: format = CU_AD_FORMAT_UNSIGNED_INT8; break;
686                         case TYPE_UINT: format = CU_AD_FORMAT_UNSIGNED_INT32; break;
687                         case TYPE_INT: format = CU_AD_FORMAT_SIGNED_INT32; break;
688                         case TYPE_FLOAT: format = CU_AD_FORMAT_FLOAT; break;
689                         case TYPE_HALF: format = CU_AD_FORMAT_HALF; break;
690                         default: assert(0); return;
691                 }
692
693                 /* General variables for Fermi */
694                 CUtexref texref = NULL;
695
696                 if(!has_bindless_textures) {
697                         if(mem.data_depth > 1) {
698                                 /* The kernel uses different bind names for 2d and 3d float textures,
699                                  * so we have to adjust a couple of things here.
700                                  */
701                                 vector<string> tokens;
702                                 string_split(tokens, name, "_");
703                                 bind_name = string_printf("__tex_image_%s_3d_%s",
704                                                           tokens[2].c_str(),
705                                                           tokens[3].c_str());
706                         }
707
708                         cuda_push_context();
709                         cuda_assert(cuModuleGetTexRef(&texref, cuModule, bind_name.c_str()));
710                         cuda_pop_context();
711
712                         if(!texref) {
713                                 return;
714                         }
715                 }
716
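                /* Non-interpolated data is kept in plain global memory and only bound
                 * through a texture reference (Fermi) or a raw device pointer (bindless);
                 * interpolated image textures below use a CUDA array plus a texture
                 * reference or texture object instead. */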
717                 /* Data Storage */
718                 if(interpolation == INTERPOLATION_NONE) {
719                         if(has_bindless_textures) {
720                                 mem_alloc(NULL, mem, MEM_READ_ONLY);
721                                 mem_copy_to(mem);
722
723                                 cuda_push_context();
724
725                                 CUdeviceptr cumem;
726                                 size_t cubytes;
727
728                                 cuda_assert(cuModuleGetGlobal(&cumem, &cubytes, cuModule, bind_name.c_str()));
729
730                                 if(cubytes == 8) {
731                                         /* 64 bit device pointer */
732                                         uint64_t ptr = mem.device_pointer;
733                                         cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
734                                 }
735                                 else {
736                                         /* 32 bit device pointer */
737                                         uint32_t ptr = (uint32_t)mem.device_pointer;
738                                         cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
739                                 }
740
741                                 cuda_pop_context();
742                         }
743                         else {
744                                 mem_alloc(NULL, mem, MEM_READ_ONLY);
745                                 mem_copy_to(mem);
746
747                                 cuda_push_context();
748
749                                 cuda_assert(cuTexRefSetAddress(NULL, texref, cuda_device_ptr(mem.device_pointer), size));
750                                 cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_POINT));
751                                 cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_READ_AS_INTEGER));
752
753                                 cuda_pop_context();
754                         }
755                 }
756                 /* Texture Storage */
757                 else {
758                         CUarray handle = NULL;
759
760                         cuda_push_context();
761
762                         if(mem.data_depth > 1) {
763                                 CUDA_ARRAY3D_DESCRIPTOR desc;
764
765                                 desc.Width = mem.data_width;
766                                 desc.Height = mem.data_height;
767                                 desc.Depth = mem.data_depth;
768                                 desc.Format = format;
769                                 desc.NumChannels = mem.data_elements;
770                                 desc.Flags = 0;
771
772                                 cuda_assert(cuArray3DCreate(&handle, &desc));
773                         }
774                         else {
775                                 CUDA_ARRAY_DESCRIPTOR desc;
776
777                                 desc.Width = mem.data_width;
778                                 desc.Height = mem.data_height;
779                                 desc.Format = format;
780                                 desc.NumChannels = mem.data_elements;
781
782                                 cuda_assert(cuArrayCreate(&handle, &desc));
783                         }
784
785                         if(!handle) {
786                                 cuda_pop_context();
787                                 return;
788                         }
789
790                         /* Allocate 3D, 2D or 1D memory */
791                         if(mem.data_depth > 1) {
792                                 CUDA_MEMCPY3D param;
793                                 memset(&param, 0, sizeof(param));
794                                 param.dstMemoryType = CU_MEMORYTYPE_ARRAY;
795                                 param.dstArray = handle;
796                                 param.srcMemoryType = CU_MEMORYTYPE_HOST;
797                                 param.srcHost = (void*)mem.data_pointer;
798                                 param.srcPitch = mem.data_width*dsize*mem.data_elements;
799                                 param.WidthInBytes = param.srcPitch;
800                                 param.Height = mem.data_height;
801                                 param.Depth = mem.data_depth;
802
803                                 cuda_assert(cuMemcpy3D(&param));
804                         }
805                         else if(mem.data_height > 1) {
806                                 CUDA_MEMCPY2D param;
807                                 memset(&param, 0, sizeof(param));
808                                 param.dstMemoryType = CU_MEMORYTYPE_ARRAY;
809                                 param.dstArray = handle;
810                                 param.srcMemoryType = CU_MEMORYTYPE_HOST;
811                                 param.srcHost = (void*)mem.data_pointer;
812                                 param.srcPitch = mem.data_width*dsize*mem.data_elements;
813                                 param.WidthInBytes = param.srcPitch;
814                                 param.Height = mem.data_height;
815
816                                 cuda_assert(cuMemcpy2D(&param));
817                         }
818                         else
819                                 cuda_assert(cuMemcpyHtoA(handle, 0, (void*)mem.data_pointer, size));
820
821                         /* Fermi and Kepler */
822                         mem.device_pointer = (device_ptr)handle;
823                         mem.device_size = size;
824
825                         stats.mem_alloc(size);
826
827                         /* Bindless Textures - Kepler */
828                         if(has_bindless_textures) {
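                                /* The flat texture slot is encoded after the last underscore of the
                                 * texture name; it indexes the bindless_mapping array where the
                                 * texture object handle is stored for lookup in the kernel. */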
829                                 int flat_slot = 0;
830                                 if(string_startswith(name, "__tex_image")) {
831                                         int pos = string(name).rfind("_");
832                                         flat_slot = atoi(name + pos + 1);
833                                 }
834                                 else {
835                                         assert(0);
836                                 }
837
838                                 CUDA_RESOURCE_DESC resDesc;
839                                 memset(&resDesc, 0, sizeof(resDesc));
840                                 resDesc.resType = CU_RESOURCE_TYPE_ARRAY;
841                                 resDesc.res.array.hArray = handle;
842                                 resDesc.flags = 0;
843
844                                 CUDA_TEXTURE_DESC texDesc;
845                                 memset(&texDesc, 0, sizeof(texDesc));
846                                 texDesc.addressMode[0] = address_mode;
847                                 texDesc.addressMode[1] = address_mode;
848                                 texDesc.addressMode[2] = address_mode;
849                                 texDesc.filterMode = filter_mode;
850                                 texDesc.flags = CU_TRSF_NORMALIZED_COORDINATES;
851
852                                 CUtexObject tex = 0;
853                                 cuda_assert(cuTexObjectCreate(&tex, &resDesc, &texDesc, NULL));
854
855                                 /* Safety check: the texture object handle must fit into a 32-bit uint. */
856                                 if(tex > UINT_MAX) {
857                                         assert(0);
858                                 }
859
860                                 /* Resize once */
861                                 if(flat_slot >= bindless_mapping.size()) {
862                                         /* Allocate some slots in advance, to reduce the number
863                                          * of re-allocations.
864                                          */
865                                         bindless_mapping.resize(flat_slot + 128);
866                                 }
867
868                                 /* Set Mapping and tag that we need to (re-)upload to device */
869                                 bindless_mapping.get_data()[flat_slot] = (uint)tex;
870                                 tex_bindless_map[mem.device_pointer] = (uint)tex;
871                                 need_bindless_mapping = true;
872                         }
873                         /* Regular Textures - Fermi */
874                         else {
875                                 cuda_assert(cuTexRefSetArray(texref, handle, CU_TRSA_OVERRIDE_FORMAT));
876                                 cuda_assert(cuTexRefSetFilterMode(texref, filter_mode));
877                                 cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_NORMALIZED_COORDINATES));
878                         }
879
880                         cuda_pop_context();
881                 }
882
883                 /* Fermi, Data and Image Textures */
884                 if(!has_bindless_textures) {
885                         cuda_push_context();
886
887                         cuda_assert(cuTexRefSetAddressMode(texref, 0, address_mode));
888                         cuda_assert(cuTexRefSetAddressMode(texref, 1, address_mode));
889                         if(mem.data_depth > 1) {
890                                 cuda_assert(cuTexRefSetAddressMode(texref, 2, address_mode));
891                         }
892
893                         cuda_assert(cuTexRefSetFormat(texref, format, mem.data_elements));
894
895                         cuda_pop_context();
896                 }
897
898                 /* Fermi and Kepler */
899                 tex_interp_map[mem.device_pointer] = (interpolation != INTERPOLATION_NONE);
900         }
901
902         void tex_free(device_memory& mem)
903         {
904                 if(mem.device_pointer) {
905                         if(tex_interp_map[mem.device_pointer]) {
906                                 cuda_push_context();
907                                 cuArrayDestroy((CUarray)mem.device_pointer);
908                                 cuda_pop_context();
909
910                                 /* Free CUtexObject (Bindless Textures) */
911                                 if(info.has_bindless_textures && tex_bindless_map[mem.device_pointer]) {
912                                         uint flat_slot = tex_bindless_map[mem.device_pointer];
913                                         cuTexObjectDestroy(flat_slot);
914                                 }
915
916                                 tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
917                                 mem.device_pointer = 0;
918
919                                 stats.mem_free(mem.device_size);
920                                 mem.device_size = 0;
921                         }
922                         else {
923                                 tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
924                                 mem_free(mem);
925                         }
926                 }
927         }
928
929         bool denoising_set_tiles(device_ptr *buffers, DenoisingTask *task)
930         {
931                 mem_alloc("Denoising Tile Info", task->tiles_mem, MEM_READ_ONLY);
932
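                /* Fill in the device pointers of the denoising tile and its neighbors so
                 * that the filter kernels can read pixels across tile borders. */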
933                 TilesInfo *tiles = (TilesInfo*) task->tiles_mem.data_pointer;
934                 for(int i = 0; i < 9; i++) {
935                         tiles->buffers[i] = buffers[i];
936                 }
937
938                 mem_copy_to(task->tiles_mem);
939
940                 return !have_error();
941         }
942
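/* Helpers for the denoising kernels below: CUDA_GET_BLOCKSIZE derives a roughly
 * square 2D block size from the kernel's maximum threads per block and the number
 * of blocks needed to cover a w*h region; CUDA_LAUNCH_KERNEL launches the kernel
 * with that configuration on the default stream. */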
943 #define CUDA_GET_BLOCKSIZE(func, w, h)                                                                          \
944                         int threads_per_block;                                                                              \
945                         cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, func)); \
946                         int threads = (int)sqrt((float)threads_per_block);                                                  \
947                         int xblocks = ((w) + threads - 1)/threads;                                                          \
948                         int yblocks = ((h) + threads - 1)/threads;
949
950 #define CUDA_LAUNCH_KERNEL(func, args)                      \
951                         cuda_assert(cuLaunchKernel(func,                \
952                                                    xblocks, yblocks, 1, \
953                                                    threads, threads, 1, \
954                                                    0, 0, args, 0));
955
956         bool denoising_non_local_means(device_ptr image_ptr, device_ptr guide_ptr, device_ptr variance_ptr, device_ptr out_ptr,
957                                        DenoisingTask *task)
958         {
959                 if(have_error())
960                         return false;
961
962                 cuda_push_context();
963
964                 int4 rect = task->rect;
965                 int w = align_up(rect.z-rect.x, 4);
966                 int h = rect.w-rect.y;
967                 int r = task->nlm_state.r;
968                 int f = task->nlm_state.f;
969                 float a = task->nlm_state.a;
970                 float k_2 = task->nlm_state.k_2;
971
972                 CUdeviceptr difference     = task->nlm_state.temporary_1_ptr;
973                 CUdeviceptr blurDifference = task->nlm_state.temporary_2_ptr;
974                 CUdeviceptr weightAccum    = task->nlm_state.temporary_3_ptr;
975
976                 cuda_assert(cuMemsetD8(weightAccum, 0, sizeof(float)*w*h));
977                 cuda_assert(cuMemsetD8(out_ptr, 0, sizeof(float)*w*h));
978
979                 CUfunction cuNLMCalcDifference, cuNLMBlur, cuNLMCalcWeight, cuNLMUpdateOutput, cuNLMNormalize;
980                 cuda_assert(cuModuleGetFunction(&cuNLMCalcDifference, cuFilterModule, "kernel_cuda_filter_nlm_calc_difference"));
981                 cuda_assert(cuModuleGetFunction(&cuNLMBlur,           cuFilterModule, "kernel_cuda_filter_nlm_blur"));
982                 cuda_assert(cuModuleGetFunction(&cuNLMCalcWeight,     cuFilterModule, "kernel_cuda_filter_nlm_calc_weight"));
983                 cuda_assert(cuModuleGetFunction(&cuNLMUpdateOutput,   cuFilterModule, "kernel_cuda_filter_nlm_update_output"));
984                 cuda_assert(cuModuleGetFunction(&cuNLMNormalize,      cuFilterModule, "kernel_cuda_filter_nlm_normalize"));
985
986                 cuda_assert(cuFuncSetCacheConfig(cuNLMCalcDifference, CU_FUNC_CACHE_PREFER_L1));
987                 cuda_assert(cuFuncSetCacheConfig(cuNLMBlur,           CU_FUNC_CACHE_PREFER_L1));
988                 cuda_assert(cuFuncSetCacheConfig(cuNLMCalcWeight,     CU_FUNC_CACHE_PREFER_L1));
989                 cuda_assert(cuFuncSetCacheConfig(cuNLMUpdateOutput,   CU_FUNC_CACHE_PREFER_L1));
990                 cuda_assert(cuFuncSetCacheConfig(cuNLMNormalize,      CU_FUNC_CACHE_PREFER_L1));
991
992                 CUDA_GET_BLOCKSIZE(cuNLMCalcDifference, rect.z-rect.x, rect.w-rect.y);
993
994                 int dx, dy;
995                 int4 local_rect;
996                 int channel_offset = 0;
997                 void *calc_difference_args[] = {&dx, &dy, &guide_ptr, &variance_ptr, &difference, &local_rect, &w, &channel_offset, &a, &k_2};
998                 void *blur_args[]            = {&difference, &blurDifference, &local_rect, &w, &f};
999                 void *calc_weight_args[]     = {&blurDifference, &difference, &local_rect, &w, &f};
1000                 void *update_output_args[]   = {&dx, &dy, &blurDifference, &image_ptr, &out_ptr, &weightAccum, &local_rect, &w, &f};
1001
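                /* Loop over all (dx, dy) offsets in the (2r+1)x(2r+1) search window. For each
                 * offset: compute the per-pixel difference between the guide image and its
                 * shifted copy, blur it, convert it to NLM weights, and accumulate the
                 * weighted shifted image and the weights; the final kernel divides by the
                 * accumulated weights. */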
1002                 for(int i = 0; i < (2*r+1)*(2*r+1); i++) {
1003                         dy = i / (2*r+1) - r;
1004                         dx = i % (2*r+1) - r;
1005                         local_rect = make_int4(max(0, -dx), max(0, -dy), rect.z-rect.x - max(0, dx), rect.w-rect.y - max(0, dy));
1006
1007                         CUDA_LAUNCH_KERNEL(cuNLMCalcDifference, calc_difference_args);
1008                         CUDA_LAUNCH_KERNEL(cuNLMBlur, blur_args);
1009                         CUDA_LAUNCH_KERNEL(cuNLMCalcWeight, calc_weight_args);
1010                         CUDA_LAUNCH_KERNEL(cuNLMBlur, blur_args);
1011                         CUDA_LAUNCH_KERNEL(cuNLMUpdateOutput, update_output_args);
1012                 }
1013
1014                 local_rect = make_int4(0, 0, rect.z-rect.x, rect.w-rect.y);
1015                 void *normalize_args[] = {&out_ptr, &weightAccum, &local_rect, &w};
1016                 CUDA_LAUNCH_KERNEL(cuNLMNormalize, normalize_args);
1017                 cuda_assert(cuCtxSynchronize());
1018
1019                 cuda_pop_context();
1020                 return !have_error();
1021         }
1022
1023         bool denoising_construct_transform(DenoisingTask *task)
1024         {
1025                 if(have_error())
1026                         return false;
1027
1028                 cuda_push_context();
1029
1030                 CUfunction cuFilterConstructTransform;
1031                 cuda_assert(cuModuleGetFunction(&cuFilterConstructTransform, cuFilterModule, "kernel_cuda_filter_construct_transform"));
1032                 cuda_assert(cuFuncSetCacheConfig(cuFilterConstructTransform, CU_FUNC_CACHE_PREFER_SHARED));
1033                 CUDA_GET_BLOCKSIZE(cuFilterConstructTransform,
1034                                    task->storage.w,
1035                                    task->storage.h);
1036
1037                 void *args[] = {&task->buffer.mem.device_pointer,
1038                                 &task->storage.transform.device_pointer,
1039                                 &task->storage.rank.device_pointer,
1040                                 &task->filter_area,
1041                                 &task->rect,
1042                                 &task->radius,
1043                                 &task->pca_threshold,
1044                                 &task->buffer.pass_stride};
1045                 CUDA_LAUNCH_KERNEL(cuFilterConstructTransform, args);
1046                 cuda_assert(cuCtxSynchronize());
1047
1048                 cuda_pop_context();
1049                 return !have_error();
1050         }
1051
1052         bool denoising_reconstruct(device_ptr color_ptr,
1053                                    device_ptr color_variance_ptr,
1054                                    device_ptr output_ptr,
1055                                    DenoisingTask *task)
1056         {
1057                 if(have_error())
1058                         return false;
1059
1060                 mem_zero(task->storage.XtWX);
1061                 mem_zero(task->storage.XtWY);
1062
1063                 cuda_push_context();
1064
1065                 CUfunction cuNLMCalcDifference, cuNLMBlur, cuNLMCalcWeight, cuNLMConstructGramian, cuFinalize;
1066                 cuda_assert(cuModuleGetFunction(&cuNLMCalcDifference,   cuFilterModule, "kernel_cuda_filter_nlm_calc_difference"));
1067                 cuda_assert(cuModuleGetFunction(&cuNLMBlur,             cuFilterModule, "kernel_cuda_filter_nlm_blur"));
1068                 cuda_assert(cuModuleGetFunction(&cuNLMCalcWeight,       cuFilterModule, "kernel_cuda_filter_nlm_calc_weight"));
1069                 cuda_assert(cuModuleGetFunction(&cuNLMConstructGramian, cuFilterModule, "kernel_cuda_filter_nlm_construct_gramian"));
1070                 cuda_assert(cuModuleGetFunction(&cuFinalize,            cuFilterModule, "kernel_cuda_filter_finalize"));
1071
1072                 cuda_assert(cuFuncSetCacheConfig(cuNLMCalcDifference,   CU_FUNC_CACHE_PREFER_L1));
1073                 cuda_assert(cuFuncSetCacheConfig(cuNLMBlur,             CU_FUNC_CACHE_PREFER_L1));
1074                 cuda_assert(cuFuncSetCacheConfig(cuNLMCalcWeight,       CU_FUNC_CACHE_PREFER_L1));
1075                 cuda_assert(cuFuncSetCacheConfig(cuNLMConstructGramian, CU_FUNC_CACHE_PREFER_SHARED));
1076                 cuda_assert(cuFuncSetCacheConfig(cuFinalize,            CU_FUNC_CACHE_PREFER_L1));
1077
1078                 CUDA_GET_BLOCKSIZE(cuNLMCalcDifference,
1079                                    task->reconstruction_state.source_w,
1080                                    task->reconstruction_state.source_h);
1081
1082                 CUdeviceptr difference     = task->reconstruction_state.temporary_1_ptr;
1083                 CUdeviceptr blurDifference = task->reconstruction_state.temporary_2_ptr;
1084
1085                 int r = task->radius;
1086                 int f = 4;
1087                 float a = 1.0f;
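                /* For every offset in the search window, compute NLM weights from the noisy
                 * color passes and use them to accumulate the weighted normal equations
                 * XtWX and XtWY; the finalize kernel then solves the per-pixel least-squares
                 * fit to produce the reconstructed output. */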
1088                 for(int i = 0; i < (2*r+1)*(2*r+1); i++) {
1089                         int dy = i / (2*r+1) - r;
1090                         int dx = i % (2*r+1) - r;
1091
1092                         int local_rect[4] = {max(0, -dx), max(0, -dy),
1093                                              task->reconstruction_state.source_w - max(0, dx),
1094                                              task->reconstruction_state.source_h - max(0, dy)};
1095
1096                         void *calc_difference_args[] = {&dx, &dy,
1097                                                         &color_ptr,
1098                                                         &color_variance_ptr,
1099                                                         &difference,
1100                                                         &local_rect,
1101                                                         &task->buffer.w,
1102                                                         &task->buffer.pass_stride,
1103                                                         &a,
1104                                                         &task->nlm_k_2};
1105                         CUDA_LAUNCH_KERNEL(cuNLMCalcDifference, calc_difference_args);
1106
1107                         void *blur_args[] = {&difference,
1108                                              &blurDifference,
1109                                              &local_rect,
1110                                              &task->buffer.w,
1111                                              &f};
1112                         CUDA_LAUNCH_KERNEL(cuNLMBlur, blur_args);
1113
1114                         void *calc_weight_args[] = {&blurDifference,
1115                                                     &difference,
1116                                                     &local_rect,
1117                                                     &task->buffer.w,
1118                                                     &f};
1119                         CUDA_LAUNCH_KERNEL(cuNLMCalcWeight, calc_weight_args);
1120
1121                         /* Reuse previous arguments. */
1122                         CUDA_LAUNCH_KERNEL(cuNLMBlur, blur_args);
1123
1124                         void *construct_gramian_args[] = {&dx, &dy,
1125                                                           &blurDifference,
1126                                                           &task->buffer.mem.device_pointer,
1127                                                           &task->storage.transform.device_pointer,
1128                                                           &task->storage.rank.device_pointer,
1129                                                           &task->storage.XtWX.device_pointer,
1130                                                           &task->storage.XtWY.device_pointer,
1131                                                           &local_rect,
1132                                                           &task->reconstruction_state.filter_rect,
1133                                                           &task->buffer.w,
1134                                                           &task->buffer.h,
1135                                                           &f,
1136                                                           &task->buffer.pass_stride};
1137                         CUDA_LAUNCH_KERNEL(cuNLMConstructGramian, construct_gramian_args);
1138                 }
1139
1140                 void *finalize_args[] = {&task->buffer.w,
1141                                          &task->buffer.h,
1142                                          &output_ptr,
1143                                          &task->storage.rank.device_pointer,
1144                                          &task->storage.XtWX.device_pointer,
1145                                          &task->storage.XtWY.device_pointer,
1146                                          &task->filter_area,
1147                                          &task->reconstruction_state.buffer_params.x,
1148                                          &task->render_buffer.samples};
1149                 CUDA_LAUNCH_KERNEL(cuFinalize, finalize_args);
1150                 cuda_assert(cuCtxSynchronize());
1151
1152                 cuda_pop_context();
1153                 return !have_error();
1154         }
1155
1156         bool denoising_combine_halves(device_ptr a_ptr, device_ptr b_ptr,
1157                                       device_ptr mean_ptr, device_ptr variance_ptr,
1158                                       int r, int4 rect, DenoisingTask *task)
1159         {
1160                 if(have_error())
1161                         return false;
1162
1163                 cuda_push_context();
1164
1165                 CUfunction cuFilterCombineHalves;
1166                 cuda_assert(cuModuleGetFunction(&cuFilterCombineHalves, cuFilterModule, "kernel_cuda_filter_combine_halves"));
1167                 cuda_assert(cuFuncSetCacheConfig(cuFilterCombineHalves, CU_FUNC_CACHE_PREFER_L1));
1168                 CUDA_GET_BLOCKSIZE(cuFilterCombineHalves,
1169                                    task->rect.z-task->rect.x,
1170                                    task->rect.w-task->rect.y);
1171
1172                 void *args[] = {&mean_ptr,
1173                                 &variance_ptr,
1174                                 &a_ptr,
1175                                 &b_ptr,
1176                                 &rect,
1177                                 &r};
1178                 CUDA_LAUNCH_KERNEL(cuFilterCombineHalves, args);
1179                 cuda_assert(cuCtxSynchronize());
1180
1181                 cuda_pop_context();
1182                 return !have_error();
1183         }
1184
1185         bool denoising_divide_shadow(device_ptr a_ptr, device_ptr b_ptr,
1186                                      device_ptr sample_variance_ptr, device_ptr sv_variance_ptr,
1187                                      device_ptr buffer_variance_ptr, DenoisingTask *task)
1188         {
1189                 if(have_error())
1190                         return false;
1191
1192                 cuda_push_context();
1193
1194                 CUfunction cuFilterDivideShadow;
1195                 cuda_assert(cuModuleGetFunction(&cuFilterDivideShadow, cuFilterModule, "kernel_cuda_filter_divide_shadow"));
1196                 cuda_assert(cuFuncSetCacheConfig(cuFilterDivideShadow, CU_FUNC_CACHE_PREFER_L1));
1197                 CUDA_GET_BLOCKSIZE(cuFilterDivideShadow,
1198                                    task->rect.z-task->rect.x,
1199                                    task->rect.w-task->rect.y);
1200
1201                 bool use_split_variance = use_split_kernel();
1202                 void *args[] = {&task->render_buffer.samples,
1203                                 &task->tiles_mem.device_pointer,
1204                                 &a_ptr,
1205                                 &b_ptr,
1206                                 &sample_variance_ptr,
1207                                 &sv_variance_ptr,
1208                                 &buffer_variance_ptr,
1209                                 &task->rect,
1210                                 &task->render_buffer.pass_stride,
1211                                 &task->render_buffer.denoising_data_offset,
1212                                 &use_split_variance};
1213                 CUDA_LAUNCH_KERNEL(cuFilterDivideShadow, args);
1214                 cuda_assert(cuCtxSynchronize());
1215
1216                 cuda_pop_context();
1217                 return !have_error();
1218         }
1219
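        /* Launch kernel_cuda_filter_get_feature to copy one denoising feature pass and
         * its variance, located at the given pass offsets, out of the render buffer. */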
1220         bool denoising_get_feature(int mean_offset,
1221                                    int variance_offset,
1222                                    device_ptr mean_ptr,
1223                                    device_ptr variance_ptr,
1224                                    DenoisingTask *task)
1225         {
1226                 if(have_error())
1227                         return false;
1228
1229                 cuda_push_context();
1230
1231                 CUfunction cuFilterGetFeature;
1232                 cuda_assert(cuModuleGetFunction(&cuFilterGetFeature, cuFilterModule, "kernel_cuda_filter_get_feature"));
1233                 cuda_assert(cuFuncSetCacheConfig(cuFilterGetFeature, CU_FUNC_CACHE_PREFER_L1));
1234                 CUDA_GET_BLOCKSIZE(cuFilterGetFeature,
1235                                    task->rect.z-task->rect.x,
1236                                    task->rect.w-task->rect.y);
1237
1238                 bool use_split_variance = use_split_kernel();
1239                 void *args[] = {&task->render_buffer.samples,
1240                                 &task->tiles_mem.device_pointer,
1241                                 &mean_offset,
1242                                 &variance_offset,
1243                                 &mean_ptr,
1244                                 &variance_ptr,
1245                                 &task->rect,
1246                                 &task->render_buffer.pass_stride,
1247                                 &task->render_buffer.denoising_data_offset,
1248                                 &use_split_variance};
1249                 CUDA_LAUNCH_KERNEL(cuFilterGetFeature, args);
1250                 cuda_assert(cuCtxSynchronize());
1251
1252                 cuda_pop_context();
1253                 return !have_error();
1254         }
1255
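        /* Launch kernel_cuda_filter_detect_outliers to detect outlier pixels in the noisy
         * image and write the filtered result to the output buffer before reconstruction. */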
1256         bool denoising_detect_outliers(device_ptr image_ptr,
1257                                        device_ptr variance_ptr,
1258                                        device_ptr depth_ptr,
1259                                        device_ptr output_ptr,
1260                                        DenoisingTask *task)
1261         {
1262                 if(have_error())
1263                         return false;
1264
1265                 cuda_push_context();
1266
1267                 CUfunction cuFilterDetectOutliers;
1268                 cuda_assert(cuModuleGetFunction(&cuFilterDetectOutliers, cuFilterModule, "kernel_cuda_filter_detect_outliers"));
1269                 cuda_assert(cuFuncSetCacheConfig(cuFilterDetectOutliers, CU_FUNC_CACHE_PREFER_L1));
1270                 CUDA_GET_BLOCKSIZE(cuFilterDetectOutliers,
1271                                    task->rect.z-task->rect.x,
1272                                    task->rect.w-task->rect.y);
1273
1274                 void *args[] = {&image_ptr,
1275                                 &variance_ptr,
1276                                 &depth_ptr,
1277                                 &output_ptr,
1278                                 &task->rect,
1279                                 &task->buffer.pass_stride};
1280
1281                 CUDA_LAUNCH_KERNEL(cuFilterDetectOutliers, args);
1282                 cuda_assert(cuCtxSynchronize());
1283
1284                 cuda_pop_context();
1285                 return !have_error();
1286         }
1287
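        /* Denoise one tile: bind the CUDA filter functions above as DenoisingTask
         * callbacks, map the 3x3 tile neighborhood and run the denoiser on the center tile. */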
1288         void denoise(RenderTile &rtile, const DeviceTask &task)
1289         {
1290                 DenoisingTask denoising(this);
1291
1292                 denoising.functions.construct_transform = function_bind(&CUDADevice::denoising_construct_transform, this, &denoising);
1293                 denoising.functions.reconstruct = function_bind(&CUDADevice::denoising_reconstruct, this, _1, _2, _3, &denoising);
1294                 denoising.functions.divide_shadow = function_bind(&CUDADevice::denoising_divide_shadow, this, _1, _2, _3, _4, _5, &denoising);
1295                 denoising.functions.non_local_means = function_bind(&CUDADevice::denoising_non_local_means, this, _1, _2, _3, _4, &denoising);
1296                 denoising.functions.combine_halves = function_bind(&CUDADevice::denoising_combine_halves, this, _1, _2, _3, _4, _5, _6, &denoising);
1297                 denoising.functions.get_feature = function_bind(&CUDADevice::denoising_get_feature, this, _1, _2, _3, _4, &denoising);
1298                 denoising.functions.detect_outliers = function_bind(&CUDADevice::denoising_detect_outliers, this, _1, _2, _3, _4, &denoising);
1299                 denoising.functions.set_tiles = function_bind(&CUDADevice::denoising_set_tiles, this, _1, &denoising);
1300
1301                 denoising.filter_area = make_int4(rtile.x, rtile.y, rtile.w, rtile.h);
1302                 denoising.render_buffer.samples = rtile.sample;
1303
1304                 RenderTile rtiles[9];
1305                 rtiles[4] = rtile;
1306                 task.map_neighbor_tiles(rtiles, this);
1307                 denoising.tiles_from_rendertiles(rtiles);
1308
1309                 denoising.init_from_devicetask(task);
1310
1311                 denoising.run_denoising();
1312
1313                 task.unmap_neighbor_tiles(rtiles, this);
1314         }
1315
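        /* Render one sample of the tile with the regular or branched path tracing kernel,
         * using a roughly square thread block derived from the kernel's maximum threads
         * per block and a grid that covers the tile. */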
1316         void path_trace(RenderTile& rtile, int sample, bool branched)
1317         {
1318                 if(have_error())
1319                         return;
1320
1321                 cuda_push_context();
1322
1323                 CUfunction cuPathTrace;
1324                 CUdeviceptr d_buffer = cuda_device_ptr(rtile.buffer);
1325                 CUdeviceptr d_rng_state = cuda_device_ptr(rtile.rng_state);
1326
1327                 /* get kernel function */
1328                 if(branched) {
1329                         cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_branched_path_trace"));
1330                 }
1331                 else {
1332                         cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_path_trace"));
1333                 }
1334
1335                 if(have_error())
1336                         return;
1337
1338                 /* pass in parameters */
1339                 void *args[] = {&d_buffer,
1340                                 &d_rng_state,
1341                                 &sample,
1342                                 &rtile.x,
1343                                 &rtile.y,
1344                                 &rtile.w,
1345                                 &rtile.h,
1346                                 &rtile.offset,
1347                                 &rtile.stride};
1348
1349                 /* launch kernel */
1350                 int threads_per_block;
1351                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuPathTrace));
1352
1353                 /*int num_registers;
1354                 cuda_assert(cuFuncGetAttribute(&num_registers, CU_FUNC_ATTRIBUTE_NUM_REGS, cuPathTrace));
1355
1356                 printf("threads_per_block %d\n", threads_per_block);
1357                 printf("num_registers %d\n", num_registers);*/
1358
1359                 int xthreads = (int)sqrt(threads_per_block);
1360                 int ythreads = (int)sqrt(threads_per_block);
1361                 int xblocks = (rtile.w + xthreads - 1)/xthreads;
1362                 int yblocks = (rtile.h + ythreads - 1)/ythreads;
1363
1364                 cuda_assert(cuFuncSetCacheConfig(cuPathTrace, CU_FUNC_CACHE_PREFER_L1));
1365
1366                 cuda_assert(cuLaunchKernel(cuPathTrace,
1367                                            xblocks , yblocks, 1, /* blocks */
1368                                            xthreads, ythreads, 1, /* threads */
1369                                            0, 0, args, 0));
1370
1371                 cuda_assert(cuCtxSynchronize());
1372
1373                 cuda_pop_context();
1374         }
1375
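        /* Convert accumulated render buffer values into displayable RGBA pixels (half
         * float or byte), writing into the mapped PBO in interactive mode or a plain
         * device buffer in background mode. */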
1376         void film_convert(DeviceTask& task, device_ptr buffer, device_ptr rgba_byte, device_ptr rgba_half)
1377         {
1378                 if(have_error())
1379                         return;
1380
1381                 cuda_push_context();
1382
1383                 CUfunction cuFilmConvert;
1384                 CUdeviceptr d_rgba = map_pixels((rgba_byte)? rgba_byte: rgba_half);
1385                 CUdeviceptr d_buffer = cuda_device_ptr(buffer);
1386
1387                 /* get kernel function */
1388                 if(rgba_half) {
1389                         cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_half_float"));
1390                 }
1391                 else {
1392                         cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_byte"));
1393                 }
1394
1395
1396                 float sample_scale = 1.0f/(task.sample + 1);
1397
1398                 /* pass in parameters */
1399                 void *args[] = {&d_rgba,
1400                                 &d_buffer,
1401                                 &sample_scale,
1402                                 &task.x,
1403                                 &task.y,
1404                                 &task.w,
1405                                 &task.h,
1406                                 &task.offset,
1407                                 &task.stride};
1408
1409                 /* launch kernel */
1410                 int threads_per_block;
1411                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuFilmConvert));
1412
1413                 int xthreads = (int)sqrt(threads_per_block);
1414                 int ythreads = (int)sqrt(threads_per_block);
1415                 int xblocks = (task.w + xthreads - 1)/xthreads;
1416                 int yblocks = (task.h + ythreads - 1)/ythreads;
1417
1418                 cuda_assert(cuFuncSetCacheConfig(cuFilmConvert, CU_FUNC_CACHE_PREFER_L1));
1419
1420                 cuda_assert(cuLaunchKernel(cuFilmConvert,
1421                                            xblocks , yblocks, 1, /* blocks */
1422                                            xthreads, ythreads, 1, /* threads */
1423                                            0, 0, args, 0));
1424
1425                 unmap_pixels((rgba_byte)? rgba_byte: rgba_half);
1426
1427                 cuda_pop_context();
1428         }
1429
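        /* Evaluate shader or bake requests: run kernel_cuda_shader / kernel_cuda_bake over
         * the input buffer in chunks of 65536 elements so the task can be cancelled
         * between launches. */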
1430         void shader(DeviceTask& task)
1431         {
1432                 if(have_error())
1433                         return;
1434
1435                 cuda_push_context();
1436
1437                 CUfunction cuShader;
1438                 CUdeviceptr d_input = cuda_device_ptr(task.shader_input);
1439                 CUdeviceptr d_output = cuda_device_ptr(task.shader_output);
1440                 CUdeviceptr d_output_luma = cuda_device_ptr(task.shader_output_luma);
1441
1442                 /* get kernel function */
1443                 if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
1444                         cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_bake"));
1445                 }
1446                 else {
1447                         cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_shader"));
1448                 }
1449
1450                 /* do tasks in smaller chunks, so we can cancel them */
1451                 const int shader_chunk_size = 65536;
1452                 const int start = task.shader_x;
1453                 const int end = task.shader_x + task.shader_w;
1454                 int offset = task.offset;
1455
1456                 bool canceled = false;
1457                 for(int sample = 0; sample < task.num_samples && !canceled; sample++) {
1458                         for(int shader_x = start; shader_x < end; shader_x += shader_chunk_size) {
1459                                 int shader_w = min(shader_chunk_size, end - shader_x);
1460
1461                                 /* pass in parameters */
1462                                 void *args[8];
1463                                 int arg = 0;
1464                                 args[arg++] = &d_input;
1465                                 args[arg++] = &d_output;
1466                                 if(task.shader_eval_type < SHADER_EVAL_BAKE) {
1467                                         args[arg++] = &d_output_luma;
1468                                 }
1469                                 args[arg++] = &task.shader_eval_type;
1470                                 if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
1471                                         args[arg++] = &task.shader_filter;
1472                                 }
1473                                 args[arg++] = &shader_x;
1474                                 args[arg++] = &shader_w;
1475                                 args[arg++] = &offset;
1476                                 args[arg++] = &sample;
1477
1478                                 /* launch kernel */
1479                                 int threads_per_block;
1480                                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuShader));
1481
1482                                 int xblocks = (shader_w + threads_per_block - 1)/threads_per_block;
1483
1484                                 cuda_assert(cuFuncSetCacheConfig(cuShader, CU_FUNC_CACHE_PREFER_L1));
1485                                 cuda_assert(cuLaunchKernel(cuShader,
1486                                                            xblocks , 1, 1, /* blocks */
1487                                                            threads_per_block, 1, 1, /* threads */
1488                                                            0, 0, args, 0));
1489
1490                                 cuda_assert(cuCtxSynchronize());
1491
1492                                 if(task.get_cancel()) {
1493                                         canceled = true;
1494                                         break;
1495                                 }
1496                         }
1497
1498                         task.update_progress(NULL);
1499                 }
1500
1501                 cuda_pop_context();
1502         }
1503
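        /* Map the PBO backing an interactive display buffer and return its CUDA pointer;
         * in background mode the buffer already lives in plain CUDA memory. */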
1504         CUdeviceptr map_pixels(device_ptr mem)
1505         {
1506                 if(!background) {
1507                         PixelMem pmem = pixel_mem_map[mem];
1508                         CUdeviceptr buffer;
1509
1510                         size_t bytes;
1511                         cuda_assert(cuGraphicsMapResources(1, &pmem.cuPBOresource, 0));
1512                         cuda_assert(cuGraphicsResourceGetMappedPointer(&buffer, &bytes, pmem.cuPBOresource));
1513
1514                         return buffer;
1515                 }
1516
1517                 return cuda_device_ptr(mem);
1518         }
1519
1520         void unmap_pixels(device_ptr mem)
1521         {
1522                 if(!background) {
1523                         PixelMem pmem = pixel_mem_map[mem];
1524
1525                         cuda_assert(cuGraphicsUnmapResources(1, &pmem.cuPBOresource, 0));
1526                 }
1527         }
1528
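        /* Allocate display pixels. In interactive mode this creates an OpenGL PBO and
         * texture and registers the PBO with CUDA; if registration fails, we fall back
         * to background (non-interop) allocation. */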
1529         void pixels_alloc(device_memory& mem)
1530         {
1531                 if(!background) {
1532                         PixelMem pmem;
1533
1534                         pmem.w = mem.data_width;
1535                         pmem.h = mem.data_height;
1536
1537                         cuda_push_context();
1538
1539                         glGenBuffers(1, &pmem.cuPBO);
1540                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
1541                         if(mem.data_type == TYPE_HALF)
1542                                 glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(GLhalf)*4, NULL, GL_DYNAMIC_DRAW);
1543                         else
1544                                 glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(uint8_t)*4, NULL, GL_DYNAMIC_DRAW);
1545
1546                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1547
1548                         glGenTextures(1, &pmem.cuTexId);
1549                         glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
1550                         if(mem.data_type == TYPE_HALF)
1551                                 glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F_ARB, pmem.w, pmem.h, 0, GL_RGBA, GL_HALF_FLOAT, NULL);
1552                         else
1553                                 glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, pmem.w, pmem.h, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
1554                         glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
1555                         glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
1556                         glBindTexture(GL_TEXTURE_2D, 0);
1557
1558                         CUresult result = cuGraphicsGLRegisterBuffer(&pmem.cuPBOresource, pmem.cuPBO, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);
1559
1560                         if(result == CUDA_SUCCESS) {
1561                                 cuda_pop_context();
1562
1563                                 mem.device_pointer = pmem.cuTexId;
1564                                 pixel_mem_map[mem.device_pointer] = pmem;
1565
1566                                 mem.device_size = mem.memory_size();
1567                                 stats.mem_alloc(mem.device_size);
1568
1569                                 return;
1570                         }
1571                         else {
1572                                 /* failed to register buffer, fall back to no interop */
1573                                 glDeleteBuffers(1, &pmem.cuPBO);
1574                                 glDeleteTextures(1, &pmem.cuTexId);
1575
1576                                 cuda_pop_context();
1577
1578                                 background = true;
1579                         }
1580                 }
1581
1582                 Device::pixels_alloc(mem);
1583         }
1584
1585         void pixels_copy_from(device_memory& mem, int y, int w, int h)
1586         {
1587                 if(!background) {
1588                         PixelMem pmem = pixel_mem_map[mem.device_pointer];
1589
1590                         cuda_push_context();
1591
1592                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
1593                         uchar *pixels = (uchar*)glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_READ_ONLY);
1594                         size_t offset = sizeof(uchar)*4*y*w;
1595                         memcpy((uchar*)mem.data_pointer + offset, pixels + offset, sizeof(uchar)*4*w*h);
1596                         glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
1597                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1598
1599                         cuda_pop_context();
1600
1601                         return;
1602                 }
1603
1604                 Device::pixels_copy_from(mem, y, w, h);
1605         }
1606
1607         void pixels_free(device_memory& mem)
1608         {
1609                 if(mem.device_pointer) {
1610                         if(!background) {
1611                                 PixelMem pmem = pixel_mem_map[mem.device_pointer];
1612
1613                                 cuda_push_context();
1614
1615                                 cuda_assert(cuGraphicsUnregisterResource(pmem.cuPBOresource));
1616                                 glDeleteBuffers(1, &pmem.cuPBO);
1617                                 glDeleteTextures(1, &pmem.cuTexId);
1618
1619                                 cuda_pop_context();
1620
1621                                 pixel_mem_map.erase(pixel_mem_map.find(mem.device_pointer));
1622                                 mem.device_pointer = 0;
1623
1624                                 stats.mem_free(mem.device_size);
1625                                 mem.device_size = 0;
1626
1627                                 return;
1628                         }
1629
1630                         Device::pixels_free(mem);
1631                 }
1632         }
1633
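        /* Draw the tile to the viewport: upload the PBO contents into the texture and
         * render a textured quad through a small streaming vertex buffer. */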
1634         void draw_pixels(device_memory& mem, int y, int w, int h, int dx, int dy, int width, int height, bool transparent,
1635                 const DeviceDrawParams &draw_params)
1636         {
1637                 if(!background) {
1638                         PixelMem pmem = pixel_mem_map[mem.device_pointer];
1639                         float *vpointer;
1640
1641                         cuda_push_context();
1642
1643                         /* for multi devices, this assumes the inefficient method that we allocate
1644                          * all pixels on the device even though we only render to a subset */
1645                         size_t offset = 4*y*w;
1646
1647                         if(mem.data_type == TYPE_HALF)
1648                                 offset *= sizeof(GLhalf);
1649                         else
1650                                 offset *= sizeof(uint8_t);
1651
1652                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
1653                         glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
1654                         if(mem.data_type == TYPE_HALF)
1655                                 glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_HALF_FLOAT, (void*)offset);
1656                         else
1657                                 glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, (void*)offset);
1658                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1659
1660                         glEnable(GL_TEXTURE_2D);
1661
1662                         if(transparent) {
1663                                 glEnable(GL_BLEND);
1664                                 glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
1665                         }
1666
1667                         glColor3f(1.0f, 1.0f, 1.0f);
1668
1669                         if(draw_params.bind_display_space_shader_cb) {
1670                                 draw_params.bind_display_space_shader_cb();
1671                         }
1672
1673                         if(!vertex_buffer)
1674                                 glGenBuffers(1, &vertex_buffer);
1675
1676                         glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
1677                         /* invalidate old contents - avoids stalling if buffer is still waiting in queue to be rendered */
1678                         glBufferData(GL_ARRAY_BUFFER, 16 * sizeof(float), NULL, GL_STREAM_DRAW);
1679
1680                         vpointer = (float *)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
1681
1682                         if(vpointer) {
1683                                 /* texture coordinate - vertex pair */
1684                                 vpointer[0] = 0.0f;
1685                                 vpointer[1] = 0.0f;
1686                                 vpointer[2] = dx;
1687                                 vpointer[3] = dy;
1688
1689                                 vpointer[4] = (float)w/(float)pmem.w;
1690                                 vpointer[5] = 0.0f;
1691                                 vpointer[6] = (float)width + dx;
1692                                 vpointer[7] = dy;
1693
1694                                 vpointer[8] = (float)w/(float)pmem.w;
1695                                 vpointer[9] = (float)h/(float)pmem.h;
1696                                 vpointer[10] = (float)width + dx;
1697                                 vpointer[11] = (float)height + dy;
1698
1699                                 vpointer[12] = 0.0f;
1700                                 vpointer[13] = (float)h/(float)pmem.h;
1701                                 vpointer[14] = dx;
1702                                 vpointer[15] = (float)height + dy;
1703
1704                                 glUnmapBuffer(GL_ARRAY_BUFFER);
1705                         }
1706
1707                         glTexCoordPointer(2, GL_FLOAT, 4 * sizeof(float), 0);
1708                         glVertexPointer(2, GL_FLOAT, 4 * sizeof(float), (char *)NULL + 2 * sizeof(float));
1709
1710                         glEnableClientState(GL_VERTEX_ARRAY);
1711                         glEnableClientState(GL_TEXTURE_COORD_ARRAY);
1712
1713                         glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
1714
1715                         glDisableClientState(GL_TEXTURE_COORD_ARRAY);
1716                         glDisableClientState(GL_VERTEX_ARRAY);
1717
1718                         glBindBuffer(GL_ARRAY_BUFFER, 0);
1719
1720                         if(draw_params.unbind_display_space_shader_cb) {
1721                                 draw_params.unbind_display_space_shader_cb();
1722                         }
1723
1724                         if(transparent)
1725                                 glDisable(GL_BLEND);
1726
1727                         glBindTexture(GL_TEXTURE_2D, 0);
1728                         glDisable(GL_TEXTURE_2D);
1729
1730                         cuda_pop_context();
1731
1732                         return;
1733                 }
1734
1735                 Device::draw_pixels(mem, y, w, h, dx, dy, width, height, transparent, draw_params);
1736         }
1737
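        /* Worker thread entry point: acquire tiles and dispatch them to path tracing
         * (split or regular kernel), denoising or shader evaluation. */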
1738         void thread_run(DeviceTask *task)
1739         {
1740                 if(task->type == DeviceTask::RENDER) {
1741                         RenderTile tile;
1742
1743                         bool branched = task->integrator_branched;
1744
1745                         /* Upload Bindless Mapping */
1746                         load_bindless_mapping();
1747
1748                         DeviceRequestedFeatures requested_features;
1749                         if(use_split_kernel()) {
1750                                 if(!use_adaptive_compilation()) {
1751                                         requested_features.max_closure = 64;
1752                                 }
1753
1754                                 if(split_kernel == NULL) {
1755                                         split_kernel = new CUDASplitKernel(this);
1756                                         split_kernel->load_kernels(requested_features);
1757                                 }
1758                         }
1759
1760                         /* keep rendering tiles until done */
1761                         while(task->acquire_tile(this, tile)) {
1762                                 if(tile.task == RenderTile::PATH_TRACE) {
1763                                         if(use_split_kernel()) {
1764                                                 device_memory void_buffer;
1765                                                 split_kernel->path_trace(task, tile, void_buffer, void_buffer);
1766                                         }
1767                                         else {
1768                                                 int start_sample = tile.start_sample;
1769                                                 int end_sample = tile.start_sample + tile.num_samples;
1770
1771                                                 for(int sample = start_sample; sample < end_sample; sample++) {
1772                                                         if(task->get_cancel()) {
1773                                                                 if(task->need_finish_queue == false)
1774                                                                         break;
1775                                                         }
1776
1777                                                         path_trace(tile, sample, branched);
1778
1779                                                         tile.sample = sample + 1;
1780
1781                                                         task->update_progress(&tile, tile.w*tile.h);
1782                                                 }
1783                                         }
1784                                 }
1785                                 else if(tile.task == RenderTile::DENOISE) {
1786                                         tile.sample = tile.start_sample + tile.num_samples;
1787
1788                                         denoise(tile, *task);
1789
1790                                         task->update_progress(&tile, tile.w*tile.h);
1791                                 }
1792
1793                                 task->release_tile(tile);
1794
1795                                 if(task->get_cancel()) {
1796                                         if(task->need_finish_queue == false)
1797                                                 break;
1798                                 }
1799                         }
1800                 }
1801                 else if(task->type == DeviceTask::SHADER) {
1802                         /* Upload Bindless Mapping */
1803                         load_bindless_mapping();
1804
1805                         shader(*task);
1806
1807                         cuda_push_context();
1808                         cuda_assert(cuCtxSynchronize());
1809                         cuda_pop_context();
1810                 }
1811         }
1812
1813         class CUDADeviceTask : public DeviceTask {
1814         public:
1815                 CUDADeviceTask(CUDADevice *device, DeviceTask& task)
1816                 : DeviceTask(task)
1817                 {
1818                         run = function_bind(&CUDADevice::thread_run, device, this);
1819                 }
1820         };
1821
1822         int get_split_task_count(DeviceTask& /*task*/)
1823         {
1824                 return 1;
1825         }
1826
1827         void task_add(DeviceTask& task)
1828         {
1829                 if(task.type == DeviceTask::FILM_CONVERT) {
1830                         /* must be done in main thread due to opengl access */
1831                         film_convert(task, task.buffer, task.rgba_byte, task.rgba_half);
1832
1833                         cuda_push_context();
1834                         cuda_assert(cuCtxSynchronize());
1835                         cuda_pop_context();
1836                 }
1837                 else {
1838                         task_pool.push(new CUDADeviceTask(this, task));
1839                 }
1840         }
1841
1842         void task_wait()
1843         {
1844                 task_pool.wait();
1845         }
1846
1847         void task_cancel()
1848         {
1849                 task_pool.cancel();
1850         }
1851
1852         friend class CUDASplitKernelFunction;
1853         friend class CUDASplitKernel;
1854 };
1855
1856 /* redefine the cuda_assert macro so it can be used outside of the CUDADevice class
1857  * now that the definition of that class is complete
1858  */
1859 #undef cuda_assert
1860 #define cuda_assert(stmt) \
1861         { \
1862                 CUresult result = stmt; \
1863                 \
1864                 if(result != CUDA_SUCCESS) { \
1865                         string message = string_printf("CUDA error: %s in %s", cuewErrorString(result), #stmt); \
1866                         if(device->error_msg == "") \
1867                                 device->error_msg = message; \
1868                         fprintf(stderr, "%s\n", message.c_str()); \
1869                         /*cuda_abort();*/ \
1870                         device->cuda_error_documentation(); \
1871                 } \
1872         } (void)0
1873
1874 /* split kernel */
1875
1876 class CUDASplitKernelFunction : public SplitKernelFunction {
1877         CUDADevice* device;
1878         CUfunction func;
1879 public:
1880         CUDASplitKernelFunction(CUDADevice *device, CUfunction func) : device(device), func(func) {}
1881
1882         /* enqueue the kernel, returns false if there is an error */
1883         bool enqueue(const KernelDimensions &dim, device_memory &/*kg*/, device_memory &/*data*/)
1884         {
1885                 return enqueue(dim, NULL);
1886         }
1887
1888         /* enqueue the kernel, returns false if there is an error */
1889         bool enqueue(const KernelDimensions &dim, void *args[])
1890         {
1891                 device->cuda_push_context();
1892
1893                 if(device->have_error())
1894                         return false;
1895
1896                 /* we ignore dim.local_size for now, as this is faster */
1897                 int threads_per_block;
1898                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, func));
1899
1900                 int xthreads = (int)sqrt(threads_per_block);
1901                 int ythreads = (int)sqrt(threads_per_block);
1902
1903                 int xblocks = (dim.global_size[0] + xthreads - 1)/xthreads;
1904                 int yblocks = (dim.global_size[1] + ythreads - 1)/ythreads;
1905
1906                 cuda_assert(cuFuncSetCacheConfig(func, CU_FUNC_CACHE_PREFER_L1));
1907
1908                 cuda_assert(cuLaunchKernel(func,
1909                                            xblocks , yblocks, 1, /* blocks */
1910                                            xthreads, ythreads, 1, /* threads */
1911                                            0, 0, args, 0));
1912
1913                 device->cuda_pop_context();
1914
1915                 return !device->have_error();
1916         }
1917 };
1918
1919 CUDASplitKernel::CUDASplitKernel(CUDADevice *device) : DeviceSplitKernel(device), device(device)
1920 {
1921 }
1922
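/* Query the split kernel state buffer size by launching kernel_cuda_state_buffer_size
 * with a single thread and reading the result back to the host. */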
1923 uint64_t CUDASplitKernel::state_buffer_size(device_memory& /*kg*/, device_memory& /*data*/, size_t num_threads)
1924 {
1925         device_vector<uint64_t> size_buffer;
1926         size_buffer.resize(1);
1927         device->mem_alloc(NULL, size_buffer, MEM_READ_WRITE);
1928
1929         device->cuda_push_context();
1930
1931         uint threads = num_threads;
1932         CUdeviceptr d_size = device->cuda_device_ptr(size_buffer.device_pointer);
1933
1934         struct args_t {
1935                 uint* num_threads;
1936                 CUdeviceptr* size;
1937         };
1938
1939         args_t args = {
1940                 &threads,
1941                 &d_size
1942         };
1943
1944         CUfunction state_buffer_size;
1945         cuda_assert(cuModuleGetFunction(&state_buffer_size, device->cuModule, "kernel_cuda_state_buffer_size"));
1946
1947         cuda_assert(cuLaunchKernel(state_buffer_size,
1948                                    1, 1, 1,
1949                                    1, 1, 1,
1950                                    0, 0, (void**)&args, 0));
1951
1952         device->cuda_pop_context();
1953
1954         device->mem_copy_from(size_buffer, 0, 1, 1, sizeof(uint64_t));
1955         device->mem_free(size_buffer);
1956
1957         return *size_buffer.get_data();
1958 }
1959
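/* Gather the per-tile arguments and launch kernel_cuda_path_trace_data_init to
 * initialize the split kernel state for this tile. */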
1960 bool CUDASplitKernel::enqueue_split_kernel_data_init(const KernelDimensions& dim,
1961                                     RenderTile& rtile,
1962                                     int num_global_elements,
1963                                     device_memory& /*kernel_globals*/,
1964                                     device_memory& /*kernel_data*/,
1965                                     device_memory& split_data,
1966                                     device_memory& ray_state,
1967                                     device_memory& queue_index,
1968                                     device_memory& use_queues_flag,
1969                                     device_memory& work_pool_wgs)
1970 {
1971         device->cuda_push_context();
1972
1973         CUdeviceptr d_split_data = device->cuda_device_ptr(split_data.device_pointer);
1974         CUdeviceptr d_ray_state = device->cuda_device_ptr(ray_state.device_pointer);
1975         CUdeviceptr d_queue_index = device->cuda_device_ptr(queue_index.device_pointer);
1976         CUdeviceptr d_use_queues_flag = device->cuda_device_ptr(use_queues_flag.device_pointer);
1977         CUdeviceptr d_work_pool_wgs = device->cuda_device_ptr(work_pool_wgs.device_pointer);
1978
1979         CUdeviceptr d_rng_state = device->cuda_device_ptr(rtile.rng_state);
1980         CUdeviceptr d_buffer = device->cuda_device_ptr(rtile.buffer);
1981
1982         int end_sample = rtile.start_sample + rtile.num_samples;
1983         int queue_size = dim.global_size[0] * dim.global_size[1];
1984
1985         struct args_t {
1986                 CUdeviceptr* split_data_buffer;
1987                 int* num_elements;
1988                 CUdeviceptr* ray_state;
1989                 CUdeviceptr* rng_state;
1990                 int* start_sample;
1991                 int* end_sample;
1992                 int* sx;
1993                 int* sy;
1994                 int* sw;
1995                 int* sh;
1996                 int* offset;
1997                 int* stride;
1998                 CUdeviceptr* queue_index;
1999                 int* queuesize;
2000                 CUdeviceptr* use_queues_flag;
2001                 CUdeviceptr* work_pool_wgs;
2002                 int* num_samples;
2003                 CUdeviceptr* buffer;
2004         };
2005
2006         args_t args = {
2007                 &d_split_data,
2008                 &num_global_elements,
2009                 &d_ray_state,
2010                 &d_rng_state,
2011                 &rtile.start_sample,
2012                 &end_sample,
2013                 &rtile.x,
2014                 &rtile.y,
2015                 &rtile.w,
2016                 &rtile.h,
2017                 &rtile.offset,
2018                 &rtile.stride,
2019                 &d_queue_index,
2020                 &queue_size,
2021                 &d_use_queues_flag,
2022                 &d_work_pool_wgs,
2023                 &rtile.num_samples,
2024                 &d_buffer
2025         };
2026
2027         CUfunction data_init;
2028         cuda_assert(cuModuleGetFunction(&data_init, device->cuModule, "kernel_cuda_path_trace_data_init"));
2029         if(device->have_error()) {
2030                 return false;
2031         }
2032
2033         CUDASplitKernelFunction(device, data_init).enqueue(dim, (void**)&args);
2034
2035         device->cuda_pop_context();
2036
2037         return !device->have_error();
2038 }
2039
2040 SplitKernelFunction* CUDASplitKernel::get_split_kernel_function(string kernel_name, const DeviceRequestedFeatures&)
2041 {
2042         CUfunction func;
2043
2044         device->cuda_push_context();
2045
2046         cuda_assert(cuModuleGetFunction(&func, device->cuModule, (string("kernel_cuda_") + kernel_name).data()));
2047         if(device->have_error()) {
2048                 device->cuda_error_message(string_printf("kernel \"kernel_cuda_%s\" not found in module", kernel_name.data()));
2049                 return NULL;
2050         }
2051
2052         device->cuda_pop_context();
2053
2054         return new CUDASplitKernelFunction(device, func);
2055 }
2056
2057 int2 CUDASplitKernel::split_kernel_local_size()
2058 {
2059         return make_int2(32, 1);
2060 }
2061
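/* Pick a 2D global size for the split kernel based on free device memory, budgeting
 * at most half of it for the split state buffer. */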
2062 int2 CUDASplitKernel::split_kernel_global_size(device_memory& kg, device_memory& data, DeviceTask * /*task*/)
2063 {
2064         size_t free;
2065         size_t total;
2066
2067         device->cuda_push_context();
2068         cuda_assert(cuMemGetInfo(&free, &total));
2069         device->cuda_pop_context();
2070
2071         VLOG(1) << "Maximum device allocation size: "
2072                 << string_human_readable_number(free) << " bytes. ("
2073                 << string_human_readable_size(free) << ").";
2074
2075         size_t num_elements = max_elements_for_max_buffer_size(kg, data, free / 2);
2076         size_t side = round_down((int)sqrt(num_elements), 32);
2077         int2 global_size = make_int2(side, round_down(num_elements / side, 16));
2078         VLOG(1) << "Global size: " << global_size << ".";
2079         return global_size;
2080 }
2081
2082 bool device_cuda_init(void)
2083 {
2084 #ifdef WITH_CUDA_DYNLOAD
2085         static bool initialized = false;
2086         static bool result = false;
2087
2088         if(initialized)
2089                 return result;
2090
2091         initialized = true;
2092         int cuew_result = cuewInit();
2093         if(cuew_result == CUEW_SUCCESS) {
2094                 VLOG(1) << "CUEW initialization succeeded";
2095                 if(CUDADevice::have_precompiled_kernels()) {
2096                         VLOG(1) << "Found precompiled kernels";
2097                         result = true;
2098                 }
2099 #ifndef _WIN32
2100                 else if(cuewCompilerPath() != NULL) {
2101                         VLOG(1) << "Found CUDA compiler " << cuewCompilerPath();
2102                         result = true;
2103                 }
2104                 else {
2105                         VLOG(1) << "Neither precompiled kernels nor CUDA compiler was found,"
2106                                 << " unable to use CUDA";
2107                 }
2108 #endif
2109         }
2110         else {
2111                 VLOG(1) << "CUEW initialization failed: "
2112                         << ((cuew_result == CUEW_ERROR_ATEXIT_FAILED)
2113                             ? "Error setting up atexit() handler"
2114                             : "Error opening the library");
2115         }
2116
2117         return result;
2118 #else  /* WITH_CUDA_DYNLOAD */
2119         return true;
2120 #endif /* WITH_CUDA_DYNLOAD */
2121 }
2122
2123 Device *device_cuda_create(DeviceInfo& info, Stats &stats, bool background)
2124 {
2125         return new CUDADevice(info, stats, background);
2126 }
2127
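/* Enumerate CUDA devices with compute capability 2.0 or newer, listing display
 * devices (those with a kernel execution timeout) after compute-only devices. */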
2128 void device_cuda_info(vector<DeviceInfo>& devices)
2129 {
2130         CUresult result;
2131         int count = 0;
2132
2133         result = cuInit(0);
2134         if(result != CUDA_SUCCESS) {
2135                 if(result != CUDA_ERROR_NO_DEVICE)
2136                         fprintf(stderr, "CUDA cuInit: %s\n", cuewErrorString(result));
2137                 return;
2138         }
2139
2140         result = cuDeviceGetCount(&count);
2141         if(result != CUDA_SUCCESS) {
2142                 fprintf(stderr, "CUDA cuDeviceGetCount: %s\n", cuewErrorString(result));
2143                 return;
2144         }
2145
2146         vector<DeviceInfo> display_devices;
2147
2148         for(int num = 0; num < count; num++) {
2149                 char name[256];
2150                 int attr;
2151
2152                 if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS)
2153                         continue;
2154
2155                 int major;
2156                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, num);
2157                 if(major < 2) {
2158                         continue;
2159                 }
2160
2161                 DeviceInfo info;
2162
2163                 info.type = DEVICE_CUDA;
2164                 info.description = string(name);
2165                 info.num = num;
2166
2167                 info.advanced_shading = (major >= 2);
2168                 info.has_bindless_textures = (major >= 3);
2169                 info.pack_images = false;
2170
2171                 int pci_location[3] = {0, 0, 0};
2172                 cuDeviceGetAttribute(&pci_location[0], CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID, num);
2173                 cuDeviceGetAttribute(&pci_location[1], CU_DEVICE_ATTRIBUTE_PCI_BUS_ID, num);
2174                 cuDeviceGetAttribute(&pci_location[2], CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID, num);
2175                 info.id = string_printf("CUDA_%s_%04x:%02x:%02x",
2176                                         name,
2177                                         (unsigned int)pci_location[0],
2178                                         (unsigned int)pci_location[1],
2179                                         (unsigned int)pci_location[2]);
2180
2181                 /* if device has a kernel timeout, assume it is used for display */
2182                 if(cuDeviceGetAttribute(&attr, CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT, num) == CUDA_SUCCESS && attr == 1) {
2183                         info.description += " (Display)";
2184                         info.display_device = true;
2185                         display_devices.push_back(info);
2186                 }
2187                 else
2188                         devices.push_back(info);
2189         }
2190
2191         if(!display_devices.empty())
2192                 devices.insert(devices.end(), display_devices.begin(), display_devices.end());
2193 }
2194
2195 string device_cuda_capabilities(void)
2196 {
2197         CUresult result = cuInit(0);
2198         if(result != CUDA_SUCCESS) {
2199                 if(result != CUDA_ERROR_NO_DEVICE) {
2200                         return string("Error initializing CUDA: ") + cuewErrorString(result);
2201                 }
2202                 return "No CUDA device found\n";
2203         }
2204
2205         int count;
2206         result = cuDeviceGetCount(&count);
2207         if(result != CUDA_SUCCESS) {
2208                 return string("Error getting devices: ") + cuewErrorString(result);
2209         }
2210
2211         string capabilities = "";
2212         for(int num = 0; num < count; num++) {
2213                 char name[256];
2214                 if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS) {
2215                         continue;
2216                 }
2217                 capabilities += string("\t") + name + "\n";
2218                 int value;
2219 #define GET_ATTR(attr) \
2220                 { \
2221                         if(cuDeviceGetAttribute(&value, \
2222                                                 CU_DEVICE_ATTRIBUTE_##attr, \
2223                                                 num) == CUDA_SUCCESS) \
2224                         { \
2225                                 capabilities += string_printf("\t\tCU_DEVICE_ATTRIBUTE_" #attr "\t\t\t%d\n", \
2226                                                               value); \
2227                         } \
2228                 } (void)0
2229                 /* TODO(sergey): Strip all attributes which are not useful for us
2230                  * or do not depend on the driver.
2231                  */
2232                 GET_ATTR(MAX_THREADS_PER_BLOCK);
2233                 GET_ATTR(MAX_BLOCK_DIM_X);
2234                 GET_ATTR(MAX_BLOCK_DIM_Y);
2235                 GET_ATTR(MAX_BLOCK_DIM_Z);
2236                 GET_ATTR(MAX_GRID_DIM_X);
2237                 GET_ATTR(MAX_GRID_DIM_Y);
2238                 GET_ATTR(MAX_GRID_DIM_Z);
2239                 GET_ATTR(MAX_SHARED_MEMORY_PER_BLOCK);
2240                 GET_ATTR(SHARED_MEMORY_PER_BLOCK);
2241                 GET_ATTR(TOTAL_CONSTANT_MEMORY);
2242                 GET_ATTR(WARP_SIZE);
2243                 GET_ATTR(MAX_PITCH);
2244                 GET_ATTR(MAX_REGISTERS_PER_BLOCK);
2245                 GET_ATTR(REGISTERS_PER_BLOCK);
2246                 GET_ATTR(CLOCK_RATE);
2247                 GET_ATTR(TEXTURE_ALIGNMENT);
2248                 GET_ATTR(GPU_OVERLAP);
2249                 GET_ATTR(MULTIPROCESSOR_COUNT);
2250                 GET_ATTR(KERNEL_EXEC_TIMEOUT);
2251                 GET_ATTR(INTEGRATED);
2252                 GET_ATTR(CAN_MAP_HOST_MEMORY);
2253                 GET_ATTR(COMPUTE_MODE);
2254                 GET_ATTR(MAXIMUM_TEXTURE1D_WIDTH);
2255                 GET_ATTR(MAXIMUM_TEXTURE2D_WIDTH);
2256                 GET_ATTR(MAXIMUM_TEXTURE2D_HEIGHT);
2257                 GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH);
2258                 GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT);
2259                 GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH);
2260                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_WIDTH);
2261                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_HEIGHT);
2262                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_LAYERS);
2263                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_WIDTH);
2264                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_HEIGHT);
2265                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES);
2266                 GET_ATTR(SURFACE_ALIGNMENT);
2267                 GET_ATTR(CONCURRENT_KERNELS);
2268                 GET_ATTR(ECC_ENABLED);
2269                 GET_ATTR(TCC_DRIVER);
2270                 GET_ATTR(MEMORY_CLOCK_RATE);
2271                 GET_ATTR(GLOBAL_MEMORY_BUS_WIDTH);
2272                 GET_ATTR(L2_CACHE_SIZE);
2273                 GET_ATTR(MAX_THREADS_PER_MULTIPROCESSOR);
2274                 GET_ATTR(ASYNC_ENGINE_COUNT);
2275                 GET_ATTR(UNIFIED_ADDRESSING);
2276                 GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_WIDTH);
2277                 GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_LAYERS);
2278                 GET_ATTR(CAN_TEX2D_GATHER);
2279                 GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_WIDTH);
2280                 GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_HEIGHT);
2281                 GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE);
2282                 GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE);
2283                 GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE);
2284                 GET_ATTR(TEXTURE_PITCH_ALIGNMENT);
2285                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_WIDTH);
2286                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH);
2287                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS);
2288                 GET_ATTR(MAXIMUM_SURFACE1D_WIDTH);
2289                 GET_ATTR(MAXIMUM_SURFACE2D_WIDTH);
2290                 GET_ATTR(MAXIMUM_SURFACE2D_HEIGHT);
2291                 GET_ATTR(MAXIMUM_SURFACE3D_WIDTH);
2292                 GET_ATTR(MAXIMUM_SURFACE3D_HEIGHT);
2293                 GET_ATTR(MAXIMUM_SURFACE3D_DEPTH);
2294                 GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_WIDTH);
2295                 GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_LAYERS);
2296                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_WIDTH);
2297                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_HEIGHT);
2298                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_LAYERS);
2299                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_WIDTH);
2300                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH);
2301                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS);
2302                 GET_ATTR(MAXIMUM_TEXTURE1D_LINEAR_WIDTH);
2303                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_WIDTH);
2304                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_HEIGHT);
2305                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_PITCH);
2306                 GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH);
2307                 GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT);
2308                 GET_ATTR(COMPUTE_CAPABILITY_MAJOR);
2309                 GET_ATTR(COMPUTE_CAPABILITY_MINOR);
2310                 GET_ATTR(MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH);
2311                 GET_ATTR(STREAM_PRIORITIES_SUPPORTED);
2312                 GET_ATTR(GLOBAL_L1_CACHE_SUPPORTED);
2313                 GET_ATTR(LOCAL_L1_CACHE_SUPPORTED);
2314                 GET_ATTR(MAX_SHARED_MEMORY_PER_MULTIPROCESSOR);
2315                 GET_ATTR(MAX_REGISTERS_PER_MULTIPROCESSOR);
2316                 GET_ATTR(MANAGED_MEMORY);
2317                 GET_ATTR(MULTI_GPU_BOARD);
2318                 GET_ATTR(MULTI_GPU_BOARD_GROUP_ID);
2319 #undef GET_ATTR
2320                 capabilities += "\n";
2321         }
2322
2323         return capabilities;
2324 }
2325
2326 CCL_NAMESPACE_END