Code refactor: add WorkTile struct for passing work to kernel.
[blender-staging.git] / intern / cycles / device / device_cuda.cpp
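The WorkTile struct named in the commit subject is defined in the kernel headers rather than in this file. As a rough, illustrative sketch of the kind of per-tile work description being handed to the kernel (field names here are assumptions, not copied from the actual header):

	struct WorkTile {
		uint x, y, w, h;      /* Tile rectangle within the render buffer. */
		uint start_sample;    /* First sample to render in this launch. */
		uint num_samples;     /* Number of samples to render in this launch. */
		uint offset, stride;  /* Addressing of the tile inside the render buffer. */

		float *buffer;        /* Render buffer (device memory). */
		uint *rng_state;      /* Per-pixel RNG state (device memory). */
	};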
1 /*
2  * Copyright 2011-2013 Blender Foundation
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16
17 #include <climits>
18 #include <limits.h>
19 #include <stdio.h>
20 #include <stdlib.h>
21 #include <string.h>
22
23 #include "device/device.h"
24 #include "device/device_denoising.h"
25 #include "device/device_intern.h"
26 #include "device/device_split_kernel.h"
27
28 #include "render/buffers.h"
29
30 #include "kernel/filter/filter_defines.h"
31
32 #ifdef WITH_CUDA_DYNLOAD
33 #  include "cuew.h"
34 #else
35 #  include "util/util_opengl.h"
36 #  include <cuda.h>
37 #  include <cudaGL.h>
38 #endif
39 #include "util/util_debug.h"
40 #include "util/util_logging.h"
41 #include "util/util_map.h"
42 #include "util/util_md5.h"
43 #include "util/util_opengl.h"
44 #include "util/util_path.h"
45 #include "util/util_string.h"
46 #include "util/util_system.h"
47 #include "util/util_types.h"
48 #include "util/util_time.h"
49
50 #include "kernel/split/kernel_split_data_types.h"
51
52 CCL_NAMESPACE_BEGIN
53
54 #ifndef WITH_CUDA_DYNLOAD
55
56 /* Transparently implement some functions, so the majority of the file does not need
57  * to worry about the difference between dynamically loaded and linked CUDA at all.
58  */
59
60 namespace {
61
62 const char *cuewErrorString(CUresult result)
63 {
64         /* We can only give the error code here without major code duplication. That
65          * should be enough, since dynamic loading is only disabled by folks
66          * who know what they're doing anyway.
67          *
68          * NOTE: Avoid calling this from several threads.
69          */
70         static string error;
71         error = string_printf("%d", result);
72         return error.c_str();
73 }
74
75 const char *cuewCompilerPath(void)
76 {
77         return CYCLES_CUDA_NVCC_EXECUTABLE;
78 }
79
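/* Convert CUDA_VERSION (e.g. 8000 for CUDA 8.0, 7050 for CUDA 7.5) into the
 * two-digit major/minor form used elsewhere in this file (80, 75).
 */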
80 int cuewCompilerVersion(void)
81 {
82         return (CUDA_VERSION / 100) + (CUDA_VERSION % 100 / 10);
83 }
84
85 }  /* namespace */
86 #endif  /* WITH_CUDA_DYNLOAD */
87
88 class CUDADevice;
89
90 class CUDASplitKernel : public DeviceSplitKernel {
91         CUDADevice *device;
92 public:
93         explicit CUDASplitKernel(CUDADevice *device);
94
95         virtual uint64_t state_buffer_size(device_memory& kg, device_memory& data, size_t num_threads);
96
97         virtual bool enqueue_split_kernel_data_init(const KernelDimensions& dim,
98                                                     RenderTile& rtile,
99                                                     int num_global_elements,
100                                                     device_memory& kernel_globals,
101                                                     device_memory& kernel_data_,
102                                                     device_memory& split_data,
103                                                     device_memory& ray_state,
104                                                     device_memory& queue_index,
105                                                     device_memory& use_queues_flag,
106                                                     device_memory& work_pool_wgs);
107
108         virtual SplitKernelFunction* get_split_kernel_function(const string& kernel_name,
109                                                                const DeviceRequestedFeatures&);
110         virtual int2 split_kernel_local_size();
111         virtual int2 split_kernel_global_size(device_memory& kg, device_memory& data, DeviceTask *task);
112 };
113
114 /* Utility to push/pop CUDA context. */
115 class CUDAContextScope {
116 public:
117         CUDAContextScope(CUDADevice *device);
118         ~CUDAContextScope();
119
120 private:
121         CUDADevice *device;
122 };
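/* Note: the constructor/destructor bodies are defined later in this file; they are
 * expected to wrap cuCtxPushCurrent(device->cuContext) and cuCtxPopCurrent(NULL), so
 * CUDA calls made inside a scope run with this device's context current.
 */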
123
124 class CUDADevice : public Device
125 {
126 public:
127         DedicatedTaskPool task_pool;
128         CUdevice cuDevice;
129         CUcontext cuContext;
130         CUmodule cuModule, cuFilterModule;
131         map<device_ptr, bool> tex_interp_map;
132         map<device_ptr, uint> tex_bindless_map;
133         int cuDevId;
134         int cuDevArchitecture;
135         bool first_error;
136         CUDASplitKernel *split_kernel;
137
138         struct PixelMem {
139                 GLuint cuPBO;
140                 CUgraphicsResource cuPBOresource;
141                 GLuint cuTexId;
142                 int w, h;
143         };
144
145         map<device_ptr, PixelMem> pixel_mem_map;
146
147         /* Bindless Textures */
148         device_vector<uint> bindless_mapping;
149         bool need_bindless_mapping;
150
151         CUdeviceptr cuda_device_ptr(device_ptr mem)
152         {
153                 return (CUdeviceptr)mem;
154         }
155
156         static bool have_precompiled_kernels()
157         {
158                 string cubins_path = path_get("lib");
159                 return path_exists(cubins_path);
160         }
161
162         virtual bool show_samples() const
163         {
164                 /* The CUDADevice only processes one tile at a time, so showing samples is fine. */
165                 return true;
166         }
167
168 /*#ifdef NDEBUG
169 #define cuda_abort()
170 #else
171 #define cuda_abort() abort()
172 #endif*/
173         void cuda_error_documentation()
174         {
175                 if(first_error) {
176                         fprintf(stderr, "\nRefer to the Cycles GPU rendering documentation for possible solutions:\n");
177                         fprintf(stderr, "https://docs.blender.org/manual/en/dev/render/cycles/gpu_rendering.html\n\n");
178                         first_error = false;
179                 }
180         }
181
182 #define cuda_assert(stmt) \
183         { \
184                 CUresult result = stmt; \
185                 \
186                 if(result != CUDA_SUCCESS) { \
187                         string message = string_printf("CUDA error: %s in %s, line %d", cuewErrorString(result), #stmt, __LINE__); \
188                         if(error_msg == "") \
189                                 error_msg = message; \
190                         fprintf(stderr, "%s\n", message.c_str()); \
191                         /*cuda_abort();*/ \
192                         cuda_error_documentation(); \
193                 } \
194         } (void)0
195
196         bool cuda_error_(CUresult result, const string& stmt)
197         {
198                 if(result == CUDA_SUCCESS)
199                         return false;
200
201                 string message = string_printf("CUDA error at %s: %s", stmt.c_str(), cuewErrorString(result));
202                 if(error_msg == "")
203                         error_msg = message;
204                 fprintf(stderr, "%s\n", message.c_str());
205                 cuda_error_documentation();
206                 return true;
207         }
208
209 #define cuda_error(stmt) cuda_error_(stmt, #stmt)
210
211         void cuda_error_message(const string& message)
212         {
213                 if(error_msg == "")
214                         error_msg = message;
215                 fprintf(stderr, "%s\n", message.c_str());
216                 cuda_error_documentation();
217         }
218
219         CUDADevice(DeviceInfo& info, Stats &stats, bool background_)
220         : Device(info, stats, background_)
221         {
222                 first_error = true;
223                 background = background_;
224
225                 cuDevId = info.num;
226                 cuDevice = 0;
227                 cuContext = 0;
228
229                 cuModule = 0;
230                 cuFilterModule = 0;
231
232                 split_kernel = NULL;
233
234                 need_bindless_mapping = false;
235
236                 /* initialize */
237                 if(cuda_error(cuInit(0)))
238                         return;
239
240                 /* setup device and context */
241                 if(cuda_error(cuDeviceGet(&cuDevice, cuDevId)))
242                         return;
243
244                 CUresult result;
245
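                /* For interactive rendering, try to create a CUDA/GL interop context first;
                 * fall back to a plain context (and background mode) if that fails. */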
246                 if(background) {
247                         result = cuCtxCreate(&cuContext, 0, cuDevice);
248                 }
249                 else {
250                         result = cuGLCtxCreate(&cuContext, 0, cuDevice);
251
252                         if(result != CUDA_SUCCESS) {
253                                 result = cuCtxCreate(&cuContext, 0, cuDevice);
254                                 background = true;
255                         }
256                 }
257
258                 if(cuda_error_(result, "cuCtxCreate"))
259                         return;
260
261                 int major, minor;
262                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
263                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
264                 cuDevArchitecture = major*100 + minor*10;
265
266                 /* Pop context set by cuCtxCreate. */
267                 cuCtxPopCurrent(NULL);
268         }
269
270         ~CUDADevice()
271         {
272                 task_pool.stop();
273
274                 delete split_kernel;
275
276                 if(info.has_bindless_textures) {
277                         tex_free(bindless_mapping);
278                 }
279
280                 cuda_assert(cuCtxDestroy(cuContext));
281         }
282
283         bool support_device(const DeviceRequestedFeatures& /*requested_features*/)
284         {
285                 int major, minor;
286                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
287                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
288
289                 /* We only support sm_20 and above */
290                 if(major < 2) {
291                         cuda_error_message(string_printf("CUDA device supported only with compute capability 2.0 or up, found %d.%d.", major, minor));
292                         return false;
293                 }
294
295                 return true;
296         }
297
298         bool use_adaptive_compilation()
299         {
300                 return DebugFlags().cuda.adaptive_compile;
301         }
302
303         bool use_split_kernel()
304         {
305                 return DebugFlags().cuda.split_kernel;
306         }
307
308         /* Common NVCC flags which stay the same regardless of shading model or
309          * kernel sources MD5, and depend only on compiler and compilation settings.
310          */
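        /* For example, a 64-bit build against CUDA 8.0 produces roughly:
         *   -m64 --ptxas-options="-v" --use_fast_math -DNVCC -D__KERNEL_CUDA_VERSION__=80 -I"<source dir>"
         */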
311         string compile_kernel_get_common_cflags(
312                 const DeviceRequestedFeatures& requested_features,
313                 bool filter=false, bool split=false)
314         {
315                 const int cuda_version = cuewCompilerVersion();
316                 const int machine = system_cpu_bits();
317                 const string source_path = path_get("source");
318                 const string include_path = source_path;
319                 string cflags = string_printf("-m%d "
320                                               "--ptxas-options=\"-v\" "
321                                               "--use_fast_math "
322                                               "-DNVCC "
323                                               "-D__KERNEL_CUDA_VERSION__=%d "
324                                                "-I\"%s\"",
325                                               machine,
326                                               cuda_version,
327                                               include_path.c_str());
328                 if(!filter && use_adaptive_compilation()) {
329                         cflags += " " + requested_features.get_build_options();
330                 }
331                 const char *extra_cflags = getenv("CYCLES_CUDA_EXTRA_CFLAGS");
332                 if(extra_cflags) {
333                         cflags += string(" ") + string(extra_cflags);
334                 }
335 #ifdef WITH_CYCLES_DEBUG
336                 cflags += " -D__KERNEL_DEBUG__";
337 #endif
338
339                 if(split) {
340                         cflags += " -D__SPLIT__";
341                 }
342
343                 return cflags;
344         }
345
346         bool compile_check_compiler() {
347                 const char *nvcc = cuewCompilerPath();
348                 if(nvcc == NULL) {
349                         cuda_error_message("CUDA nvcc compiler not found. "
350                                            "Install CUDA toolkit in default location.");
351                         return false;
352                 }
353                 const int cuda_version = cuewCompilerVersion();
354                 VLOG(1) << "Found nvcc " << nvcc
355                         << ", CUDA version " << cuda_version
356                         << ".";
357                 const int major = cuda_version / 10, minor = cuda_version % 10;
358                 if(cuda_version == 0) {
359                         cuda_error_message("CUDA nvcc compiler version could not be parsed.");
360                         return false;
361                 }
362                 if(cuda_version < 80) {
363                         printf("Unsupported CUDA version %d.%d detected, "
364                                "you need CUDA 8.0 or newer.\n",
365                                major, minor);
366                         return false;
367                 }
368                 else if(cuda_version != 80) {
369                         printf("CUDA version %d.%d detected, build may succeed but only "
370                                "CUDA 8.0 is officially supported.\n",
371                                major, minor);
372                 }
373                 return true;
374         }
375
376         string compile_kernel(const DeviceRequestedFeatures& requested_features,
377                               bool filter=false, bool split=false)
378         {
379                 const char *name, *source;
380                 if(filter) {
381                         name = "filter";
382                         source = "filter.cu";
383                 }
384                 else if(split) {
385                         name = "kernel_split";
386                         source = "kernel_split.cu";
387                 }
388                 else {
389                         name = "kernel";
390                         source = "kernel.cu";
391                 }
392                 /* Compute cubin name. */
393                 int major, minor;
394                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
395                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
396
397                 /* Attempt to use kernel provided with Blender. */
398                 if(!use_adaptive_compilation()) {
399                         const string cubin = path_get(string_printf("lib/%s_sm_%d%d.cubin",
400                                                                     name, major, minor));
401                         VLOG(1) << "Testing for pre-compiled kernel " << cubin << ".";
402                         if(path_exists(cubin)) {
403                                 VLOG(1) << "Using precompiled kernel.";
404                                 return cubin;
405                         }
406                 }
407
408                 const string common_cflags =
409                         compile_kernel_get_common_cflags(requested_features, filter, split);
410
411                 /* Try to use locally compiled kernel. */
412                 const string source_path = path_get("source");
413                 const string kernel_md5 = path_files_md5_hash(source_path);
414
415                 /* We include cflags into the MD5 so that changing the CUDA toolkit or other
416                  * compiler command line arguments makes sure the cubin gets re-built.
417                  */
418                 const string cubin_md5 = util_md5_string(kernel_md5 + common_cflags);
419
420                 const string cubin_file = string_printf("cycles_%s_sm%d%d_%s.cubin",
421                                                         name, major, minor,
422                                                         cubin_md5.c_str());
423                 const string cubin = path_cache_get(path_join("kernels", cubin_file));
424                 VLOG(1) << "Testing for locally compiled kernel " << cubin << ".";
425                 if(path_exists(cubin)) {
426                         VLOG(1) << "Using locally compiled kernel.";
427                         return cubin;
428                 }
429
430 #ifdef _WIN32
431                 if(have_precompiled_kernels()) {
432                         if(major < 2) {
433                                 cuda_error_message(string_printf(
434                                         "CUDA device requires compute capability 2.0 or up, "
435                                         "found %d.%d. Your GPU is not supported.",
436                                         major, minor));
437                         }
438                         else {
439                                 cuda_error_message(string_printf(
440                                         "CUDA binary kernel for this graphics card compute "
441                                         "capability (%d.%d) not found.",
442                                         major, minor));
443                         }
444                         return "";
445                 }
446 #endif
447
448                 /* Compile. */
449                 if(!compile_check_compiler()) {
450                         return "";
451                 }
452                 const char *nvcc = cuewCompilerPath();
453                 const string kernel = path_join(
454                         path_join(source_path, "kernel"),
455                         path_join("kernels",
456                                   path_join("cuda", source)));
457                 double starttime = time_dt();
458                 printf("Compiling CUDA kernel ...\n");
459
460                 path_create_directories(cubin);
461
462                 string command = string_printf("\"%s\" "
463                                                "-arch=sm_%d%d "
464                                                "--cubin \"%s\" "
465                                                "-o \"%s\" "
466                                                "%s ",
467                                                nvcc,
468                                                major, minor,
469                                                kernel.c_str(),
470                                                cubin.c_str(),
471                                                common_cflags.c_str());
472
473                 printf("%s\n", command.c_str());
474
475                 if(system(command.c_str()) == -1) {
476                         cuda_error_message("Failed to execute compilation command, "
477                                            "see console for details.");
478                         return "";
479                 }
480
481                 /* Verify that compilation succeeded. */
482                 if(!path_exists(cubin)) {
483                         cuda_error_message("CUDA kernel compilation failed, "
484                                            "see console for details.");
485                         return "";
486                 }
487
488                 printf("Kernel compilation finished in %.2lfs.\n", time_dt() - starttime);
489
490                 return cubin;
491         }
492
493         bool load_kernels(const DeviceRequestedFeatures& requested_features)
494         {
495                 /* TODO(sergey): Support kernels re-load for CUDA devices.
496                  *
497                  * Currently re-loading kernel will invalidate memory pointers,
498                  * causing problems in cuCtxSynchronize.
499                  */
500                 if(cuFilterModule && cuModule) {
501                         VLOG(1) << "Skipping kernel reload, not currently supported.";
502                         return true;
503                 }
504
505                 /* check if cuda init succeeded */
506                 if(cuContext == 0)
507                         return false;
508
509                 /* check if GPU is supported */
510                 if(!support_device(requested_features))
511                         return false;
512
513                 /* get kernel */
514                 string cubin = compile_kernel(requested_features, false, use_split_kernel());
515                 if(cubin == "")
516                         return false;
517
518                 string filter_cubin = compile_kernel(requested_features, true, false);
519                 if(filter_cubin == "")
520                         return false;
521
522                 /* open module */
523                 CUDAContextScope scope(this);
524
525                 string cubin_data;
526                 CUresult result;
527
528                 if(path_read_text(cubin, cubin_data))
529                         result = cuModuleLoadData(&cuModule, cubin_data.c_str());
530                 else
531                         result = CUDA_ERROR_FILE_NOT_FOUND;
532
533                 if(cuda_error_(result, "cuModuleLoad"))
534                         cuda_error_message(string_printf("Failed loading CUDA kernel %s.", cubin.c_str()));
535
536                 if(path_read_text(filter_cubin, cubin_data))
537                         result = cuModuleLoadData(&cuFilterModule, cubin_data.c_str());
538                 else
539                         result = CUDA_ERROR_FILE_NOT_FOUND;
540
541                 if(cuda_error_(result, "cuModuleLoad"))
542                         cuda_error_message(string_printf("Failed loading CUDA kernel %s.", filter_cubin.c_str()));
543
544                 return (result == CUDA_SUCCESS);
545         }
546
547         void load_bindless_mapping()
548         {
549                 if(info.has_bindless_textures && need_bindless_mapping) {
550                         tex_free(bindless_mapping);
551                         tex_alloc("__bindless_mapping", bindless_mapping, INTERPOLATION_NONE, EXTENSION_REPEAT);
552                         need_bindless_mapping = false;
553                 }
554         }
555
556         void mem_alloc(const char *name, device_memory& mem, MemoryType /*type*/)
557         {
558                 CUDAContextScope scope(this);
559
560                 if(name) {
561                         VLOG(1) << "Buffer allocate: " << name << ", "
562                                 << string_human_readable_number(mem.memory_size()) << " bytes. ("
563                                 << string_human_readable_size(mem.memory_size()) << ")";
564                 }
565
566                 CUdeviceptr device_pointer;
567                 size_t size = mem.memory_size();
568                 cuda_assert(cuMemAlloc(&device_pointer, size));
569                 mem.device_pointer = (device_ptr)device_pointer;
570                 mem.device_size = size;
571                 stats.mem_alloc(size);
572         }
573
574         void mem_copy_to(device_memory& mem)
575         {
576                 CUDAContextScope scope(this);
577
578                 if(mem.device_pointer)
579                         cuda_assert(cuMemcpyHtoD(cuda_device_ptr(mem.device_pointer), (void*)mem.data_pointer, mem.memory_size()));
580         }
581
582         void mem_copy_from(device_memory& mem, int y, int w, int h, int elem)
583         {
584                 CUDAContextScope scope(this);
585                 size_t offset = elem*y*w;
586                 size_t size = elem*w*h;
587
588                 if(mem.device_pointer) {
589                         cuda_assert(cuMemcpyDtoH((uchar*)mem.data_pointer + offset,
590                                                  (CUdeviceptr)(mem.device_pointer + offset), size));
591                 }
592                 else {
593                         memset((char*)mem.data_pointer + offset, 0, size);
594                 }
595         }
596
597         void mem_zero(device_memory& mem)
598         {
599                 if(mem.data_pointer) {
600                         memset((void*)mem.data_pointer, 0, mem.memory_size());
601                 }
602
603                 if(mem.device_pointer) {
604                         CUDAContextScope scope(this);
605                         cuda_assert(cuMemsetD8(cuda_device_ptr(mem.device_pointer), 0, mem.memory_size()));
606                 }
607         }
608
609         void mem_free(device_memory& mem)
610         {
611                 if(mem.device_pointer) {
612                         CUDAContextScope scope(this);
613                         cuda_assert(cuMemFree(cuda_device_ptr(mem.device_pointer)));
614
615                         mem.device_pointer = 0;
616
617                         stats.mem_free(mem.device_size);
618                         mem.device_size = 0;
619                 }
620         }
621
622         virtual device_ptr mem_alloc_sub_ptr(device_memory& mem, int offset, int /*size*/, MemoryType /*type*/)
623         {
624                 return (device_ptr) (((char*) mem.device_pointer) + mem.memory_elements_size(offset));
625         }
626
627         void const_copy_to(const char *name, void *host, size_t size)
628         {
629                 CUDAContextScope scope(this);
630                 CUdeviceptr mem;
631                 size_t bytes;
632
633                 cuda_assert(cuModuleGetGlobal(&mem, &bytes, cuModule, name));
634                 //assert(bytes == size);
635                 cuda_assert(cuMemcpyHtoD(mem, host, size));
636         }
637
638         void tex_alloc(const char *name,
639                        device_memory& mem,
640                        InterpolationType interpolation,
641                        ExtensionType extension)
642         {
643                 CUDAContextScope scope(this);
644
645                 VLOG(1) << "Texture allocate: " << name << ", "
646                         << string_human_readable_number(mem.memory_size()) << " bytes. ("
647                         << string_human_readable_size(mem.memory_size()) << ")";
648
649                 /* Check if we are on sm_30 or above.
650                  * We use arrays and bindless textures for storage there. */
651                 bool has_bindless_textures = info.has_bindless_textures;
652
653                 /* General variables for both architectures */
654                 string bind_name = name;
655                 size_t dsize = datatype_size(mem.data_type);
656                 size_t size = mem.memory_size();
657
658                 CUaddress_mode address_mode = CU_TR_ADDRESS_MODE_WRAP;
659                 switch(extension) {
660                         case EXTENSION_REPEAT:
661                                 address_mode = CU_TR_ADDRESS_MODE_WRAP;
662                                 break;
663                         case EXTENSION_EXTEND:
664                                 address_mode = CU_TR_ADDRESS_MODE_CLAMP;
665                                 break;
666                         case EXTENSION_CLIP:
667                                 address_mode = CU_TR_ADDRESS_MODE_BORDER;
668                                 break;
669                         default:
670                                 assert(0);
671                                 break;
672                 }
673
674                 CUfilter_mode filter_mode;
675                 if(interpolation == INTERPOLATION_CLOSEST) {
676                         filter_mode = CU_TR_FILTER_MODE_POINT;
677                 }
678                 else {
679                         filter_mode = CU_TR_FILTER_MODE_LINEAR;
680                 }
681
682                 CUarray_format_enum format;
683                 switch(mem.data_type) {
684                         case TYPE_UCHAR: format = CU_AD_FORMAT_UNSIGNED_INT8; break;
685                         case TYPE_UINT: format = CU_AD_FORMAT_UNSIGNED_INT32; break;
686                         case TYPE_INT: format = CU_AD_FORMAT_SIGNED_INT32; break;
687                         case TYPE_FLOAT: format = CU_AD_FORMAT_FLOAT; break;
688                         case TYPE_HALF: format = CU_AD_FORMAT_HALF; break;
689                         default: assert(0); return;
690                 }
691
692                 /* General variables for Fermi */
693                 CUtexref texref = NULL;
694
695                 if(!has_bindless_textures) {
696                         if(mem.data_depth > 1) {
697                                 /* Kernel uses different bind names for 2d and 3d float textures,
698                                  * so we have to adjust a couple of things here.
699                                  */
700                                 vector<string> tokens;
701                                 string_split(tokens, name, "_");
702                                 bind_name = string_printf("__tex_image_%s_3d_%s",
703                                                           tokens[2].c_str(),
704                                                           tokens[3].c_str());
705                         }
706
707                         cuda_assert(cuModuleGetTexRef(&texref, cuModule, bind_name.c_str()));
708
709                         if(!texref) {
710                                 return;
711                         }
712                 }
713
714                 /* Data Storage */
715                 if(interpolation == INTERPOLATION_NONE) {
716                         if(has_bindless_textures) {
717                                 mem_alloc(NULL, mem, MEM_READ_ONLY);
718                                 mem_copy_to(mem);
719
720                                 CUdeviceptr cumem;
721                                 size_t cubytes;
722
723                                 cuda_assert(cuModuleGetGlobal(&cumem, &cubytes, cuModule, bind_name.c_str()));
724
725                                 if(cubytes == 8) {
726                                         /* 64 bit device pointer */
727                                         uint64_t ptr = mem.device_pointer;
728                                         cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
729                                 }
730                                 else {
731                                         /* 32 bit device pointer */
732                                         uint32_t ptr = (uint32_t)mem.device_pointer;
733                                         cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
734                                 }
735                         }
736                         else {
737                                 mem_alloc(NULL, mem, MEM_READ_ONLY);
738                                 mem_copy_to(mem);
739
740                                 cuda_assert(cuTexRefSetAddress(NULL, texref, cuda_device_ptr(mem.device_pointer), size));
741                                 cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_POINT));
742                                 cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_READ_AS_INTEGER));
743                         }
744                 }
745                 /* Texture Storage */
746                 else {
747                         CUarray handle = NULL;
748
749                         if(mem.data_depth > 1) {
750                                 CUDA_ARRAY3D_DESCRIPTOR desc;
751
752                                 desc.Width = mem.data_width;
753                                 desc.Height = mem.data_height;
754                                 desc.Depth = mem.data_depth;
755                                 desc.Format = format;
756                                 desc.NumChannels = mem.data_elements;
757                                 desc.Flags = 0;
758
759                                 cuda_assert(cuArray3DCreate(&handle, &desc));
760                         }
761                         else {
762                                 CUDA_ARRAY_DESCRIPTOR desc;
763
764                                 desc.Width = mem.data_width;
765                                 desc.Height = mem.data_height;
766                                 desc.Format = format;
767                                 desc.NumChannels = mem.data_elements;
768
769                                 cuda_assert(cuArrayCreate(&handle, &desc));
770                         }
771
772                         if(!handle) {
773                                 return;
774                         }
775
776                         /* Allocate 3D, 2D or 1D memory */
777                         if(mem.data_depth > 1) {
778                                 CUDA_MEMCPY3D param;
779                                 memset(&param, 0, sizeof(param));
780                                 param.dstMemoryType = CU_MEMORYTYPE_ARRAY;
781                                 param.dstArray = handle;
782                                 param.srcMemoryType = CU_MEMORYTYPE_HOST;
783                                 param.srcHost = (void*)mem.data_pointer;
784                                 param.srcPitch = mem.data_width*dsize*mem.data_elements;
785                                 param.WidthInBytes = param.srcPitch;
786                                 param.Height = mem.data_height;
787                                 param.Depth = mem.data_depth;
788
789                                 cuda_assert(cuMemcpy3D(&param));
790                         }
791                         else if(mem.data_height > 1) {
792                                 CUDA_MEMCPY2D param;
793                                 memset(&param, 0, sizeof(param));
794                                 param.dstMemoryType = CU_MEMORYTYPE_ARRAY;
795                                 param.dstArray = handle;
796                                 param.srcMemoryType = CU_MEMORYTYPE_HOST;
797                                 param.srcHost = (void*)mem.data_pointer;
798                                 param.srcPitch = mem.data_width*dsize*mem.data_elements;
799                                 param.WidthInBytes = param.srcPitch;
800                                 param.Height = mem.data_height;
801
802                                 cuda_assert(cuMemcpy2D(&param));
803                         }
804                         else
805                                 cuda_assert(cuMemcpyHtoA(handle, 0, (void*)mem.data_pointer, size));
806
807                         /* Fermi and Kepler */
808                         mem.device_pointer = (device_ptr)handle;
809                         mem.device_size = size;
810
811                         stats.mem_alloc(size);
812
813                         /* Bindless Textures - Kepler */
814                         if(has_bindless_textures) {
815                                 int flat_slot = 0;
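                                /* Bindless image textures are named like __tex_image_<type>_<slot>;
                                 * the digits after the last underscore give the flat slot index. */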
816                                 if(string_startswith(name, "__tex_image")) {
817                                         int pos = string(name).rfind("_");
818                                         flat_slot = atoi(name + pos + 1);
819                                 }
820                                 else {
821                                         assert(0);
822                                 }
823
824                                 CUDA_RESOURCE_DESC resDesc;
825                                 memset(&resDesc, 0, sizeof(resDesc));
826                                 resDesc.resType = CU_RESOURCE_TYPE_ARRAY;
827                                 resDesc.res.array.hArray = handle;
828                                 resDesc.flags = 0;
829
830                                 CUDA_TEXTURE_DESC texDesc;
831                                 memset(&texDesc, 0, sizeof(texDesc));
832                                 texDesc.addressMode[0] = address_mode;
833                                 texDesc.addressMode[1] = address_mode;
834                                 texDesc.addressMode[2] = address_mode;
835                                 texDesc.filterMode = filter_mode;
836                                 texDesc.flags = CU_TRSF_NORMALIZED_COORDINATES;
837
838                                 CUtexObject tex = 0;
839                                 cuda_assert(cuTexObjectCreate(&tex, &resDesc, &texDesc, NULL));
840
841                                 /* Safety check: the 64-bit texture object handle must fit in a uint slot. */
842                                 if(tex > UINT_MAX) {
843                                         assert(0);
844                                 }
845
846                                 /* Resize once */
847                                 if(flat_slot >= bindless_mapping.size()) {
848                                         /* Allocate some slots in advance, to reduce amount
849                                          * of re-allocations.
850                                          */
851                                         bindless_mapping.resize(flat_slot + 128);
852                                 }
853
854                                 /* Set Mapping and tag that we need to (re-)upload to device */
855                                 bindless_mapping.get_data()[flat_slot] = (uint)tex;
856                                 tex_bindless_map[mem.device_pointer] = (uint)tex;
857                                 need_bindless_mapping = true;
858                         }
859                         /* Regular Textures - Fermi */
860                         else {
861                                 cuda_assert(cuTexRefSetArray(texref, handle, CU_TRSA_OVERRIDE_FORMAT));
862                                 cuda_assert(cuTexRefSetFilterMode(texref, filter_mode));
863                                 cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_NORMALIZED_COORDINATES));
864                         }
865                 }
866
867                 /* Fermi, Data and Image Textures */
868                 if(!has_bindless_textures) {
869                         cuda_assert(cuTexRefSetAddressMode(texref, 0, address_mode));
870                         cuda_assert(cuTexRefSetAddressMode(texref, 1, address_mode));
871                         if(mem.data_depth > 1) {
872                                 cuda_assert(cuTexRefSetAddressMode(texref, 2, address_mode));
873                         }
874
875                         cuda_assert(cuTexRefSetFormat(texref, format, mem.data_elements));
876                 }
877
878                 /* Fermi and Kepler */
879                 tex_interp_map[mem.device_pointer] = (interpolation != INTERPOLATION_NONE);
880         }
881
882         void tex_free(device_memory& mem)
883         {
884                 if(mem.device_pointer) {
885                         if(tex_interp_map[mem.device_pointer]) {
886                                 CUDAContextScope scope(this);
887                                 cuArrayDestroy((CUarray)mem.device_pointer);
888
889                                 /* Free CUtexObject (Bindless Textures) */
890                                 if(info.has_bindless_textures && tex_bindless_map[mem.device_pointer]) {
891                                         uint flat_slot = tex_bindless_map[mem.device_pointer];
892                                         cuTexObjectDestroy(flat_slot);
893                                 }
894
895                                 tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
896                                 mem.device_pointer = 0;
897
898                                 stats.mem_free(mem.device_size);
899                                 mem.device_size = 0;
900                         }
901                         else {
902                                 tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
903                                 mem_free(mem);
904                         }
905                 }
906         }
907
908         bool denoising_set_tiles(device_ptr *buffers, DenoisingTask *task)
909         {
910                 mem_alloc("Denoising Tile Info", task->tiles_mem, MEM_READ_ONLY);
911
912                 TilesInfo *tiles = (TilesInfo*) task->tiles_mem.data_pointer;
913                 for(int i = 0; i < 9; i++) {
914                         tiles->buffers[i] = buffers[i];
915                 }
916
917                 mem_copy_to(task->tiles_mem);
918
919                 return !have_error();
920         }
921
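/* Derive a square thread block from the kernel's maximum threads per block and cover a
 * (w, h) region with it. For example, a 1024-thread limit gives a 32x32 block, so a
 * 1920x1080 region launches a 60x34 grid of blocks.
 */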
922 #define CUDA_GET_BLOCKSIZE(func, w, h)                                                                          \
923                         int threads_per_block;                                                                              \
924                         cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, func)); \
925                         int threads = (int)sqrt((float)threads_per_block);                                                  \
926                         int xblocks = ((w) + threads - 1)/threads;                                                          \
927                         int yblocks = ((h) + threads - 1)/threads;
928
929 #define CUDA_LAUNCH_KERNEL(func, args)                      \
930                         cuda_assert(cuLaunchKernel(func,                \
931                                                    xblocks, yblocks, 1, \
932                                                    threads, threads, 1, \
933                                                    0, 0, args, 0));
934
935         bool denoising_non_local_means(device_ptr image_ptr, device_ptr guide_ptr, device_ptr variance_ptr, device_ptr out_ptr,
936                                        DenoisingTask *task)
937         {
938                 if(have_error())
939                         return false;
940
941                 CUDAContextScope scope(this);
942
943                 int4 rect = task->rect;
944                 int w = align_up(rect.z-rect.x, 4);
945                 int h = rect.w-rect.y;
946                 int r = task->nlm_state.r;
947                 int f = task->nlm_state.f;
948                 float a = task->nlm_state.a;
949                 float k_2 = task->nlm_state.k_2;
950
951                 CUdeviceptr difference     = task->nlm_state.temporary_1_ptr;
952                 CUdeviceptr blurDifference = task->nlm_state.temporary_2_ptr;
953                 CUdeviceptr weightAccum    = task->nlm_state.temporary_3_ptr;
954
955                 cuda_assert(cuMemsetD8(weightAccum, 0, sizeof(float)*w*h));
956                 cuda_assert(cuMemsetD8(out_ptr, 0, sizeof(float)*w*h));
957
958                 CUfunction cuNLMCalcDifference, cuNLMBlur, cuNLMCalcWeight, cuNLMUpdateOutput, cuNLMNormalize;
959                 cuda_assert(cuModuleGetFunction(&cuNLMCalcDifference, cuFilterModule, "kernel_cuda_filter_nlm_calc_difference"));
960                 cuda_assert(cuModuleGetFunction(&cuNLMBlur,           cuFilterModule, "kernel_cuda_filter_nlm_blur"));
961                 cuda_assert(cuModuleGetFunction(&cuNLMCalcWeight,     cuFilterModule, "kernel_cuda_filter_nlm_calc_weight"));
962                 cuda_assert(cuModuleGetFunction(&cuNLMUpdateOutput,   cuFilterModule, "kernel_cuda_filter_nlm_update_output"));
963                 cuda_assert(cuModuleGetFunction(&cuNLMNormalize,      cuFilterModule, "kernel_cuda_filter_nlm_normalize"));
964
965                 cuda_assert(cuFuncSetCacheConfig(cuNLMCalcDifference, CU_FUNC_CACHE_PREFER_L1));
966                 cuda_assert(cuFuncSetCacheConfig(cuNLMBlur,           CU_FUNC_CACHE_PREFER_L1));
967                 cuda_assert(cuFuncSetCacheConfig(cuNLMCalcWeight,     CU_FUNC_CACHE_PREFER_L1));
968                 cuda_assert(cuFuncSetCacheConfig(cuNLMUpdateOutput,   CU_FUNC_CACHE_PREFER_L1));
969                 cuda_assert(cuFuncSetCacheConfig(cuNLMNormalize,      CU_FUNC_CACHE_PREFER_L1));
970
971                 CUDA_GET_BLOCKSIZE(cuNLMCalcDifference, rect.z-rect.x, rect.w-rect.y);
972
973                 int dx, dy;
974                 int4 local_rect;
975                 int channel_offset = 0;
976                 void *calc_difference_args[] = {&dx, &dy, &guide_ptr, &variance_ptr, &difference, &local_rect, &w, &channel_offset, &a, &k_2};
977                 void *blur_args[]            = {&difference, &blurDifference, &local_rect, &w, &f};
978                 void *calc_weight_args[]     = {&blurDifference, &difference, &local_rect, &w, &f};
979                 void *update_output_args[]   = {&dx, &dy, &blurDifference, &image_ptr, &out_ptr, &weightAccum, &local_rect, &w, &f};
980
981                 for(int i = 0; i < (2*r+1)*(2*r+1); i++) {
982                         dy = i / (2*r+1) - r;
983                         dx = i % (2*r+1) - r;
984                         local_rect = make_int4(max(0, -dx), max(0, -dy), rect.z-rect.x - max(0, dx), rect.w-rect.y - max(0, dy));
985
986                         CUDA_LAUNCH_KERNEL(cuNLMCalcDifference, calc_difference_args);
987                         CUDA_LAUNCH_KERNEL(cuNLMBlur, blur_args);
988                         CUDA_LAUNCH_KERNEL(cuNLMCalcWeight, calc_weight_args);
989                         CUDA_LAUNCH_KERNEL(cuNLMBlur, blur_args);
990                         CUDA_LAUNCH_KERNEL(cuNLMUpdateOutput, update_output_args);
991                 }
992
993                 local_rect = make_int4(0, 0, rect.z-rect.x, rect.w-rect.y);
994                 void *normalize_args[] = {&out_ptr, &weightAccum, &local_rect, &w};
995                 CUDA_LAUNCH_KERNEL(cuNLMNormalize, normalize_args);
996                 cuda_assert(cuCtxSynchronize());
997
998                 return !have_error();
999         }
1000
1001         bool denoising_construct_transform(DenoisingTask *task)
1002         {
1003                 if(have_error())
1004                         return false;
1005
1006                 CUDAContextScope scope(this);
1007
1008                 CUfunction cuFilterConstructTransform;
1009                 cuda_assert(cuModuleGetFunction(&cuFilterConstructTransform, cuFilterModule, "kernel_cuda_filter_construct_transform"));
1010                 cuda_assert(cuFuncSetCacheConfig(cuFilterConstructTransform, CU_FUNC_CACHE_PREFER_SHARED));
1011                 CUDA_GET_BLOCKSIZE(cuFilterConstructTransform,
1012                                    task->storage.w,
1013                                    task->storage.h);
1014
1015                 void *args[] = {&task->buffer.mem.device_pointer,
1016                                 &task->storage.transform.device_pointer,
1017                                 &task->storage.rank.device_pointer,
1018                                 &task->filter_area,
1019                                 &task->rect,
1020                                 &task->radius,
1021                                 &task->pca_threshold,
1022                                 &task->buffer.pass_stride};
1023                 CUDA_LAUNCH_KERNEL(cuFilterConstructTransform, args);
1024                 cuda_assert(cuCtxSynchronize());
1025
1026                 return !have_error();
1027         }
1028
1029         bool denoising_reconstruct(device_ptr color_ptr,
1030                                    device_ptr color_variance_ptr,
1031                                    device_ptr output_ptr,
1032                                    DenoisingTask *task)
1033         {
1034                 if(have_error())
1035                         return false;
1036
1037                 CUDAContextScope scope(this);
1038
1039                 mem_zero(task->storage.XtWX);
1040                 mem_zero(task->storage.XtWY);
1041
1042                 CUfunction cuNLMCalcDifference, cuNLMBlur, cuNLMCalcWeight, cuNLMConstructGramian, cuFinalize;
1043                 cuda_assert(cuModuleGetFunction(&cuNLMCalcDifference,   cuFilterModule, "kernel_cuda_filter_nlm_calc_difference"));
1044                 cuda_assert(cuModuleGetFunction(&cuNLMBlur,             cuFilterModule, "kernel_cuda_filter_nlm_blur"));
1045                 cuda_assert(cuModuleGetFunction(&cuNLMCalcWeight,       cuFilterModule, "kernel_cuda_filter_nlm_calc_weight"));
1046                 cuda_assert(cuModuleGetFunction(&cuNLMConstructGramian, cuFilterModule, "kernel_cuda_filter_nlm_construct_gramian"));
1047                 cuda_assert(cuModuleGetFunction(&cuFinalize,            cuFilterModule, "kernel_cuda_filter_finalize"));
1048
1049                 cuda_assert(cuFuncSetCacheConfig(cuNLMCalcDifference,   CU_FUNC_CACHE_PREFER_L1));
1050                 cuda_assert(cuFuncSetCacheConfig(cuNLMBlur,             CU_FUNC_CACHE_PREFER_L1));
1051                 cuda_assert(cuFuncSetCacheConfig(cuNLMCalcWeight,       CU_FUNC_CACHE_PREFER_L1));
1052                 cuda_assert(cuFuncSetCacheConfig(cuNLMConstructGramian, CU_FUNC_CACHE_PREFER_SHARED));
1053                 cuda_assert(cuFuncSetCacheConfig(cuFinalize,            CU_FUNC_CACHE_PREFER_L1));
1054
1055                 CUDA_GET_BLOCKSIZE(cuNLMCalcDifference,
1056                                    task->reconstruction_state.source_w,
1057                                    task->reconstruction_state.source_h);
1058
1059                 CUdeviceptr difference     = task->reconstruction_state.temporary_1_ptr;
1060                 CUdeviceptr blurDifference = task->reconstruction_state.temporary_2_ptr;
1061
1062                 int r = task->radius;
1063                 int f = 4;
1064                 float a = 1.0f;
1065                 for(int i = 0; i < (2*r+1)*(2*r+1); i++) {
1066                         int dy = i / (2*r+1) - r;
1067                         int dx = i % (2*r+1) - r;
1068
1069                         int local_rect[4] = {max(0, -dx), max(0, -dy),
1070                                              task->reconstruction_state.source_w - max(0, dx),
1071                                              task->reconstruction_state.source_h - max(0, dy)};
1072
1073                         void *calc_difference_args[] = {&dx, &dy,
1074                                                         &color_ptr,
1075                                                         &color_variance_ptr,
1076                                                         &difference,
1077                                                         &local_rect,
1078                                                         &task->buffer.w,
1079                                                         &task->buffer.pass_stride,
1080                                                         &a,
1081                                                         &task->nlm_k_2};
1082                         CUDA_LAUNCH_KERNEL(cuNLMCalcDifference, calc_difference_args);
1083
1084                         void *blur_args[] = {&difference,
1085                                              &blurDifference,
1086                                              &local_rect,
1087                                              &task->buffer.w,
1088                                              &f};
1089                         CUDA_LAUNCH_KERNEL(cuNLMBlur, blur_args);
1090
1091                         void *calc_weight_args[] = {&blurDifference,
1092                                                     &difference,
1093                                                     &local_rect,
1094                                                     &task->buffer.w,
1095                                                     &f};
1096                         CUDA_LAUNCH_KERNEL(cuNLMCalcWeight, calc_weight_args);
1097
1098                         /* Reuse previous arguments. */
1099                         CUDA_LAUNCH_KERNEL(cuNLMBlur, blur_args);
1100
1101                         void *construct_gramian_args[] = {&dx, &dy,
1102                                                           &blurDifference,
1103                                                           &task->buffer.mem.device_pointer,
1104                                                           &task->storage.transform.device_pointer,
1105                                                           &task->storage.rank.device_pointer,
1106                                                           &task->storage.XtWX.device_pointer,
1107                                                           &task->storage.XtWY.device_pointer,
1108                                                           &local_rect,
1109                                                           &task->reconstruction_state.filter_rect,
1110                                                           &task->buffer.w,
1111                                                           &task->buffer.h,
1112                                                           &f,
1113                                                           &task->buffer.pass_stride};
1114                         CUDA_LAUNCH_KERNEL(cuNLMConstructGramian, construct_gramian_args);
1115                 }
1116
1117                 void *finalize_args[] = {&task->buffer.w,
1118                                          &task->buffer.h,
1119                                          &output_ptr,
1120                                          &task->storage.rank.device_pointer,
1121                                          &task->storage.XtWX.device_pointer,
1122                                          &task->storage.XtWY.device_pointer,
1123                                          &task->filter_area,
1124                                          &task->reconstruction_state.buffer_params.x,
1125                                          &task->render_buffer.samples};
1126                 CUDA_LAUNCH_KERNEL(cuFinalize, finalize_args);
1127                 cuda_assert(cuCtxSynchronize());
1128
1129                 return !have_error();
1130         }
1131
1132         bool denoising_combine_halves(device_ptr a_ptr, device_ptr b_ptr,
1133                                       device_ptr mean_ptr, device_ptr variance_ptr,
1134                                       int r, int4 rect, DenoisingTask *task)
1135         {
1136                 if(have_error())
1137                         return false;
1138
1139                 CUDAContextScope scope(this);
1140
1141                 CUfunction cuFilterCombineHalves;
1142                 cuda_assert(cuModuleGetFunction(&cuFilterCombineHalves, cuFilterModule, "kernel_cuda_filter_combine_halves"));
1143                 cuda_assert(cuFuncSetCacheConfig(cuFilterCombineHalves, CU_FUNC_CACHE_PREFER_L1));
1144                 CUDA_GET_BLOCKSIZE(cuFilterCombineHalves,
1145                                    task->rect.z-task->rect.x,
1146                                    task->rect.w-task->rect.y);
1147
1148                 void *args[] = {&mean_ptr,
1149                                 &variance_ptr,
1150                                 &a_ptr,
1151                                 &b_ptr,
1152                                 &rect,
1153                                 &r};
1154                 CUDA_LAUNCH_KERNEL(cuFilterCombineHalves, args);
1155                 cuda_assert(cuCtxSynchronize());
1156
1157                 return !have_error();
1158         }
1159
1160         bool denoising_divide_shadow(device_ptr a_ptr, device_ptr b_ptr,
1161                                      device_ptr sample_variance_ptr, device_ptr sv_variance_ptr,
1162                                      device_ptr buffer_variance_ptr, DenoisingTask *task)
1163         {
1164                 if(have_error())
1165                         return false;
1166
1167                 CUDAContextScope scope(this);
1168
1169                 CUfunction cuFilterDivideShadow;
1170                 cuda_assert(cuModuleGetFunction(&cuFilterDivideShadow, cuFilterModule, "kernel_cuda_filter_divide_shadow"));
1171                 cuda_assert(cuFuncSetCacheConfig(cuFilterDivideShadow, CU_FUNC_CACHE_PREFER_L1));
1172                 CUDA_GET_BLOCKSIZE(cuFilterDivideShadow,
1173                                    task->rect.z-task->rect.x,
1174                                    task->rect.w-task->rect.y);
1175
1176                 bool use_split_variance = use_split_kernel();
1177                 void *args[] = {&task->render_buffer.samples,
1178                                 &task->tiles_mem.device_pointer,
1179                                 &a_ptr,
1180                                 &b_ptr,
1181                                 &sample_variance_ptr,
1182                                 &sv_variance_ptr,
1183                                 &buffer_variance_ptr,
1184                                 &task->rect,
1185                                 &task->render_buffer.pass_stride,
1186                                 &task->render_buffer.denoising_data_offset,
1187                                 &use_split_variance};
1188                 CUDA_LAUNCH_KERNEL(cuFilterDivideShadow, args);
1189                 cuda_assert(cuCtxSynchronize());
1190
1191                 return !have_error();
1192         }
1193
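             /* Extract a feature pass (mean and variance at the given pass offsets)
              * from the render buffer via the filter_get_feature kernel. */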
1194         bool denoising_get_feature(int mean_offset,
1195                                    int variance_offset,
1196                                    device_ptr mean_ptr,
1197                                    device_ptr variance_ptr,
1198                                    DenoisingTask *task)
1199         {
1200                 if(have_error())
1201                         return false;
1202
1203                 CUDAContextScope scope(this);
1204
1205                 CUfunction cuFilterGetFeature;
1206                 cuda_assert(cuModuleGetFunction(&cuFilterGetFeature, cuFilterModule, "kernel_cuda_filter_get_feature"));
1207                 cuda_assert(cuFuncSetCacheConfig(cuFilterGetFeature, CU_FUNC_CACHE_PREFER_L1));
1208                 CUDA_GET_BLOCKSIZE(cuFilterGetFeature,
1209                                    task->rect.z-task->rect.x,
1210                                    task->rect.w-task->rect.y);
1211
1212                 bool use_split_variance = use_split_kernel();
1213                 void *args[] = {&task->render_buffer.samples,
1214                                 &task->tiles_mem.device_pointer,
1215                                 &mean_offset,
1216                                 &variance_offset,
1217                                 &mean_ptr,
1218                                 &variance_ptr,
1219                                 &task->rect,
1220                                 &task->render_buffer.pass_stride,
1221                                 &task->render_buffer.denoising_data_offset,
1222                                 &use_split_variance};
1223                 CUDA_LAUNCH_KERNEL(cuFilterGetFeature, args);
1224                 cuda_assert(cuCtxSynchronize());
1225
1226                 return !have_error();
1227         }
1228
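             /* Detect outlier pixels in the image pass and write the filtered result
              * to output_ptr via the filter_detect_outliers kernel. */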
1229         bool denoising_detect_outliers(device_ptr image_ptr,
1230                                        device_ptr variance_ptr,
1231                                        device_ptr depth_ptr,
1232                                        device_ptr output_ptr,
1233                                        DenoisingTask *task)
1234         {
1235                 if(have_error())
1236                         return false;
1237
1238                 CUDAContextScope scope(this);
1239
1240                 CUfunction cuFilterDetectOutliers;
1241                 cuda_assert(cuModuleGetFunction(&cuFilterDetectOutliers, cuFilterModule, "kernel_cuda_filter_detect_outliers"));
1242                 cuda_assert(cuFuncSetCacheConfig(cuFilterDetectOutliers, CU_FUNC_CACHE_PREFER_L1));
1243                 CUDA_GET_BLOCKSIZE(cuFilterDetectOutliers,
1244                                    task->rect.z-task->rect.x,
1245                                    task->rect.w-task->rect.y);
1246
1247                 void *args[] = {&image_ptr,
1248                                 &variance_ptr,
1249                                 &depth_ptr,
1250                                 &output_ptr,
1251                                 &task->rect,
1252                                 &task->buffer.pass_stride};
1253
1254                 CUDA_LAUNCH_KERNEL(cuFilterDetectOutliers, args);
1255                 cuda_assert(cuCtxSynchronize());
1256
1257                 return !have_error();
1258         }
1259
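             /* Run the full denoising pipeline for a tile: bind the CUDA filter
              * functions as DenoisingTask callbacks, map the 3x3 tile neighborhood,
              * denoise, and unmap the neighbor tiles again. */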
1260         void denoise(RenderTile &rtile, const DeviceTask &task)
1261         {
1262                 DenoisingTask denoising(this);
1263
1264                 denoising.functions.construct_transform = function_bind(&CUDADevice::denoising_construct_transform, this, &denoising);
1265                 denoising.functions.reconstruct = function_bind(&CUDADevice::denoising_reconstruct, this, _1, _2, _3, &denoising);
1266                 denoising.functions.divide_shadow = function_bind(&CUDADevice::denoising_divide_shadow, this, _1, _2, _3, _4, _5, &denoising);
1267                 denoising.functions.non_local_means = function_bind(&CUDADevice::denoising_non_local_means, this, _1, _2, _3, _4, &denoising);
1268                 denoising.functions.combine_halves = function_bind(&CUDADevice::denoising_combine_halves, this, _1, _2, _3, _4, _5, _6, &denoising);
1269                 denoising.functions.get_feature = function_bind(&CUDADevice::denoising_get_feature, this, _1, _2, _3, _4, &denoising);
1270                 denoising.functions.detect_outliers = function_bind(&CUDADevice::denoising_detect_outliers, this, _1, _2, _3, _4, &denoising);
1271                 denoising.functions.set_tiles = function_bind(&CUDADevice::denoising_set_tiles, this, _1, &denoising);
1272
1273                 denoising.filter_area = make_int4(rtile.x, rtile.y, rtile.w, rtile.h);
1274                 denoising.render_buffer.samples = rtile.sample;
1275
1276                 RenderTile rtiles[9];
1277                 rtiles[4] = rtile;
1278                 task.map_neighbor_tiles(rtiles, this);
1279                 denoising.tiles_from_rendertiles(rtiles);
1280
1281                 denoising.init_from_devicetask(task);
1282
1283                 denoising.run_denoising();
1284
1285                 task.unmap_neighbor_tiles(rtiles, this);
1286         }
1287
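             /* Path trace a single sample of a tile: describe the work in a WorkTile
              * uploaded to the device, then launch the (branched) path tracing kernel
              * with one thread per pixel of the tile. */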
1288         void path_trace(RenderTile& rtile, int sample, bool branched)
1289         {
1290                 if(have_error())
1291                         return;
1292
1293                 CUDAContextScope scope(this);
1294
1295                 CUfunction cuPathTrace;
1296
1297                 /* get kernel function */
1298                 if(branched) {
1299                         cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_branched_path_trace"));
1300                 }
1301                 else {
1302                         cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_path_trace"));
1303                 }
1304
1305                 if(have_error()) {
1306                         return;
1307                 }
1308
1309                 cuda_assert(cuFuncSetCacheConfig(cuPathTrace, CU_FUNC_CACHE_PREFER_L1));
1310
1311                 /* allocate work tile */
1312                 device_vector<WorkTile> work_tiles;
1313                 work_tiles.resize(1);
1314
1315                 WorkTile *wtile = work_tiles.get_data();
1316                 wtile->x = rtile.x;
1317                 wtile->y = rtile.y;
1318                 wtile->w = rtile.w;
1319                 wtile->h = rtile.h;
1320                 wtile->offset = rtile.offset;
1321                 wtile->stride = rtile.stride;
1322                 wtile->start_sample = sample;
1323                 wtile->num_samples = 1;
1324                 wtile->buffer = (float*)cuda_device_ptr(rtile.buffer);
1325                 wtile->rng_state = (uint*)cuda_device_ptr(rtile.rng_state);
1326
1327                 mem_alloc("work_tiles", work_tiles, MEM_READ_ONLY);
1328                 mem_copy_to(work_tiles);
1329
1330                 CUdeviceptr d_work_tiles = cuda_device_ptr(work_tiles.device_pointer);
1331
1332                 uint total_work_size = wtile->w * wtile->h * wtile->num_samples;
1333
1334                 /* pass in parameters */
1335                 void *args[] = {&d_work_tiles,
1336                                 &total_work_size};
1337
1338                 /* launch kernel */
1339                 int num_threads_per_block;
1340                 cuda_assert(cuFuncGetAttribute(&num_threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuPathTrace));
1341                 int num_blocks = divide_up(total_work_size, num_threads_per_block);
1342
1343                 cuda_assert(cuLaunchKernel(cuPathTrace,
1344                                            num_blocks, 1, 1,
1345                                            num_threads_per_block, 1, 1,
1346                                            0, 0, args, 0));
1347
1348                 cuda_assert(cuCtxSynchronize());
1349
1350                 mem_free(work_tiles);
1351         }
1352
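             /* Convert accumulated render buffer samples to displayable byte or
              * half-float RGBA, writing into the mapped GL pixel buffer when
              * rendering interactively. */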
1353         void film_convert(DeviceTask& task, device_ptr buffer, device_ptr rgba_byte, device_ptr rgba_half)
1354         {
1355                 if(have_error())
1356                         return;
1357
1358                 CUDAContextScope scope(this);
1359
1360                 CUfunction cuFilmConvert;
1361                 CUdeviceptr d_rgba = map_pixels((rgba_byte)? rgba_byte: rgba_half);
1362                 CUdeviceptr d_buffer = cuda_device_ptr(buffer);
1363
1364                 /* get kernel function */
1365                 if(rgba_half) {
1366                         cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_half_float"));
1367                 }
1368                 else {
1369                         cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_byte"));
1370                 }
1371
1372
1373                 float sample_scale = 1.0f/(task.sample + 1);
1374
1375                 /* pass in parameters */
1376                 void *args[] = {&d_rgba,
1377                                 &d_buffer,
1378                                 &sample_scale,
1379                                 &task.x,
1380                                 &task.y,
1381                                 &task.w,
1382                                 &task.h,
1383                                 &task.offset,
1384                                 &task.stride};
1385
1386                 /* launch kernel */
1387                 int threads_per_block;
1388                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuFilmConvert));
1389
1390                 int xthreads = (int)sqrt(threads_per_block);
1391                 int ythreads = (int)sqrt(threads_per_block);
1392                 int xblocks = (task.w + xthreads - 1)/xthreads;
1393                 int yblocks = (task.h + ythreads - 1)/ythreads;
1394
1395                 cuda_assert(cuFuncSetCacheConfig(cuFilmConvert, CU_FUNC_CACHE_PREFER_L1));
1396
1397                 cuda_assert(cuLaunchKernel(cuFilmConvert,
1398                                            xblocks , yblocks, 1, /* blocks */
1399                                            xthreads, ythreads, 1, /* threads */
1400                                            0, 0, args, 0));
1401
1402                 unmap_pixels((rgba_byte)? rgba_byte: rgba_half);
1403         }
1404
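             /* Run shader evaluation or baking on the device, processing the input
              * in 64k chunks so the task can be cancelled between kernel launches. */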
1405         void shader(DeviceTask& task)
1406         {
1407                 if(have_error())
1408                         return;
1409
1410                 CUDAContextScope scope(this);
1411
1412                 CUfunction cuShader;
1413                 CUdeviceptr d_input = cuda_device_ptr(task.shader_input);
1414                 CUdeviceptr d_output = cuda_device_ptr(task.shader_output);
1415                 CUdeviceptr d_output_luma = cuda_device_ptr(task.shader_output_luma);
1416
1417                 /* get kernel function */
1418                 if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
1419                         cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_bake"));
1420                 }
1421                 else {
1422                         cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_shader"));
1423                 }
1424
1425                 /* do tasks in smaller chunks, so we can cancel it */
1426                 const int shader_chunk_size = 65536;
1427                 const int start = task.shader_x;
1428                 const int end = task.shader_x + task.shader_w;
1429                 int offset = task.offset;
1430
1431                 bool canceled = false;
1432                 for(int sample = 0; sample < task.num_samples && !canceled; sample++) {
1433                         for(int shader_x = start; shader_x < end; shader_x += shader_chunk_size) {
1434                                 int shader_w = min(shader_chunk_size, end - shader_x);
1435
1436                                 /* pass in parameters */
1437                                 void *args[8];
1438                                 int arg = 0;
1439                                 args[arg++] = &d_input;
1440                                 args[arg++] = &d_output;
1441                                 if(task.shader_eval_type < SHADER_EVAL_BAKE) {
1442                                         args[arg++] = &d_output_luma;
1443                                 }
1444                                 args[arg++] = &task.shader_eval_type;
1445                                 if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
1446                                         args[arg++] = &task.shader_filter;
1447                                 }
1448                                 args[arg++] = &shader_x;
1449                                 args[arg++] = &shader_w;
1450                                 args[arg++] = &offset;
1451                                 args[arg++] = &sample;
1452
1453                                 /* launch kernel */
1454                                 int threads_per_block;
1455                                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuShader));
1456
1457                                 int xblocks = (shader_w + threads_per_block - 1)/threads_per_block;
1458
1459                                 cuda_assert(cuFuncSetCacheConfig(cuShader, CU_FUNC_CACHE_PREFER_L1));
1460                                 cuda_assert(cuLaunchKernel(cuShader,
1461                                                            xblocks , 1, 1, /* blocks */
1462                                                            threads_per_block, 1, 1, /* threads */
1463                                                            0, 0, args, 0));
1464
1465                                 cuda_assert(cuCtxSynchronize());
1466
1467                                 if(task.get_cancel()) {
1468                                         canceled = true;
1469                                         break;
1470                                 }
1471                         }
1472
1473                         task.update_progress(NULL);
1474                 }
1475         }
1476
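             /* For interactive rendering, map the GL pixel buffer object so CUDA can
              * write to it directly; in background mode return the device pointer. */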
1477         CUdeviceptr map_pixels(device_ptr mem)
1478         {
1479                 if(!background) {
1480                         PixelMem pmem = pixel_mem_map[mem];
1481                         CUdeviceptr buffer;
1482
1483                         size_t bytes;
1484                         cuda_assert(cuGraphicsMapResources(1, &pmem.cuPBOresource, 0));
1485                         cuda_assert(cuGraphicsResourceGetMappedPointer(&buffer, &bytes, pmem.cuPBOresource));
1486
1487                         return buffer;
1488                 }
1489
1490                 return cuda_device_ptr(mem);
1491         }
1492
1493         void unmap_pixels(device_ptr mem)
1494         {
1495                 if(!background) {
1496                         PixelMem pmem = pixel_mem_map[mem];
1497
1498                         cuda_assert(cuGraphicsUnmapResources(1, &pmem.cuPBOresource, 0));
1499                 }
1500         }
1501
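             /* Allocate display pixels as a GL PBO and texture registered with CUDA
              * for interop; fall back to regular device memory if registration fails. */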
1502         void pixels_alloc(device_memory& mem)
1503         {
1504                 if(!background) {
1505                         PixelMem pmem;
1506
1507                         pmem.w = mem.data_width;
1508                         pmem.h = mem.data_height;
1509
1510                         CUDAContextScope scope(this);
1511
1512                         glGenBuffers(1, &pmem.cuPBO);
1513                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
1514                         if(mem.data_type == TYPE_HALF)
1515                                 glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(GLhalf)*4, NULL, GL_DYNAMIC_DRAW);
1516                         else
1517                                 glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(uint8_t)*4, NULL, GL_DYNAMIC_DRAW);
1518
1519                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1520
1521                         glGenTextures(1, &pmem.cuTexId);
1522                         glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
1523                         if(mem.data_type == TYPE_HALF)
1524                                 glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F_ARB, pmem.w, pmem.h, 0, GL_RGBA, GL_HALF_FLOAT, NULL);
1525                         else
1526                                 glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, pmem.w, pmem.h, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
1527                         glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
1528                         glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
1529                         glBindTexture(GL_TEXTURE_2D, 0);
1530
1531                         CUresult result = cuGraphicsGLRegisterBuffer(&pmem.cuPBOresource, pmem.cuPBO, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);
1532
1533                         if(result == CUDA_SUCCESS) {
1534                                 mem.device_pointer = pmem.cuTexId;
1535                                 pixel_mem_map[mem.device_pointer] = pmem;
1536
1537                                 mem.device_size = mem.memory_size();
1538                                 stats.mem_alloc(mem.device_size);
1539
1540                                 return;
1541                         }
1542                         else {
1543                                 /* failed to register buffer, fallback to no interop */
1544                                 glDeleteBuffers(1, &pmem.cuPBO);
1545                                 glDeleteTextures(1, &pmem.cuTexId);
1546
1547                                 background = true;
1548                         }
1549                 }
1550
1551                 Device::pixels_alloc(mem);
1552         }
1553
1554         void pixels_copy_from(device_memory& mem, int y, int w, int h)
1555         {
1556                 if(!background) {
1557                         PixelMem pmem = pixel_mem_map[mem.device_pointer];
1558
1559                         CUDAContextScope scope(this);
1560
1561                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
1562                         uchar *pixels = (uchar*)glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_READ_ONLY);
1563                         size_t offset = sizeof(uchar)*4*y*w;
1564                         memcpy((uchar*)mem.data_pointer + offset, pixels + offset, sizeof(uchar)*4*w*h);
1565                         glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
1566                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1567
1568                         return;
1569                 }
1570
1571                 Device::pixels_copy_from(mem, y, w, h);
1572         }
1573
1574         void pixels_free(device_memory& mem)
1575         {
1576                 if(mem.device_pointer) {
1577                         if(!background) {
1578                                 PixelMem pmem = pixel_mem_map[mem.device_pointer];
1579
1580                                 CUDAContextScope scope(this);
1581
1582                                 cuda_assert(cuGraphicsUnregisterResource(pmem.cuPBOresource));
1583                                 glDeleteBuffers(1, &pmem.cuPBO);
1584                                 glDeleteTextures(1, &pmem.cuTexId);
1585
1586                                 pixel_mem_map.erase(pixel_mem_map.find(mem.device_pointer));
1587                                 mem.device_pointer = 0;
1588
1589                                 stats.mem_free(mem.device_size);
1590                                 mem.device_size = 0;
1591
1592                                 return;
1593                         }
1594
1595                         Device::pixels_free(mem);
1596                 }
1597         }
1598
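             /* Draw rendered pixels by updating the GL texture from the PBO and
              * drawing a textured quad, optionally through the display space shader
              * callbacks provided in draw_params. */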
1599         void draw_pixels(device_memory& mem, int y, int w, int h, int dx, int dy, int width, int height, bool transparent,
1600                 const DeviceDrawParams &draw_params)
1601         {
1602                 if(!background) {
1603                         PixelMem pmem = pixel_mem_map[mem.device_pointer];
1604                         float *vpointer;
1605
1606                         CUDAContextScope scope(this);
1607
1608                         /* for multi devices, this assumes the inefficient method that we allocate
1609                          * all pixels on the device even though we only render to a subset */
1610                         size_t offset = 4*y*w;
1611
1612                         if(mem.data_type == TYPE_HALF)
1613                                 offset *= sizeof(GLhalf);
1614                         else
1615                                 offset *= sizeof(uint8_t);
1616
1617                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
1618                         glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
1619                         if(mem.data_type == TYPE_HALF)
1620                                 glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_HALF_FLOAT, (void*)offset);
1621                         else
1622                                 glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, (void*)offset);
1623                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1624
1625                         glEnable(GL_TEXTURE_2D);
1626
1627                         if(transparent) {
1628                                 glEnable(GL_BLEND);
1629                                 glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
1630                         }
1631
1632                         glColor3f(1.0f, 1.0f, 1.0f);
1633
1634                         if(draw_params.bind_display_space_shader_cb) {
1635                                 draw_params.bind_display_space_shader_cb();
1636                         }
1637
1638                         if(!vertex_buffer)
1639                                 glGenBuffers(1, &vertex_buffer);
1640
1641                         glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
1642                         /* invalidate old contents - avoids stalling if buffer is still waiting in queue to be rendered */
1643                         glBufferData(GL_ARRAY_BUFFER, 16 * sizeof(float), NULL, GL_STREAM_DRAW);
1644
1645                         vpointer = (float *)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
1646
1647                         if(vpointer) {
1648                                 /* texture coordinate - vertex pair */
1649                                 vpointer[0] = 0.0f;
1650                                 vpointer[1] = 0.0f;
1651                                 vpointer[2] = dx;
1652                                 vpointer[3] = dy;
1653
1654                                 vpointer[4] = (float)w/(float)pmem.w;
1655                                 vpointer[5] = 0.0f;
1656                                 vpointer[6] = (float)width + dx;
1657                                 vpointer[7] = dy;
1658
1659                                 vpointer[8] = (float)w/(float)pmem.w;
1660                                 vpointer[9] = (float)h/(float)pmem.h;
1661                                 vpointer[10] = (float)width + dx;
1662                                 vpointer[11] = (float)height + dy;
1663
1664                                 vpointer[12] = 0.0f;
1665                                 vpointer[13] = (float)h/(float)pmem.h;
1666                                 vpointer[14] = dx;
1667                                 vpointer[15] = (float)height + dy;
1668
1669                                 glUnmapBuffer(GL_ARRAY_BUFFER);
1670                         }
1671
1672                         glTexCoordPointer(2, GL_FLOAT, 4 * sizeof(float), 0);
1673                         glVertexPointer(2, GL_FLOAT, 4 * sizeof(float), (char *)NULL + 2 * sizeof(float));
1674
1675                         glEnableClientState(GL_VERTEX_ARRAY);
1676                         glEnableClientState(GL_TEXTURE_COORD_ARRAY);
1677
1678                         glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
1679
1680                         glDisableClientState(GL_TEXTURE_COORD_ARRAY);
1681                         glDisableClientState(GL_VERTEX_ARRAY);
1682
1683                         glBindBuffer(GL_ARRAY_BUFFER, 0);
1684
1685                         if(draw_params.unbind_display_space_shader_cb) {
1686                                 draw_params.unbind_display_space_shader_cb();
1687                         }
1688
1689                         if(transparent)
1690                                 glDisable(GL_BLEND);
1691
1692                         glBindTexture(GL_TEXTURE_2D, 0);
1693                         glDisable(GL_TEXTURE_2D);
1694
1695                         return;
1696                 }
1697
1698                 Device::draw_pixels(mem, y, w, h, dx, dy, width, height, transparent, draw_params);
1699         }
1700
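             /* Worker entry point for queued tasks: render tasks keep acquiring tiles
              * and either path trace (mega or split kernel) or denoise them, shader
              * tasks run the shader evaluation kernel. */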
1701         void thread_run(DeviceTask *task)
1702         {
1703                 CUDAContextScope scope(this);
1704
1705                 if(task->type == DeviceTask::RENDER) {
1706                         RenderTile tile;
1707
1708                         bool branched = task->integrator_branched;
1709
1710                         /* Upload Bindless Mapping */
1711                         load_bindless_mapping();
1712
1713                         DeviceRequestedFeatures requested_features;
1714                         if(use_split_kernel()) {
1715                                 if(!use_adaptive_compilation()) {
1716                                         requested_features.max_closure = 64;
1717                                 }
1718
1719                                 if(split_kernel == NULL) {
1720                                         split_kernel = new CUDASplitKernel(this);
1721                                         split_kernel->load_kernels(requested_features);
1722                                 }
1723                         }
1724
1725                         /* keep rendering tiles until done */
1726                         while(task->acquire_tile(this, tile)) {
1727                                 if(tile.task == RenderTile::PATH_TRACE) {
1728                                         if(use_split_kernel()) {
1729                                                 device_memory void_buffer;
1730                                                 split_kernel->path_trace(task, tile, void_buffer, void_buffer);
1731                                         }
1732                                         else {
1733                                                 int start_sample = tile.start_sample;
1734                                                 int end_sample = tile.start_sample + tile.num_samples;
1735
1736                                                 for(int sample = start_sample; sample < end_sample; sample++) {
1737                                                         if(task->get_cancel()) {
1738                                                                 if(task->need_finish_queue == false)
1739                                                                         break;
1740                                                         }
1741
1742                                                         path_trace(tile, sample, branched);
1743
1744                                                         tile.sample = sample + 1;
1745
1746                                                         task->update_progress(&tile, tile.w*tile.h);
1747                                                 }
1748                                         }
1749                                 }
1750                                 else if(tile.task == RenderTile::DENOISE) {
1751                                         tile.sample = tile.start_sample + tile.num_samples;
1752
1753                                         denoise(tile, *task);
1754
1755                                         task->update_progress(&tile, tile.w*tile.h);
1756                                 }
1757
1758                                 task->release_tile(tile);
1759
1760                                 if(task->get_cancel()) {
1761                                         if(task->need_finish_queue == false)
1762                                                 break;
1763                                 }
1764                         }
1765                 }
1766                 else if(task->type == DeviceTask::SHADER) {
1767                         /* Upload Bindless Mapping */
1768                         load_bindless_mapping();
1769
1770                         shader(*task);
1771
1772                         cuda_assert(cuCtxSynchronize());
1773                 }
1774         }
1775
1776         class CUDADeviceTask : public DeviceTask {
1777         public:
1778                 CUDADeviceTask(CUDADevice *device, DeviceTask& task)
1779                 : DeviceTask(task)
1780                 {
1781                         run = function_bind(&CUDADevice::thread_run, device, this);
1782                 }
1783         };
1784
1785         int get_split_task_count(DeviceTask& /*task*/)
1786         {
1787                 return 1;
1788         }
1789
1790         void task_add(DeviceTask& task)
1791         {
1792                 if(task.type == DeviceTask::FILM_CONVERT) {
1793                         CUDAContextScope scope(this);
1794
1795                         /* must be done in main thread due to opengl access */
1796                         film_convert(task, task.buffer, task.rgba_byte, task.rgba_half);
1797                         cuda_assert(cuCtxSynchronize());
1798                 }
1799                 else {
1800                         task_pool.push(new CUDADeviceTask(this, task));
1801                 }
1802         }
1803
1804         void task_wait()
1805         {
1806                 task_pool.wait();
1807         }
1808
1809         void task_cancel()
1810         {
1811                 task_pool.cancel();
1812         }
1813
1814         friend class CUDASplitKernelFunction;
1815         friend class CUDASplitKernel;
1816         friend class CUDAContextScope;
1817 };
1818
1819 /* redefine the cuda_assert macro so it can be used outside of the CUDADevice class
1820  * now that the definition of that class is complete
1821  */
1822 #undef cuda_assert
1823 #define cuda_assert(stmt) \
1824         { \
1825                 CUresult result = stmt; \
1826                 \
1827                 if(result != CUDA_SUCCESS) { \
1828                         string message = string_printf("CUDA error: %s in %s", cuewErrorString(result), #stmt); \
1829                         if(device->error_msg == "") \
1830                                 device->error_msg = message; \
1831                         fprintf(stderr, "%s\n", message.c_str()); \
1832                         /*cuda_abort();*/ \
1833                         device->cuda_error_documentation(); \
1834                 } \
1835         } (void)0
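     /* NOTE: this version of the macro expects a CUDADevice pointer named `device`
      * to be in scope, e.g.
      *
      *   CUDADevice *device = ...;
      *   cuda_assert(cuCtxSynchronize());
      */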
1836
1837
1838 /* CUDA context scope. */
1839
1840 CUDAContextScope::CUDAContextScope(CUDADevice *device)
1841 : device(device)
1842 {
1843         cuda_assert(cuCtxPushCurrent(device->cuContext));
1844 }
1845
1846 CUDAContextScope::~CUDAContextScope()
1847 {
1848         cuda_assert(cuCtxPopCurrent(NULL));
1849 }
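     /* The scope pushes the device's CUDA context on construction and pops it on
      * destruction, so CUDA calls can simply be wrapped as:
      *
      *   CUDAContextScope scope(device);
      *   cuda_assert(cuCtxSynchronize());
      */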
1850
1851 /* split kernel */
1852
1853 class CUDASplitKernelFunction : public SplitKernelFunction {
1854         CUDADevice* device;
1855         CUfunction func;
1856 public:
1857         CUDASplitKernelFunction(CUDADevice *device, CUfunction func) : device(device), func(func) {}
1858
1859         /* enqueue the kernel, returns false if there is an error */
1860         bool enqueue(const KernelDimensions &dim, device_memory &/*kg*/, device_memory &/*data*/)
1861         {
1862                 return enqueue(dim, NULL);
1863         }
1864
1865         /* enqueue the kernel, returns false if there is an error */
1866         bool enqueue(const KernelDimensions &dim, void *args[])
1867         {
1868                 if(device->have_error())
1869                         return false;
1870
1871                 CUDAContextScope scope(device);
1872
1873                 /* we ignore dim.local_size for now, as this is faster */
1874                 int threads_per_block;
1875                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, func));
1876
1877                 int xblocks = (dim.global_size[0]*dim.global_size[1] + threads_per_block - 1)/threads_per_block;
1878
1879                 cuda_assert(cuFuncSetCacheConfig(func, CU_FUNC_CACHE_PREFER_L1));
1880
1881                 cuda_assert(cuLaunchKernel(func,
1882                                            xblocks, 1, 1, /* blocks */
1883                                            threads_per_block, 1, 1, /* threads */
1884                                            0, 0, args, 0));
1885
1886                 return !device->have_error();
1887         }
1888 };
1889
1890 CUDASplitKernel::CUDASplitKernel(CUDADevice *device) : DeviceSplitKernel(device), device(device)
1891 {
1892 }
1893
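     /* Query the split kernel state buffer size by launching a small kernel that
      * writes the size for the requested thread count into a one element buffer. */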
1894 uint64_t CUDASplitKernel::state_buffer_size(device_memory& /*kg*/, device_memory& /*data*/, size_t num_threads)
1895 {
1896         CUDAContextScope scope(device);
1897
1898         device_vector<uint64_t> size_buffer;
1899         size_buffer.resize(1);
1900         device->mem_alloc(NULL, size_buffer, MEM_READ_WRITE);
1901
1902         uint threads = num_threads;
1903         CUdeviceptr d_size = device->cuda_device_ptr(size_buffer.device_pointer);
1904
1905         struct args_t {
1906                 uint* num_threads;
1907                 CUdeviceptr* size;
1908         };
1909
1910         args_t args = {
1911                 &threads,
1912                 &d_size
1913         };
1914
1915         CUfunction state_buffer_size;
1916         cuda_assert(cuModuleGetFunction(&state_buffer_size, device->cuModule, "kernel_cuda_state_buffer_size"));
1917
1918         cuda_assert(cuLaunchKernel(state_buffer_size,
1919                                    1, 1, 1,
1920                                    1, 1, 1,
1921                                    0, 0, (void**)&args, 0));
1922
1923         device->mem_copy_from(size_buffer, 0, 1, 1, sizeof(uint64_t));
1924         device->mem_free(size_buffer);
1925
1926         return *size_buffer.get_data();
1927 }
1928
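     /* Launch the data_init kernel that fills the split kernel state, ray state and
      * work queues for the given tile before the path tracing passes run. */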
1929 bool CUDASplitKernel::enqueue_split_kernel_data_init(const KernelDimensions& dim,
1930                                     RenderTile& rtile,
1931                                     int num_global_elements,
1932                                     device_memory& /*kernel_globals*/,
1933                                     device_memory& /*kernel_data*/,
1934                                     device_memory& split_data,
1935                                     device_memory& ray_state,
1936                                     device_memory& queue_index,
1937                                     device_memory& use_queues_flag,
1938                                     device_memory& work_pool_wgs)
1939 {
1940         CUDAContextScope scope(device);
1941
1942         CUdeviceptr d_split_data = device->cuda_device_ptr(split_data.device_pointer);
1943         CUdeviceptr d_ray_state = device->cuda_device_ptr(ray_state.device_pointer);
1944         CUdeviceptr d_queue_index = device->cuda_device_ptr(queue_index.device_pointer);
1945         CUdeviceptr d_use_queues_flag = device->cuda_device_ptr(use_queues_flag.device_pointer);
1946         CUdeviceptr d_work_pool_wgs = device->cuda_device_ptr(work_pool_wgs.device_pointer);
1947
1948         CUdeviceptr d_rng_state = device->cuda_device_ptr(rtile.rng_state);
1949         CUdeviceptr d_buffer = device->cuda_device_ptr(rtile.buffer);
1950
1951         int end_sample = rtile.start_sample + rtile.num_samples;
1952         int queue_size = dim.global_size[0] * dim.global_size[1];
1953
1954         struct args_t {
1955                 CUdeviceptr* split_data_buffer;
1956                 int* num_elements;
1957                 CUdeviceptr* ray_state;
1958                 CUdeviceptr* rng_state;
1959                 int* start_sample;
1960                 int* end_sample;
1961                 int* sx;
1962                 int* sy;
1963                 int* sw;
1964                 int* sh;
1965                 int* offset;
1966                 int* stride;
1967                 CUdeviceptr* queue_index;
1968                 int* queuesize;
1969                 CUdeviceptr* use_queues_flag;
1970                 CUdeviceptr* work_pool_wgs;
1971                 int* num_samples;
1972                 CUdeviceptr* buffer;
1973         };
1974
1975         args_t args = {
1976                 &d_split_data,
1977                 &num_global_elements,
1978                 &d_ray_state,
1979                 &d_rng_state,
1980                 &rtile.start_sample,
1981                 &end_sample,
1982                 &rtile.x,
1983                 &rtile.y,
1984                 &rtile.w,
1985                 &rtile.h,
1986                 &rtile.offset,
1987                 &rtile.stride,
1988                 &d_queue_index,
1989                 &queue_size,
1990                 &d_use_queues_flag,
1991                 &d_work_pool_wgs,
1992                 &rtile.num_samples,
1993                 &d_buffer
1994         };
1995
1996         CUfunction data_init;
1997         cuda_assert(cuModuleGetFunction(&data_init, device->cuModule, "kernel_cuda_path_trace_data_init"));
1998         if(device->have_error()) {
1999                 return false;
2000         }
2001
2002         CUDASplitKernelFunction(device, data_init).enqueue(dim, (void**)&args);
2003
2004         return !device->have_error();
2005 }
2006
2007 SplitKernelFunction* CUDASplitKernel::get_split_kernel_function(const string& kernel_name,
2008                                                                 const DeviceRequestedFeatures&)
2009 {
2010         CUDAContextScope scope(device);
2011         CUfunction func;
2012
2013         cuda_assert(cuModuleGetFunction(&func, device->cuModule, (string("kernel_cuda_") + kernel_name).data()));
2014         if(device->have_error()) {
2015                 device->cuda_error_message(string_printf("kernel \"kernel_cuda_%s\" not found in module", kernel_name.data()));
2016                 return NULL;
2017         }
2018
2019         return new CUDASplitKernelFunction(device, func);
2020 }
2021
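     /* A 32x1 local size matches the CUDA warp size. */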
2022 int2 CUDASplitKernel::split_kernel_local_size()
2023 {
2024         return make_int2(32, 1);
2025 }
2026
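     /* Pick a global work size whose split kernel state fits in roughly half of the
      * free device memory, rounded down to warp friendly multiples. */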
2027 int2 CUDASplitKernel::split_kernel_global_size(device_memory& kg, device_memory& data, DeviceTask * /*task*/)
2028 {
2029         CUDAContextScope scope(device);
2030         size_t free;
2031         size_t total;
2032
2033         cuda_assert(cuMemGetInfo(&free, &total));
2034
2035         VLOG(1) << "Maximum device allocation size: "
2036                 << string_human_readable_number(free) << " bytes. ("
2037                 << string_human_readable_size(free) << ").";
2038
2039         size_t num_elements = max_elements_for_max_buffer_size(kg, data, free / 2);
2040         size_t side = round_down((int)sqrt(num_elements), 32);
2041         int2 global_size = make_int2(side, round_down(num_elements / side, 16));
2042         VLOG(1) << "Global size: " << global_size << ".";
2043         return global_size;
2044 }
2045
2046 bool device_cuda_init(void)
2047 {
2048 #ifdef WITH_CUDA_DYNLOAD
2049         static bool initialized = false;
2050         static bool result = false;
2051
2052         if(initialized)
2053                 return result;
2054
2055         initialized = true;
2056         int cuew_result = cuewInit();
2057         if(cuew_result == CUEW_SUCCESS) {
2058                 VLOG(1) << "CUEW initialization succeeded";
2059                 if(CUDADevice::have_precompiled_kernels()) {
2060                         VLOG(1) << "Found precompiled kernels";
2061                         result = true;
2062                 }
2063 #ifndef _WIN32
2064                 else if(cuewCompilerPath() != NULL) {
2065                         VLOG(1) << "Found CUDA compiler " << cuewCompilerPath();
2066                         result = true;
2067                 }
2068                 else {
2069                         VLOG(1) << "Neither precompiled kernels nor CUDA compiler was found,"
2070                                 << " unable to use CUDA";
2071                 }
2072 #endif
2073         }
2074         else {
2075                 VLOG(1) << "CUEW initialization failed: "
2076                         << ((cuew_result == CUEW_ERROR_ATEXIT_FAILED)
2077                             ? "Error setting up atexit() handler"
2078                             : "Error opening the library");
2079         }
2080
2081         return result;
2082 #else  /* WITH_CUDA_DYNLOAD */
2083         return true;
2084 #endif /* WITH_CUDA_DYNLOAD */
2085 }
2086
2087 Device *device_cuda_create(DeviceInfo& info, Stats &stats, bool background)
2088 {
2089         return new CUDADevice(info, stats, background);
2090 }
2091
2092 static CUresult device_cuda_safe_init()
2093 {
2094 #ifdef _WIN32
2095         __try {
2096                 return cuInit(0);
2097         }
2098         __except(EXCEPTION_EXECUTE_HANDLER) {
2099                 /* Ignore crashes inside the CUDA driver and hope we can
2100                  * survive even with corrupted CUDA installs. */
2101                 fprintf(stderr, "Cycles CUDA: driver crashed, continuing without CUDA.\n");
2102         }
2103
2104         return CUDA_ERROR_NO_DEVICE;
2105 #else
2106         return cuInit(0);
2107 #endif
2108 }
2109
2110 void device_cuda_info(vector<DeviceInfo>& devices)
2111 {
2112         CUresult result = device_cuda_safe_init();
2113         if(result != CUDA_SUCCESS) {
2114                 if(result != CUDA_ERROR_NO_DEVICE)
2115                         fprintf(stderr, "CUDA cuInit: %s\n", cuewErrorString(result));
2116                 return;
2117         }
2118
2119         int count = 0;
2120         result = cuDeviceGetCount(&count);
2121         if(result != CUDA_SUCCESS) {
2122                 fprintf(stderr, "CUDA cuDeviceGetCount: %s\n", cuewErrorString(result));
2123                 return;
2124         }
2125
2126         vector<DeviceInfo> display_devices;
2127
2128         for(int num = 0; num < count; num++) {
2129                 char name[256];
2130                 int attr;
2131
2132                 if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS)
2133                         continue;
2134
2135                 int major;
2136                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, num);
2137                 if(major < 2) {
2138                         continue;
2139                 }
2140
2141                 DeviceInfo info;
2142
2143                 info.type = DEVICE_CUDA;
2144                 info.description = string(name);
2145                 info.num = num;
2146
2147                 info.advanced_shading = (major >= 2);
2148                 info.has_bindless_textures = (major >= 3);
2149
2150                 int pci_location[3] = {0, 0, 0};
2151                 cuDeviceGetAttribute(&pci_location[0], CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID, num);
2152                 cuDeviceGetAttribute(&pci_location[1], CU_DEVICE_ATTRIBUTE_PCI_BUS_ID, num);
2153                 cuDeviceGetAttribute(&pci_location[2], CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID, num);
2154                 info.id = string_printf("CUDA_%s_%04x:%02x:%02x",
2155                                         name,
2156                                         (unsigned int)pci_location[0],
2157                                         (unsigned int)pci_location[1],
2158                                         (unsigned int)pci_location[2]);
2159
2160                 /* if device has a kernel timeout, assume it is used for display */
2161                 if(cuDeviceGetAttribute(&attr, CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT, num) == CUDA_SUCCESS && attr == 1) {
2162                         info.description += " (Display)";
2163                         info.display_device = true;
2164                         display_devices.push_back(info);
2165                 }
2166                 else
2167                         devices.push_back(info);
2168         }
2169
2170         if(!display_devices.empty())
2171                 devices.insert(devices.end(), display_devices.begin(), display_devices.end());
2172 }
2173
2174 string device_cuda_capabilities(void)
2175 {
2176         CUresult result = device_cuda_safe_init();
2177         if(result != CUDA_SUCCESS) {
2178                 if(result != CUDA_ERROR_NO_DEVICE) {
2179                         return string("Error initializing CUDA: ") + cuewErrorString(result);
2180                 }
2181                 return "No CUDA device found\n";
2182         }
2183
2184         int count;
2185         result = cuDeviceGetCount(&count);
2186         if(result != CUDA_SUCCESS) {
2187                 return string("Error getting devices: ") + cuewErrorString(result);
2188         }
2189
2190         string capabilities = "";
2191         for(int num = 0; num < count; num++) {
2192                 char name[256];
2193                 if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS) {
2194                         continue;
2195                 }
2196                 capabilities += string("\t") + name + "\n";
2197                 int value;
2198 #define GET_ATTR(attr) \
2199                 { \
2200                         if(cuDeviceGetAttribute(&value, \
2201                                                 CU_DEVICE_ATTRIBUTE_##attr, \
2202                                                 num) == CUDA_SUCCESS) \
2203                         { \
2204                                 capabilities += string_printf("\t\tCU_DEVICE_ATTRIBUTE_" #attr "\t\t\t%d\n", \
2205                                                               value); \
2206                         } \
2207                 } (void)0
2208                 /* TODO(sergey): Strip all attributes which are not useful for us
2209                  * or do not depend on the driver.
2210                  */
2211                 GET_ATTR(MAX_THREADS_PER_BLOCK);
2212                 GET_ATTR(MAX_BLOCK_DIM_X);
2213                 GET_ATTR(MAX_BLOCK_DIM_Y);
2214                 GET_ATTR(MAX_BLOCK_DIM_Z);
2215                 GET_ATTR(MAX_GRID_DIM_X);
2216                 GET_ATTR(MAX_GRID_DIM_Y);
2217                 GET_ATTR(MAX_GRID_DIM_Z);
2218                 GET_ATTR(MAX_SHARED_MEMORY_PER_BLOCK);
2219                 GET_ATTR(SHARED_MEMORY_PER_BLOCK);
2220                 GET_ATTR(TOTAL_CONSTANT_MEMORY);
2221                 GET_ATTR(WARP_SIZE);
2222                 GET_ATTR(MAX_PITCH);
2223                 GET_ATTR(MAX_REGISTERS_PER_BLOCK);
2224                 GET_ATTR(REGISTERS_PER_BLOCK);
2225                 GET_ATTR(CLOCK_RATE);
2226                 GET_ATTR(TEXTURE_ALIGNMENT);
2227                 GET_ATTR(GPU_OVERLAP);
2228                 GET_ATTR(MULTIPROCESSOR_COUNT);
2229                 GET_ATTR(KERNEL_EXEC_TIMEOUT);
2230                 GET_ATTR(INTEGRATED);
2231                 GET_ATTR(CAN_MAP_HOST_MEMORY);
2232                 GET_ATTR(COMPUTE_MODE);
2233                 GET_ATTR(MAXIMUM_TEXTURE1D_WIDTH);
2234                 GET_ATTR(MAXIMUM_TEXTURE2D_WIDTH);
2235                 GET_ATTR(MAXIMUM_TEXTURE2D_HEIGHT);
2236                 GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH);
2237                 GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT);
2238                 GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH);
2239                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_WIDTH);
2240                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_HEIGHT);
2241                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_LAYERS);
2242                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_WIDTH);
2243                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_HEIGHT);
2244                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES);
2245                 GET_ATTR(SURFACE_ALIGNMENT);
2246                 GET_ATTR(CONCURRENT_KERNELS);
2247                 GET_ATTR(ECC_ENABLED);
2248                 GET_ATTR(TCC_DRIVER);
2249                 GET_ATTR(MEMORY_CLOCK_RATE);
2250                 GET_ATTR(GLOBAL_MEMORY_BUS_WIDTH);
2251                 GET_ATTR(L2_CACHE_SIZE);
2252                 GET_ATTR(MAX_THREADS_PER_MULTIPROCESSOR);
2253                 GET_ATTR(ASYNC_ENGINE_COUNT);
2254                 GET_ATTR(UNIFIED_ADDRESSING);
2255                 GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_WIDTH);
2256                 GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_LAYERS);
2257                 GET_ATTR(CAN_TEX2D_GATHER);
2258                 GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_WIDTH);
2259                 GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_HEIGHT);
2260                 GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE);
2261                 GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE);
2262                 GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE);
2263                 GET_ATTR(TEXTURE_PITCH_ALIGNMENT);
2264                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_WIDTH);
2265                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH);
2266                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS);
2267                 GET_ATTR(MAXIMUM_SURFACE1D_WIDTH);
2268                 GET_ATTR(MAXIMUM_SURFACE2D_WIDTH);
2269                 GET_ATTR(MAXIMUM_SURFACE2D_HEIGHT);
2270                 GET_ATTR(MAXIMUM_SURFACE3D_WIDTH);
2271                 GET_ATTR(MAXIMUM_SURFACE3D_HEIGHT);
2272                 GET_ATTR(MAXIMUM_SURFACE3D_DEPTH);
2273                 GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_WIDTH);
2274                 GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_LAYERS);
2275                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_WIDTH);
2276                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_HEIGHT);
2277                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_LAYERS);
2278                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_WIDTH);
2279                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH);
2280                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS);
2281                 GET_ATTR(MAXIMUM_TEXTURE1D_LINEAR_WIDTH);
2282                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_WIDTH);
2283                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_HEIGHT);
2284                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_PITCH);
2285                 GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH);
2286                 GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT);
2287                 GET_ATTR(COMPUTE_CAPABILITY_MAJOR);
2288                 GET_ATTR(COMPUTE_CAPABILITY_MINOR);
2289                 GET_ATTR(MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH);
2290                 GET_ATTR(STREAM_PRIORITIES_SUPPORTED);
2291                 GET_ATTR(GLOBAL_L1_CACHE_SUPPORTED);
2292                 GET_ATTR(LOCAL_L1_CACHE_SUPPORTED);
2293                 GET_ATTR(MAX_SHARED_MEMORY_PER_MULTIPROCESSOR);
2294                 GET_ATTR(MAX_REGISTERS_PER_MULTIPROCESSOR);
2295                 GET_ATTR(MANAGED_MEMORY);
2296                 GET_ATTR(MULTI_GPU_BOARD);
2297                 GET_ATTR(MULTI_GPU_BOARD_GROUP_ID);
2298 #undef GET_ATTR
2299                 capabilities += "\n";
2300         }
2301
2302         return capabilities;
2303 }
2304
2305 CCL_NAMESPACE_END