1 /*
2  * Copyright 2011-2013 Blender Foundation
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16
17 #include <climits>
18 #include <limits.h>
19 #include <stdio.h>
20 #include <stdlib.h>
21 #include <string.h>
22
23 #include "device/device.h"
24 #include "device/device_denoising.h"
25 #include "device/device_intern.h"
26 #include "device/device_split_kernel.h"
27
28 #include "render/buffers.h"
29
30 #include "kernel/filter/filter_defines.h"
31
32 #ifdef WITH_CUDA_DYNLOAD
33 #  include "cuew.h"
34 #else
35 #  include "util/util_opengl.h"
36 #  include <cuda.h>
37 #  include <cudaGL.h>
38 #endif
39 #include "util/util_debug.h"
40 #include "util/util_logging.h"
41 #include "util/util_map.h"
42 #include "util/util_md5.h"
43 #include "util/util_opengl.h"
44 #include "util/util_path.h"
45 #include "util/util_string.h"
46 #include "util/util_system.h"
47 #include "util/util_types.h"
48 #include "util/util_time.h"
49
50 #include "kernel/split/kernel_split_data_types.h"
51
52 CCL_NAMESPACE_BEGIN
53
54 #ifndef WITH_CUDA_DYNLOAD
55
56 /* Transparently implement some functions, so the majority of the file does not
57  * need to worry about the difference between dynamically loaded and linked CUDA.
58  */
59
60 namespace {
61
62 const char *cuewErrorString(CUresult result)
63 {
64         /* We can only give the error code here without major code duplication. That
65          * should be enough, since dynamic loading is only disabled by folks who know
66          * what they're doing anyway.
67          *
68          * NOTE: Avoid calling this from several threads.
69          */
70         static string error;
71         error = string_printf("%d", result);
72         return error.c_str();
73 }
74
75 const char *cuewCompilerPath(void)
76 {
77         return CYCLES_CUDA_NVCC_EXECUTABLE;
78 }
79
80 int cuewCompilerVersion(void)
81 {
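        /* This fallback matches cuew's encoding of major * 10 + minor, e.g. 80 for CUDA 8.0. */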
82         return (CUDA_VERSION / 100) + (CUDA_VERSION % 100 / 10);
83 }
84
85 }  /* namespace */
86 #endif  /* WITH_CUDA_DYNLOAD */
87
88 class CUDADevice;
89
90 class CUDASplitKernel : public DeviceSplitKernel {
91         CUDADevice *device;
92 public:
93         explicit CUDASplitKernel(CUDADevice *device);
94
95         virtual uint64_t state_buffer_size(device_memory& kg, device_memory& data, size_t num_threads);
96
97         virtual bool enqueue_split_kernel_data_init(const KernelDimensions& dim,
98                                                     RenderTile& rtile,
99                                                     int num_global_elements,
100                                                     device_memory& kernel_globals,
101                                                     device_memory& kernel_data_,
102                                                     device_memory& split_data,
103                                                     device_memory& ray_state,
104                                                     device_memory& queue_index,
105                                                     device_memory& use_queues_flag,
106                                                     device_memory& work_pool_wgs);
107
108         virtual SplitKernelFunction* get_split_kernel_function(const string& kernel_name,
109                                                                const DeviceRequestedFeatures&);
110         virtual int2 split_kernel_local_size();
111         virtual int2 split_kernel_global_size(device_memory& kg, device_memory& data, DeviceTask *task);
112 };
113
114 class CUDADevice : public Device
115 {
116 public:
117         DedicatedTaskPool task_pool;
118         CUdevice cuDevice;
119         CUcontext cuContext;
120         CUmodule cuModule, cuFilterModule;
121         map<device_ptr, bool> tex_interp_map;
122         map<device_ptr, uint> tex_bindless_map;
123         int cuDevId;
124         int cuDevArchitecture;
125         bool first_error;
126         CUDASplitKernel *split_kernel;
127
128         struct PixelMem {
129                 GLuint cuPBO;
130                 CUgraphicsResource cuPBOresource;
131                 GLuint cuTexId;
132                 int w, h;
133         };
134
135         map<device_ptr, PixelMem> pixel_mem_map;
136
137         /* Bindless Textures */
138         device_vector<uint> bindless_mapping;
139         bool need_bindless_mapping;
140
141         CUdeviceptr cuda_device_ptr(device_ptr mem)
142         {
143                 return (CUdeviceptr)mem;
144         }
145
146         static bool have_precompiled_kernels()
147         {
148                 string cubins_path = path_get("lib");
149                 return path_exists(cubins_path);
150         }
151
152         virtual bool show_samples() const
153         {
154                 /* The CUDADevice only processes one tile at a time, so showing samples is fine. */
155                 return true;
156         }
157
158 /*#ifdef NDEBUG
159 #define cuda_abort()
160 #else
161 #define cuda_abort() abort()
162 #endif*/
163         void cuda_error_documentation()
164         {
165                 if(first_error) {
166                         fprintf(stderr, "\nRefer to the Cycles GPU rendering documentation for possible solutions:\n");
167                         fprintf(stderr, "https://docs.blender.org/manual/en/dev/render/cycles/gpu_rendering.html\n\n");
168                         first_error = false;
169                 }
170         }
171
172 #define cuda_assert(stmt) \
173         { \
174                 CUresult result = stmt; \
175                 \
176                 if(result != CUDA_SUCCESS) { \
177                         string message = string_printf("CUDA error: %s in %s, line %d", cuewErrorString(result), #stmt, __LINE__); \
178                         if(error_msg == "") \
179                                 error_msg = message; \
180                         fprintf(stderr, "%s\n", message.c_str()); \
181                         /*cuda_abort();*/ \
182                         cuda_error_documentation(); \
183                 } \
184         } (void)0
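        /* NOTE: cuda_assert() only records and prints the error, it does not abort,
         * so callers should still check have_error() where a failure matters. */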
185
186         bool cuda_error_(CUresult result, const string& stmt)
187         {
188                 if(result == CUDA_SUCCESS)
189                         return false;
190
191                 string message = string_printf("CUDA error at %s: %s", stmt.c_str(), cuewErrorString(result));
192                 if(error_msg == "")
193                         error_msg = message;
194                 fprintf(stderr, "%s\n", message.c_str());
195                 cuda_error_documentation();
196                 return true;
197         }
198
199 #define cuda_error(stmt) cuda_error_(stmt, #stmt)
200
201         void cuda_error_message(const string& message)
202         {
203                 if(error_msg == "")
204                         error_msg = message;
205                 fprintf(stderr, "%s\n", message.c_str());
206                 cuda_error_documentation();
207         }
208
209         void cuda_push_context()
210         {
211                 cuda_assert(cuCtxSetCurrent(cuContext));
212         }
213
214         void cuda_pop_context()
215         {
216                 cuda_assert(cuCtxSetCurrent(NULL));
217         }
218
219         CUDADevice(DeviceInfo& info, Stats &stats, bool background_)
220         : Device(info, stats, background_)
221         {
222                 first_error = true;
223                 background = background_;
224
225                 cuDevId = info.num;
226                 cuDevice = 0;
227                 cuContext = 0;
228
229                 cuModule = 0;
230                 cuFilterModule = 0;
231
232                 split_kernel = NULL;
233
234                 need_bindless_mapping = false;
235
236                 /* initialize */
237                 if(cuda_error(cuInit(0)))
238                         return;
239
240                 /* setup device and context */
241                 if(cuda_error(cuDeviceGet(&cuDevice, cuDevId)))
242                         return;
243
244                 CUresult result;
245
246                 if(background) {
247                         result = cuCtxCreate(&cuContext, 0, cuDevice);
248                 }
249                 else {
250                         result = cuGLCtxCreate(&cuContext, 0, cuDevice);
251
252                         if(result != CUDA_SUCCESS) {
253                                 result = cuCtxCreate(&cuContext, 0, cuDevice);
254                                 background = true;
255                         }
256                 }
257
258                 if(cuda_error_(result, "cuCtxCreate"))
259                         return;
260
261                 int major, minor;
262                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
263                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
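                /* Encode the compute capability as e.g. 520 for sm_5.2. */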
264                 cuDevArchitecture = major*100 + minor*10;
265
266                 cuda_pop_context();
267         }
268
269         ~CUDADevice()
270         {
271                 task_pool.stop();
272
273                 delete split_kernel;
274
275                 if(info.has_bindless_textures) {
276                         tex_free(bindless_mapping);
277                 }
278
279                 cuda_assert(cuCtxDestroy(cuContext));
280         }
281
282         bool support_device(const DeviceRequestedFeatures& /*requested_features*/)
283         {
284                 int major, minor;
285                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
286                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
287
288                 /* We only support sm_20 and above */
289                 if(major < 2) {
290                         cuda_error_message(string_printf("CUDA device supported only with compute capability 2.0 or up, found %d.%d.", major, minor));
291                         return false;
292                 }
293
294                 return true;
295         }
296
297         bool use_adaptive_compilation()
298         {
299                 return DebugFlags().cuda.adaptive_compile;
300         }
301
302         bool use_split_kernel()
303         {
304                 return DebugFlags().cuda.split_kernel;
305         }
306
307         /* Common NVCC flags which stay the same regardless of shading model or
308          * kernel sources md5, and only depend on the compiler and compilation settings.
309          */
310         string compile_kernel_get_common_cflags(
311                 const DeviceRequestedFeatures& requested_features,
312                 bool filter=false, bool split=false)
313         {
314                 const int cuda_version = cuewCompilerVersion();
315                 const int machine = system_cpu_bits();
316                 const string source_path = path_get("source");
317                 const string include_path = source_path;
318                 string cflags = string_printf("-m%d "
319                                               "--ptxas-options=\"-v\" "
320                                               "--use_fast_math "
321                                               "-DNVCC "
322                                               "-D__KERNEL_CUDA_VERSION__=%d "
323                                                "-I\"%s\"",
324                                               machine,
325                                               cuda_version,
326                                               include_path.c_str());
327                 if(!filter && use_adaptive_compilation()) {
328                         cflags += " " + requested_features.get_build_options();
329                 }
330                 const char *extra_cflags = getenv("CYCLES_CUDA_EXTRA_CFLAGS");
331                 if(extra_cflags) {
332                         cflags += string(" ") + string(extra_cflags);
333                 }
334 #ifdef WITH_CYCLES_DEBUG
335                 cflags += " -D__KERNEL_DEBUG__";
336 #endif
337
338                 if(split) {
339                         cflags += " -D__SPLIT__";
340                 }
341
342                 return cflags;
343         }
344
345         bool compile_check_compiler() {
346                 const char *nvcc = cuewCompilerPath();
347                 if(nvcc == NULL) {
348                         cuda_error_message("CUDA nvcc compiler not found. "
349                                            "Install CUDA toolkit in default location.");
350                         return false;
351                 }
352                 const int cuda_version = cuewCompilerVersion();
353                 VLOG(1) << "Found nvcc " << nvcc
354                         << ", CUDA version " << cuda_version
355                         << ".";
356                 const int major = cuda_version / 10, minor = cuda_version % 10;
357                 if(cuda_version == 0) {
358                         cuda_error_message("CUDA nvcc compiler version could not be parsed.");
359                         return false;
360                 }
361                 if(cuda_version < 80) {
362                         printf("Unsupported CUDA version %d.%d detected, "
363                                "you need CUDA 8.0 or newer.\n",
364                                major, minor);
365                         return false;
366                 }
367                 else if(cuda_version != 80) {
368                         printf("CUDA version %d.%d detected, build may succeed but only "
369                                "CUDA 8.0 is officially supported.\n",
370                                major, minor);
371                 }
372                 return true;
373         }
374
375         string compile_kernel(const DeviceRequestedFeatures& requested_features,
376                               bool filter=false, bool split=false)
377         {
378                 const char *name, *source;
379                 if(filter) {
380                         name = "filter";
381                         source = "filter.cu";
382                 }
383                 else if(split) {
384                         name = "kernel_split";
385                         source = "kernel_split.cu";
386                 }
387                 else {
388                         name = "kernel";
389                         source = "kernel.cu";
390                 }
391                 /* Compute cubin name. */
392                 int major, minor;
393                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
394                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
395
396                 /* Attempt to use kernel provided with Blender. */
397                 if(!use_adaptive_compilation()) {
398                         const string cubin = path_get(string_printf("lib/%s_sm_%d%d.cubin",
399                                                                     name, major, minor));
400                         VLOG(1) << "Testing for pre-compiled kernel " << cubin << ".";
401                         if(path_exists(cubin)) {
402                                 VLOG(1) << "Using precompiled kernel.";
403                                 return cubin;
404                         }
405                 }
406
407                 const string common_cflags =
408                         compile_kernel_get_common_cflags(requested_features, filter, split);
409
410                 /* Try to use locally compiled kernel. */
411                 const string source_path = path_get("source");
412                 const string kernel_md5 = path_files_md5_hash(source_path);
413
414                 /* We include cflags in the md5 hash, so that changing the CUDA toolkit or
415                  * other compiler command line arguments makes sure the cubin gets re-built.
416                  */
417                 const string cubin_md5 = util_md5_string(kernel_md5 + common_cflags);
418
419                 const string cubin_file = string_printf("cycles_%s_sm%d%d_%s.cubin",
420                                                         name, major, minor,
421                                                         cubin_md5.c_str());
422                 const string cubin = path_cache_get(path_join("kernels", cubin_file));
423                 VLOG(1) << "Testing for locally compiled kernel " << cubin << ".";
424                 if(path_exists(cubin)) {
425                         VLOG(1) << "Using locally compiled kernel.";
426                         return cubin;
427                 }
428
429 #ifdef _WIN32
430                 if(have_precompiled_kernels()) {
431                         if(major < 2) {
432                                 cuda_error_message(string_printf(
433                                         "CUDA device requires compute capability 2.0 or up, "
434                                         "found %d.%d. Your GPU is not supported.",
435                                         major, minor));
436                         }
437                         else {
438                                 cuda_error_message(string_printf(
439                                         "CUDA binary kernel for this graphics card compute "
440                                         "capability (%d.%d) not found.",
441                                         major, minor));
442                         }
443                         return "";
444                 }
445 #endif
446
447                 /* Compile. */
448                 if(!compile_check_compiler()) {
449                         return "";
450                 }
451                 const char *nvcc = cuewCompilerPath();
452                 const string kernel = path_join(
453                         path_join(source_path, "kernel"),
454                         path_join("kernels",
455                                   path_join("cuda", source)));
456                 double starttime = time_dt();
457                 printf("Compiling CUDA kernel ...\n");
458
459                 path_create_directories(cubin);
460
461                 string command = string_printf("\"%s\" "
462                                                "-arch=sm_%d%d "
463                                                "--cubin \"%s\" "
464                                                "-o \"%s\" "
465                                                "%s ",
466                                                nvcc,
467                                                major, minor,
468                                                kernel.c_str(),
469                                                cubin.c_str(),
470                                                common_cflags.c_str());
471
472                 printf("%s\n", command.c_str());
473
474                 if(system(command.c_str()) == -1) {
475                         cuda_error_message("Failed to execute compilation command, "
476                                            "see console for details.");
477                         return "";
478                 }
479
480                 /* Verify that compilation succeeded. */
481                 if(!path_exists(cubin)) {
482                         cuda_error_message("CUDA kernel compilation failed, "
483                                            "see console for details.");
484                         return "";
485                 }
486
487                 printf("Kernel compilation finished in %.2lfs.\n", time_dt() - starttime);
488
489                 return cubin;
490         }
491
492         bool load_kernels(const DeviceRequestedFeatures& requested_features)
493         {
494                 /* TODO(sergey): Support kernels re-load for CUDA devices.
495                  *
496                  * Currently re-loading kernel will invalidate memory pointers,
497                  * causing problems in cuCtxSynchronize.
498                  */
499                 if(cuFilterModule && cuModule) {
500                         VLOG(1) << "Skipping kernel reload, not currently supported.";
501                         return true;
502                 }
503
504                 /* check if cuda init succeeded */
505                 if(cuContext == 0)
506                         return false;
507
508                 /* check if GPU is supported */
509                 if(!support_device(requested_features))
510                         return false;
511
512                 /* get kernel */
513                 string cubin = compile_kernel(requested_features, false, use_split_kernel());
514                 if(cubin == "")
515                         return false;
516
517                 string filter_cubin = compile_kernel(requested_features, true, false);
518                 if(filter_cubin == "")
519                         return false;
520
521                 /* open module */
522                 cuda_push_context();
523
524                 string cubin_data;
525                 CUresult result;
526
527                 if(path_read_text(cubin, cubin_data))
528                         result = cuModuleLoadData(&cuModule, cubin_data.c_str());
529                 else
530                         result = CUDA_ERROR_FILE_NOT_FOUND;
531
532                 if(cuda_error_(result, "cuModuleLoad"))
533                         cuda_error_message(string_printf("Failed loading CUDA kernel %s.", cubin.c_str()));
534
535                 if(path_read_text(filter_cubin, cubin_data))
536                         result = cuModuleLoadData(&cuFilterModule, cubin_data.c_str());
537                 else
538                         result = CUDA_ERROR_FILE_NOT_FOUND;
539
540                 if(cuda_error_(result, "cuModuleLoad"))
541                         cuda_error_message(string_printf("Failed loading CUDA kernel %s.", filter_cubin.c_str()));
542
543                 cuda_pop_context();
544
545                 return (result == CUDA_SUCCESS);
546         }
547
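        /* Re-upload the bindless texture handle table to the device when it has changed. */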
548         void load_bindless_mapping()
549         {
550                 if(info.has_bindless_textures && need_bindless_mapping) {
551                         tex_free(bindless_mapping);
552                         tex_alloc("__bindless_mapping", bindless_mapping, INTERPOLATION_NONE, EXTENSION_REPEAT);
553                         need_bindless_mapping = false;
554                 }
555         }
556
557         void mem_alloc(const char *name, device_memory& mem, MemoryType /*type*/)
558         {
559                 if(name) {
560                         VLOG(1) << "Buffer allocate: " << name << ", "
561                                 << string_human_readable_number(mem.memory_size()) << " bytes. ("
562                                 << string_human_readable_size(mem.memory_size()) << ")";
563                 }
564
565                 cuda_push_context();
566                 CUdeviceptr device_pointer;
567                 size_t size = mem.memory_size();
568                 cuda_assert(cuMemAlloc(&device_pointer, size));
569                 mem.device_pointer = (device_ptr)device_pointer;
570                 mem.device_size = size;
571                 stats.mem_alloc(size);
572                 cuda_pop_context();
573         }
574
575         void mem_copy_to(device_memory& mem)
576         {
577                 cuda_push_context();
578                 if(mem.device_pointer)
579                         cuda_assert(cuMemcpyHtoD(cuda_device_ptr(mem.device_pointer), (void*)mem.data_pointer, mem.memory_size()));
580                 cuda_pop_context();
581         }
582
583         void mem_copy_from(device_memory& mem, int y, int w, int h, int elem)
584         {
585                 size_t offset = elem*y*w;
586                 size_t size = elem*w*h;
587
588                 cuda_push_context();
589                 if(mem.device_pointer) {
590                         cuda_assert(cuMemcpyDtoH((uchar*)mem.data_pointer + offset,
591                                                  (CUdeviceptr)(mem.device_pointer + offset), size));
592                 }
593                 else {
594                         memset((char*)mem.data_pointer + offset, 0, size);
595                 }
596                 cuda_pop_context();
597         }
598
599         void mem_zero(device_memory& mem)
600         {
601                 if(mem.data_pointer) {
602                         memset((void*)mem.data_pointer, 0, mem.memory_size());
603                 }
604
605                 cuda_push_context();
606                 if(mem.device_pointer)
607                         cuda_assert(cuMemsetD8(cuda_device_ptr(mem.device_pointer), 0, mem.memory_size()));
608                 cuda_pop_context();
609         }
610
611         void mem_free(device_memory& mem)
612         {
613                 if(mem.device_pointer) {
614                         cuda_push_context();
615                         cuda_assert(cuMemFree(cuda_device_ptr(mem.device_pointer)));
616                         cuda_pop_context();
617
618                         mem.device_pointer = 0;
619
620                         stats.mem_free(mem.device_size);
621                         mem.device_size = 0;
622                 }
623         }
624
625         virtual device_ptr mem_alloc_sub_ptr(device_memory& mem, int offset, int /*size*/, MemoryType /*type*/)
626         {
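                /* Return a pointer offset into the existing allocation, no new device memory is allocated. */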
627                 return (device_ptr) (((char*) mem.device_pointer) + mem.memory_elements_size(offset));
628         }
629
630         void const_copy_to(const char *name, void *host, size_t size)
631         {
632                 CUdeviceptr mem;
633                 size_t bytes;
634
635                 cuda_push_context();
636                 cuda_assert(cuModuleGetGlobal(&mem, &bytes, cuModule, name));
637                 //assert(bytes == size);
638                 cuda_assert(cuMemcpyHtoD(mem, host, size));
639                 cuda_pop_context();
640         }
641
642         void tex_alloc(const char *name,
643                        device_memory& mem,
644                        InterpolationType interpolation,
645                        ExtensionType extension)
646         {
647                 VLOG(1) << "Texture allocate: " << name << ", "
648                         << string_human_readable_number(mem.memory_size()) << " bytes. ("
649                         << string_human_readable_size(mem.memory_size()) << ")";
650
651                 /* Check if we are on sm_30 or above.
652                  * We use arrays and bindless textures for storage there. */
653                 bool has_bindless_textures = info.has_bindless_textures;
654
655                 /* General variables for both architectures */
656                 string bind_name = name;
657                 size_t dsize = datatype_size(mem.data_type);
658                 size_t size = mem.memory_size();
659
660                 CUaddress_mode address_mode = CU_TR_ADDRESS_MODE_WRAP;
661                 switch(extension) {
662                         case EXTENSION_REPEAT:
663                                 address_mode = CU_TR_ADDRESS_MODE_WRAP;
664                                 break;
665                         case EXTENSION_EXTEND:
666                                 address_mode = CU_TR_ADDRESS_MODE_CLAMP;
667                                 break;
668                         case EXTENSION_CLIP:
669                                 address_mode = CU_TR_ADDRESS_MODE_BORDER;
670                                 break;
671                         default:
672                                 assert(0);
673                                 break;
674                 }
675
676                 CUfilter_mode filter_mode;
677                 if(interpolation == INTERPOLATION_CLOSEST) {
678                         filter_mode = CU_TR_FILTER_MODE_POINT;
679                 }
680                 else {
681                         filter_mode = CU_TR_FILTER_MODE_LINEAR;
682                 }
683
684                 CUarray_format_enum format;
685                 switch(mem.data_type) {
686                         case TYPE_UCHAR: format = CU_AD_FORMAT_UNSIGNED_INT8; break;
687                         case TYPE_UINT: format = CU_AD_FORMAT_UNSIGNED_INT32; break;
688                         case TYPE_INT: format = CU_AD_FORMAT_SIGNED_INT32; break;
689                         case TYPE_FLOAT: format = CU_AD_FORMAT_FLOAT; break;
690                         case TYPE_HALF: format = CU_AD_FORMAT_HALF; break;
691                         default: assert(0); return;
692                 }
693
694                 /* General variables for Fermi */
695                 CUtexref texref = NULL;
696
697                 if(!has_bindless_textures) {
698                         if(mem.data_depth > 1) {
699                                 /* The kernel uses different bind names for 2d and 3d float textures,
700                                  * so we have to adjust a couple of things here.
701                                  */
702                                 vector<string> tokens;
703                                 string_split(tokens, name, "_");
704                                 bind_name = string_printf("__tex_image_%s_3d_%s",
705                                                           tokens[2].c_str(),
706                                                           tokens[3].c_str());
707                         }
708
709                         cuda_push_context();
710                         cuda_assert(cuModuleGetTexRef(&texref, cuModule, bind_name.c_str()));
711                         cuda_pop_context();
712
713                         if(!texref) {
714                                 return;
715                         }
716                 }
717
718                 /* Data Storage */
719                 if(interpolation == INTERPOLATION_NONE) {
720                         if(has_bindless_textures) {
721                                 mem_alloc(NULL, mem, MEM_READ_ONLY);
722                                 mem_copy_to(mem);
723
724                                 cuda_push_context();
725
726                                 CUdeviceptr cumem;
727                                 size_t cubytes;
728
729                                 cuda_assert(cuModuleGetGlobal(&cumem, &cubytes, cuModule, bind_name.c_str()));
730
731                                 if(cubytes == 8) {
732                                         /* 64 bit device pointer */
733                                         uint64_t ptr = mem.device_pointer;
734                                         cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
735                                 }
736                                 else {
737                                         /* 32 bit device pointer */
738                                         uint32_t ptr = (uint32_t)mem.device_pointer;
739                                         cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
740                                 }
741
742                                 cuda_pop_context();
743                         }
744                         else {
745                                 mem_alloc(NULL, mem, MEM_READ_ONLY);
746                                 mem_copy_to(mem);
747
748                                 cuda_push_context();
749
750                                 cuda_assert(cuTexRefSetAddress(NULL, texref, cuda_device_ptr(mem.device_pointer), size));
751                                 cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_POINT));
752                                 cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_READ_AS_INTEGER));
753
754                                 cuda_pop_context();
755                         }
756                 }
757                 /* Texture Storage */
758                 else {
759                         CUarray handle = NULL;
760
761                         cuda_push_context();
762
763                         if(mem.data_depth > 1) {
764                                 CUDA_ARRAY3D_DESCRIPTOR desc;
765
766                                 desc.Width = mem.data_width;
767                                 desc.Height = mem.data_height;
768                                 desc.Depth = mem.data_depth;
769                                 desc.Format = format;
770                                 desc.NumChannels = mem.data_elements;
771                                 desc.Flags = 0;
772
773                                 cuda_assert(cuArray3DCreate(&handle, &desc));
774                         }
775                         else {
776                                 CUDA_ARRAY_DESCRIPTOR desc;
777
778                                 desc.Width = mem.data_width;
779                                 desc.Height = mem.data_height;
780                                 desc.Format = format;
781                                 desc.NumChannels = mem.data_elements;
782
783                                 cuda_assert(cuArrayCreate(&handle, &desc));
784                         }
785
786                         if(!handle) {
787                                 cuda_pop_context();
788                                 return;
789                         }
790
791                         /* Copy 3D, 2D or 1D data into the array. */
792                         if(mem.data_depth > 1) {
793                                 CUDA_MEMCPY3D param;
794                                 memset(&param, 0, sizeof(param));
795                                 param.dstMemoryType = CU_MEMORYTYPE_ARRAY;
796                                 param.dstArray = handle;
797                                 param.srcMemoryType = CU_MEMORYTYPE_HOST;
798                                 param.srcHost = (void*)mem.data_pointer;
799                                 param.srcPitch = mem.data_width*dsize*mem.data_elements;
800                                 param.WidthInBytes = param.srcPitch;
801                                 param.Height = mem.data_height;
802                                 param.Depth = mem.data_depth;
803
804                                 cuda_assert(cuMemcpy3D(&param));
805                         }
806                         else if(mem.data_height > 1) {
807                                 CUDA_MEMCPY2D param;
808                                 memset(&param, 0, sizeof(param));
809                                 param.dstMemoryType = CU_MEMORYTYPE_ARRAY;
810                                 param.dstArray = handle;
811                                 param.srcMemoryType = CU_MEMORYTYPE_HOST;
812                                 param.srcHost = (void*)mem.data_pointer;
813                                 param.srcPitch = mem.data_width*dsize*mem.data_elements;
814                                 param.WidthInBytes = param.srcPitch;
815                                 param.Height = mem.data_height;
816
817                                 cuda_assert(cuMemcpy2D(&param));
818                         }
819                         else
820                                 cuda_assert(cuMemcpyHtoA(handle, 0, (void*)mem.data_pointer, size));
821
822                         /* Fermi and Kepler */
823                         mem.device_pointer = (device_ptr)handle;
824                         mem.device_size = size;
825
826                         stats.mem_alloc(size);
827
828                         /* Bindless Textures - Kepler */
829                         if(has_bindless_textures) {
830                                 int flat_slot = 0;
831                                 if(string_startswith(name, "__tex_image")) {
832                                         int pos = string(name).rfind("_");
833                                         flat_slot = atoi(name + pos + 1);
834                                 }
835                                 else {
836                                         assert(0);
837                                 }
838
839                                 CUDA_RESOURCE_DESC resDesc;
840                                 memset(&resDesc, 0, sizeof(resDesc));
841                                 resDesc.resType = CU_RESOURCE_TYPE_ARRAY;
842                                 resDesc.res.array.hArray = handle;
843                                 resDesc.flags = 0;
844
845                                 CUDA_TEXTURE_DESC texDesc;
846                                 memset(&texDesc, 0, sizeof(texDesc));
847                                 texDesc.addressMode[0] = address_mode;
848                                 texDesc.addressMode[1] = address_mode;
849                                 texDesc.addressMode[2] = address_mode;
850                                 texDesc.filterMode = filter_mode;
851                                 texDesc.flags = CU_TRSF_NORMALIZED_COORDINATES;
852
853                                 CUtexObject tex = 0;
854                                 cuda_assert(cuTexObjectCreate(&tex, &resDesc, &texDesc, NULL));
855
856                                 /* Safety check: the bindless mapping stores the handle as a uint. */
857                                 if(tex > UINT_MAX) {
858                                         assert(0);
859                                 }
860
861                                 /* Resize once */
862                                 if(flat_slot >= bindless_mapping.size()) {
863                                         /* Allocate some slots in advance, to reduce the
864                                          * number of re-allocations.
865                                          */
866                                         bindless_mapping.resize(flat_slot + 128);
867                                 }
868
869                                 /* Set Mapping and tag that we need to (re-)upload to device */
870                                 bindless_mapping.get_data()[flat_slot] = (uint)tex;
871                                 tex_bindless_map[mem.device_pointer] = (uint)tex;
872                                 need_bindless_mapping = true;
873                         }
874                         /* Regular Textures - Fermi */
875                         else {
876                                 cuda_assert(cuTexRefSetArray(texref, handle, CU_TRSA_OVERRIDE_FORMAT));
877                                 cuda_assert(cuTexRefSetFilterMode(texref, filter_mode));
878                                 cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_NORMALIZED_COORDINATES));
879                         }
880
881                         cuda_pop_context();
882                 }
883
884                 /* Fermi, Data and Image Textures */
885                 if(!has_bindless_textures) {
886                         cuda_push_context();
887
888                         cuda_assert(cuTexRefSetAddressMode(texref, 0, address_mode));
889                         cuda_assert(cuTexRefSetAddressMode(texref, 1, address_mode));
890                         if(mem.data_depth > 1) {
891                                 cuda_assert(cuTexRefSetAddressMode(texref, 2, address_mode));
892                         }
893
894                         cuda_assert(cuTexRefSetFormat(texref, format, mem.data_elements));
895
896                         cuda_pop_context();
897                 }
898
899                 /* Fermi and Kepler */
900                 tex_interp_map[mem.device_pointer] = (interpolation != INTERPOLATION_NONE);
901         }
902
903         void tex_free(device_memory& mem)
904         {
905                 if(mem.device_pointer) {
906                         if(tex_interp_map[mem.device_pointer]) {
907                                 cuda_push_context();
908                                 cuArrayDestroy((CUarray)mem.device_pointer);
909                                 cuda_pop_context();
910
911                                 /* Free CUtexObject (Bindless Textures) */
912                                 if(info.has_bindless_textures && tex_bindless_map[mem.device_pointer]) {
913                                         uint flat_slot = tex_bindless_map[mem.device_pointer];
914                                         cuTexObjectDestroy(flat_slot);
915                                 }
916
917                                 tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
918                                 mem.device_pointer = 0;
919
920                                 stats.mem_free(mem.device_size);
921                                 mem.device_size = 0;
922                         }
923                         else {
924                                 tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
925                                 mem_free(mem);
926                         }
927                 }
928         }
929
930         bool denoising_set_tiles(device_ptr *buffers, DenoisingTask *task)
931         {
932                 mem_alloc("Denoising Tile Info", task->tiles_mem, MEM_READ_ONLY);
933
934                 TilesInfo *tiles = (TilesInfo*) task->tiles_mem.data_pointer;
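                /* The denoiser reads a 3x3 neighbourhood of tiles, hence nine buffer pointers. */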
935                 for(int i = 0; i < 9; i++) {
936                         tiles->buffers[i] = buffers[i];
937                 }
938
939                 mem_copy_to(task->tiles_mem);
940
941                 return !have_error();
942         }
943
944 #define CUDA_GET_BLOCKSIZE(func, w, h)                                                                          \
945                         int threads_per_block;                                                                              \
946                         cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, func)); \
947                         int threads = (int)sqrt((float)threads_per_block);                                                  \
948                         int xblocks = ((w) + threads - 1)/threads;                                                          \
949                         int yblocks = ((h) + threads - 1)/threads;
950
951 #define CUDA_LAUNCH_KERNEL(func, args)                      \
952                         cuda_assert(cuLaunchKernel(func,                \
953                                                    xblocks, yblocks, 1, \
954                                                    threads, threads, 1, \
955                                                    0, 0, args, 0));
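/* The macros above derive a square thread block from the kernel's reported maximum
 * threads per block, and compute the grid size needed to cover a w x h region. */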
956
957         bool denoising_non_local_means(device_ptr image_ptr, device_ptr guide_ptr, device_ptr variance_ptr, device_ptr out_ptr,
958                                        DenoisingTask *task)
959         {
960                 if(have_error())
961                         return false;
962
963                 cuda_push_context();
964
965                 int4 rect = task->rect;
966                 int w = align_up(rect.z-rect.x, 4);
967                 int h = rect.w-rect.y;
968                 int r = task->nlm_state.r;
969                 int f = task->nlm_state.f;
970                 float a = task->nlm_state.a;
971                 float k_2 = task->nlm_state.k_2;
972
973                 CUdeviceptr difference     = task->nlm_state.temporary_1_ptr;
974                 CUdeviceptr blurDifference = task->nlm_state.temporary_2_ptr;
975                 CUdeviceptr weightAccum    = task->nlm_state.temporary_3_ptr;
976
977                 cuda_assert(cuMemsetD8(weightAccum, 0, sizeof(float)*w*h));
978                 cuda_assert(cuMemsetD8(out_ptr, 0, sizeof(float)*w*h));
979
980                 CUfunction cuNLMCalcDifference, cuNLMBlur, cuNLMCalcWeight, cuNLMUpdateOutput, cuNLMNormalize;
981                 cuda_assert(cuModuleGetFunction(&cuNLMCalcDifference, cuFilterModule, "kernel_cuda_filter_nlm_calc_difference"));
982                 cuda_assert(cuModuleGetFunction(&cuNLMBlur,           cuFilterModule, "kernel_cuda_filter_nlm_blur"));
983                 cuda_assert(cuModuleGetFunction(&cuNLMCalcWeight,     cuFilterModule, "kernel_cuda_filter_nlm_calc_weight"));
984                 cuda_assert(cuModuleGetFunction(&cuNLMUpdateOutput,   cuFilterModule, "kernel_cuda_filter_nlm_update_output"));
985                 cuda_assert(cuModuleGetFunction(&cuNLMNormalize,      cuFilterModule, "kernel_cuda_filter_nlm_normalize"));
986
987                 cuda_assert(cuFuncSetCacheConfig(cuNLMCalcDifference, CU_FUNC_CACHE_PREFER_L1));
988                 cuda_assert(cuFuncSetCacheConfig(cuNLMBlur,           CU_FUNC_CACHE_PREFER_L1));
989                 cuda_assert(cuFuncSetCacheConfig(cuNLMCalcWeight,     CU_FUNC_CACHE_PREFER_L1));
990                 cuda_assert(cuFuncSetCacheConfig(cuNLMUpdateOutput,   CU_FUNC_CACHE_PREFER_L1));
991                 cuda_assert(cuFuncSetCacheConfig(cuNLMNormalize,      CU_FUNC_CACHE_PREFER_L1));
992
993                 CUDA_GET_BLOCKSIZE(cuNLMCalcDifference, rect.z-rect.x, rect.w-rect.y);
994
995                 int dx, dy;
996                 int4 local_rect;
997                 int channel_offset = 0;
998                 void *calc_difference_args[] = {&dx, &dy, &guide_ptr, &variance_ptr, &difference, &local_rect, &w, &channel_offset, &a, &k_2};
999                 void *blur_args[]            = {&difference, &blurDifference, &local_rect, &w, &f};
1000                 void *calc_weight_args[]     = {&blurDifference, &difference, &local_rect, &w, &f};
1001                 void *update_output_args[]   = {&dx, &dy, &blurDifference, &image_ptr, &out_ptr, &weightAccum, &local_rect, &w, &f};
1002
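                /* Loop over all (dx, dy) offsets in the (2r+1) x (2r+1) search window. */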
1003                 for(int i = 0; i < (2*r+1)*(2*r+1); i++) {
1004                         dy = i / (2*r+1) - r;
1005                         dx = i % (2*r+1) - r;
1006                         local_rect = make_int4(max(0, -dx), max(0, -dy), rect.z-rect.x - max(0, dx), rect.w-rect.y - max(0, dy));
1007
1008                         CUDA_LAUNCH_KERNEL(cuNLMCalcDifference, calc_difference_args);
1009                         CUDA_LAUNCH_KERNEL(cuNLMBlur, blur_args);
1010                         CUDA_LAUNCH_KERNEL(cuNLMCalcWeight, calc_weight_args);
1011                         CUDA_LAUNCH_KERNEL(cuNLMBlur, blur_args);
1012                         CUDA_LAUNCH_KERNEL(cuNLMUpdateOutput, update_output_args);
1013                 }
1014
1015                 local_rect = make_int4(0, 0, rect.z-rect.x, rect.w-rect.y);
1016                 void *normalize_args[] = {&out_ptr, &weightAccum, &local_rect, &w};
1017                 CUDA_LAUNCH_KERNEL(cuNLMNormalize, normalize_args);
1018                 cuda_assert(cuCtxSynchronize());
1019
1020                 cuda_pop_context();
1021                 return !have_error();
1022         }
1023
1024         bool denoising_construct_transform(DenoisingTask *task)
1025         {
1026                 if(have_error())
1027                         return false;
1028
1029                 cuda_push_context();
1030
1031                 CUfunction cuFilterConstructTransform;
1032                 cuda_assert(cuModuleGetFunction(&cuFilterConstructTransform, cuFilterModule, "kernel_cuda_filter_construct_transform"));
1033                 cuda_assert(cuFuncSetCacheConfig(cuFilterConstructTransform, CU_FUNC_CACHE_PREFER_SHARED));
1034                 CUDA_GET_BLOCKSIZE(cuFilterConstructTransform,
1035                                    task->storage.w,
1036                                    task->storage.h);
1037
1038                 void *args[] = {&task->buffer.mem.device_pointer,
1039                                 &task->storage.transform.device_pointer,
1040                                 &task->storage.rank.device_pointer,
1041                                 &task->filter_area,
1042                                 &task->rect,
1043                                 &task->radius,
1044                                 &task->pca_threshold,
1045                                 &task->buffer.pass_stride};
1046                 CUDA_LAUNCH_KERNEL(cuFilterConstructTransform, args);
1047                 cuda_assert(cuCtxSynchronize());
1048
1049                 cuda_pop_context();
1050                 return !have_error();
1051         }
1052
1053         bool denoising_reconstruct(device_ptr color_ptr,
1054                                    device_ptr color_variance_ptr,
1055                                    device_ptr output_ptr,
1056                                    DenoisingTask *task)
1057         {
1058                 if(have_error())
1059                         return false;
1060
1061                 mem_zero(task->storage.XtWX);
1062                 mem_zero(task->storage.XtWY);
1063
1064                 cuda_push_context();
1065
1066                 CUfunction cuNLMCalcDifference, cuNLMBlur, cuNLMCalcWeight, cuNLMConstructGramian, cuFinalize;
1067                 cuda_assert(cuModuleGetFunction(&cuNLMCalcDifference,   cuFilterModule, "kernel_cuda_filter_nlm_calc_difference"));
1068                 cuda_assert(cuModuleGetFunction(&cuNLMBlur,             cuFilterModule, "kernel_cuda_filter_nlm_blur"));
1069                 cuda_assert(cuModuleGetFunction(&cuNLMCalcWeight,       cuFilterModule, "kernel_cuda_filter_nlm_calc_weight"));
1070                 cuda_assert(cuModuleGetFunction(&cuNLMConstructGramian, cuFilterModule, "kernel_cuda_filter_nlm_construct_gramian"));
1071                 cuda_assert(cuModuleGetFunction(&cuFinalize,            cuFilterModule, "kernel_cuda_filter_finalize"));
1072
1073                 cuda_assert(cuFuncSetCacheConfig(cuNLMCalcDifference,   CU_FUNC_CACHE_PREFER_L1));
1074                 cuda_assert(cuFuncSetCacheConfig(cuNLMBlur,             CU_FUNC_CACHE_PREFER_L1));
1075                 cuda_assert(cuFuncSetCacheConfig(cuNLMCalcWeight,       CU_FUNC_CACHE_PREFER_L1));
1076                 cuda_assert(cuFuncSetCacheConfig(cuNLMConstructGramian, CU_FUNC_CACHE_PREFER_SHARED));
1077                 cuda_assert(cuFuncSetCacheConfig(cuFinalize,            CU_FUNC_CACHE_PREFER_L1));
1078
1079                 CUDA_GET_BLOCKSIZE(cuNLMCalcDifference,
1080                                    task->reconstruction_state.source_w,
1081                                    task->reconstruction_state.source_h);
1082
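                /* Temporary buffers provided by the denoising task, reused across the NLM passes. */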
1083                 CUdeviceptr difference     = task->reconstruction_state.temporary_1_ptr;
1084                 CUdeviceptr blurDifference = task->reconstruction_state.temporary_2_ptr;
1085
1086                 int r = task->radius;
1087                 int f = 4;
1088                 float a = 1.0f;
1089                 for(int i = 0; i < (2*r+1)*(2*r+1); i++) {
1090                         int dy = i / (2*r+1) - r;
1091                         int dx = i % (2*r+1) - r;
1092
1093                         int local_rect[4] = {max(0, -dx), max(0, -dy),
1094                                              task->reconstruction_state.source_w - max(0, dx),
1095                                              task->reconstruction_state.source_h - max(0, dy)};
1096
1097                         void *calc_difference_args[] = {&dx, &dy,
1098                                                         &color_ptr,
1099                                                         &color_variance_ptr,
1100                                                         &difference,
1101                                                         &local_rect,
1102                                                         &task->buffer.w,
1103                                                         &task->buffer.pass_stride,
1104                                                         &a,
1105                                                         &task->nlm_k_2};
1106                         CUDA_LAUNCH_KERNEL(cuNLMCalcDifference, calc_difference_args);
1107
1108                         void *blur_args[] = {&difference,
1109                                              &blurDifference,
1110                                              &local_rect,
1111                                              &task->buffer.w,
1112                                              &f};
1113                         CUDA_LAUNCH_KERNEL(cuNLMBlur, blur_args);
1114
1115                         void *calc_weight_args[] = {&blurDifference,
1116                                                     &difference,
1117                                                     &local_rect,
1118                                                     &task->buffer.w,
1119                                                     &f};
1120                         CUDA_LAUNCH_KERNEL(cuNLMCalcWeight, calc_weight_args);
1121
1122                         /* Reuse previous arguments. */
1123                         CUDA_LAUNCH_KERNEL(cuNLMBlur, blur_args);
1124
1125                         void *construct_gramian_args[] = {&dx, &dy,
1126                                                           &blurDifference,
1127                                                           &task->buffer.mem.device_pointer,
1128                                                           &task->storage.transform.device_pointer,
1129                                                           &task->storage.rank.device_pointer,
1130                                                           &task->storage.XtWX.device_pointer,
1131                                                           &task->storage.XtWY.device_pointer,
1132                                                           &local_rect,
1133                                                           &task->reconstruction_state.filter_rect,
1134                                                           &task->buffer.w,
1135                                                           &task->buffer.h,
1136                                                           &f,
1137                                                           &task->buffer.pass_stride};
1138                         CUDA_LAUNCH_KERNEL(cuNLMConstructGramian, construct_gramian_args);
1139                 }
1140
1141                 void *finalize_args[] = {&task->buffer.w,
1142                                          &task->buffer.h,
1143                                          &output_ptr,
1144                                          &task->storage.rank.device_pointer,
1145                                          &task->storage.XtWX.device_pointer,
1146                                          &task->storage.XtWY.device_pointer,
1147                                          &task->filter_area,
1148                                          &task->reconstruction_state.buffer_params.x,
1149                                          &task->render_buffer.samples};
1150                 CUDA_LAUNCH_KERNEL(cuFinalize, finalize_args);
1151                 cuda_assert(cuCtxSynchronize());
1152
1153                 cuda_pop_context();
1154                 return !have_error();
1155         }
1156
1157         bool denoising_combine_halves(device_ptr a_ptr, device_ptr b_ptr,
1158                                       device_ptr mean_ptr, device_ptr variance_ptr,
1159                                       int r, int4 rect, DenoisingTask *task)
1160         {
1161                 if(have_error())
1162                         return false;
1163
1164                 cuda_push_context();
1165
1166                 CUfunction cuFilterCombineHalves;
1167                 cuda_assert(cuModuleGetFunction(&cuFilterCombineHalves, cuFilterModule, "kernel_cuda_filter_combine_halves"));
1168                 cuda_assert(cuFuncSetCacheConfig(cuFilterCombineHalves, CU_FUNC_CACHE_PREFER_L1));
1169                 CUDA_GET_BLOCKSIZE(cuFilterCombineHalves,
1170                                    task->rect.z-task->rect.x,
1171                                    task->rect.w-task->rect.y);
1172
1173                 void *args[] = {&mean_ptr,
1174                                 &variance_ptr,
1175                                 &a_ptr,
1176                                 &b_ptr,
1177                                 &rect,
1178                                 &r};
1179                 CUDA_LAUNCH_KERNEL(cuFilterCombineHalves, args);
1180                 cuda_assert(cuCtxSynchronize());
1181
1182                 cuda_pop_context();
1183                 return !have_error();
1184         }
1185
1186         bool denoising_divide_shadow(device_ptr a_ptr, device_ptr b_ptr,
1187                                      device_ptr sample_variance_ptr, device_ptr sv_variance_ptr,
1188                                      device_ptr buffer_variance_ptr, DenoisingTask *task)
1189         {
1190                 if(have_error())
1191                         return false;
1192
1193                 cuda_push_context();
1194
1195                 CUfunction cuFilterDivideShadow;
1196                 cuda_assert(cuModuleGetFunction(&cuFilterDivideShadow, cuFilterModule, "kernel_cuda_filter_divide_shadow"));
1197                 cuda_assert(cuFuncSetCacheConfig(cuFilterDivideShadow, CU_FUNC_CACHE_PREFER_L1));
1198                 CUDA_GET_BLOCKSIZE(cuFilterDivideShadow,
1199                                    task->rect.z-task->rect.x,
1200                                    task->rect.w-task->rect.y);
1201
1202                 bool use_split_variance = use_split_kernel();
1203                 void *args[] = {&task->render_buffer.samples,
1204                                 &task->tiles_mem.device_pointer,
1205                                 &a_ptr,
1206                                 &b_ptr,
1207                                 &sample_variance_ptr,
1208                                 &sv_variance_ptr,
1209                                 &buffer_variance_ptr,
1210                                 &task->rect,
1211                                 &task->render_buffer.pass_stride,
1212                                 &task->render_buffer.denoising_data_offset,
1213                                 &use_split_variance};
1214                 CUDA_LAUNCH_KERNEL(cuFilterDivideShadow, args);
1215                 cuda_assert(cuCtxSynchronize());
1216
1217                 cuda_pop_context();
1218                 return !have_error();
1219         }
1220
1221         bool denoising_get_feature(int mean_offset,
1222                                    int variance_offset,
1223                                    device_ptr mean_ptr,
1224                                    device_ptr variance_ptr,
1225                                    DenoisingTask *task)
1226         {
1227                 if(have_error())
1228                         return false;
1229
1230                 cuda_push_context();
1231
1232                 CUfunction cuFilterGetFeature;
1233                 cuda_assert(cuModuleGetFunction(&cuFilterGetFeature, cuFilterModule, "kernel_cuda_filter_get_feature"));
1234                 cuda_assert(cuFuncSetCacheConfig(cuFilterGetFeature, CU_FUNC_CACHE_PREFER_L1));
1235                 CUDA_GET_BLOCKSIZE(cuFilterGetFeature,
1236                                    task->rect.z-task->rect.x,
1237                                    task->rect.w-task->rect.y);
1238
1239                 bool use_split_variance = use_split_kernel();
1240                 void *args[] = {&task->render_buffer.samples,
1241                                 &task->tiles_mem.device_pointer,
1242                                 &mean_offset,
1243                                 &variance_offset,
1244                                 &mean_ptr,
1245                                 &variance_ptr,
1246                                 &task->rect,
1247                                 &task->render_buffer.pass_stride,
1248                                 &task->render_buffer.denoising_data_offset,
1249                                 &use_split_variance};
1250                 CUDA_LAUNCH_KERNEL(cuFilterGetFeature, args);
1251                 cuda_assert(cuCtxSynchronize());
1252
1253                 cuda_pop_context();
1254                 return !have_error();
1255         }
1256
1257         bool denoising_detect_outliers(device_ptr image_ptr,
1258                                        device_ptr variance_ptr,
1259                                        device_ptr depth_ptr,
1260                                        device_ptr output_ptr,
1261                                        DenoisingTask *task)
1262         {
1263                 if(have_error())
1264                         return false;
1265
1266                 cuda_push_context();
1267
1268                 CUfunction cuFilterDetectOutliers;
1269                 cuda_assert(cuModuleGetFunction(&cuFilterDetectOutliers, cuFilterModule, "kernel_cuda_filter_detect_outliers"));
1270                 cuda_assert(cuFuncSetCacheConfig(cuFilterDetectOutliers, CU_FUNC_CACHE_PREFER_L1));
1271                 CUDA_GET_BLOCKSIZE(cuFilterDetectOutliers,
1272                                    task->rect.z-task->rect.x,
1273                                    task->rect.w-task->rect.y);
1274
1275                 void *args[] = {&image_ptr,
1276                                 &variance_ptr,
1277                                 &depth_ptr,
1278                                 &output_ptr,
1279                                 &task->rect,
1280                                 &task->buffer.pass_stride};
1281
1282                 CUDA_LAUNCH_KERNEL(cuFilterDetectOutliers, args);
1283                 cuda_assert(cuCtxSynchronize());
1284
1285                 cuda_pop_context();
1286                 return !have_error();
1287         }
1288
1289         void denoise(RenderTile &rtile, const DeviceTask &task)
1290         {
1291                 DenoisingTask denoising(this);
1292
1293                 denoising.functions.construct_transform = function_bind(&CUDADevice::denoising_construct_transform, this, &denoising);
1294                 denoising.functions.reconstruct = function_bind(&CUDADevice::denoising_reconstruct, this, _1, _2, _3, &denoising);
1295                 denoising.functions.divide_shadow = function_bind(&CUDADevice::denoising_divide_shadow, this, _1, _2, _3, _4, _5, &denoising);
1296                 denoising.functions.non_local_means = function_bind(&CUDADevice::denoising_non_local_means, this, _1, _2, _3, _4, &denoising);
1297                 denoising.functions.combine_halves = function_bind(&CUDADevice::denoising_combine_halves, this, _1, _2, _3, _4, _5, _6, &denoising);
1298                 denoising.functions.get_feature = function_bind(&CUDADevice::denoising_get_feature, this, _1, _2, _3, _4, &denoising);
1299                 denoising.functions.detect_outliers = function_bind(&CUDADevice::denoising_detect_outliers, this, _1, _2, _3, _4, &denoising);
1300                 denoising.functions.set_tiles = function_bind(&CUDADevice::denoising_set_tiles, this, _1, &denoising);
1301
1302                 denoising.filter_area = make_int4(rtile.x, rtile.y, rtile.w, rtile.h);
1303                 denoising.render_buffer.samples = rtile.sample;
1304
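                     /* Denoising uses the 3x3 grid of neighboring tiles; the tile being
                      * denoised sits at the center (index 4). */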
1305                 RenderTile rtiles[9];
1306                 rtiles[4] = rtile;
1307                 task.map_neighbor_tiles(rtiles, this);
1308                 denoising.tiles_from_rendertiles(rtiles);
1309
1310                 denoising.init_from_devicetask(task);
1311
1312                 denoising.run_denoising();
1313
1314                 task.unmap_neighbor_tiles(rtiles, this);
1315         }
1316
1317         void path_trace(RenderTile& rtile, int sample, bool branched)
1318         {
1319                 if(have_error())
1320                         return;
1321
1322                 cuda_push_context();
1323
1324                 CUfunction cuPathTrace;
1325                 CUdeviceptr d_buffer = cuda_device_ptr(rtile.buffer);
1326                 CUdeviceptr d_rng_state = cuda_device_ptr(rtile.rng_state);
1327
1328                 /* get kernel function */
1329                 if(branched) {
1330                         cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_branched_path_trace"));
1331                 }
1332                 else {
1333                         cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_path_trace"));
1334                 }
1335
1336                 if(have_error())
1337                         return;
1338
1339                 /* pass in parameters */
1340                 void *args[] = {&d_buffer,
1341                                 &d_rng_state,
1342                                 &sample,
1343                                 &rtile.x,
1344                                 &rtile.y,
1345                                 &rtile.w,
1346                                 &rtile.h,
1347                                 &rtile.offset,
1348                                 &rtile.stride};
1349
1350                 /* launch kernel */
1351                 int threads_per_block;
1352                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuPathTrace));
1353
1354                 /*int num_registers;
1355                 cuda_assert(cuFuncGetAttribute(&num_registers, CU_FUNC_ATTRIBUTE_NUM_REGS, cuPathTrace));
1356
1357                 printf("threads_per_block %d\n", threads_per_block);
1358                 printf("num_registers %d\n", num_registers);*/
1359
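                     /* Launch a square thread block derived from the kernel's maximum threads
                      * per block, with enough blocks to cover the whole tile (rounded up). */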
1360                 int xthreads = (int)sqrt(threads_per_block);
1361                 int ythreads = (int)sqrt(threads_per_block);
1362                 int xblocks = (rtile.w + xthreads - 1)/xthreads;
1363                 int yblocks = (rtile.h + ythreads - 1)/ythreads;
1364
1365                 cuda_assert(cuFuncSetCacheConfig(cuPathTrace, CU_FUNC_CACHE_PREFER_L1));
1366
1367                 cuda_assert(cuLaunchKernel(cuPathTrace,
1368                                            xblocks , yblocks, 1, /* blocks */
1369                                            xthreads, ythreads, 1, /* threads */
1370                                            0, 0, args, 0));
1371
1372                 cuda_assert(cuCtxSynchronize());
1373
1374                 cuda_pop_context();
1375         }
1376
1377         void film_convert(DeviceTask& task, device_ptr buffer, device_ptr rgba_byte, device_ptr rgba_half)
1378         {
1379                 if(have_error())
1380                         return;
1381
1382                 cuda_push_context();
1383
1384                 CUfunction cuFilmConvert;
1385                 CUdeviceptr d_rgba = map_pixels((rgba_byte)? rgba_byte: rgba_half);
1386                 CUdeviceptr d_buffer = cuda_device_ptr(buffer);
1387
1388                 /* get kernel function */
1389                 if(rgba_half) {
1390                         cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_half_float"));
1391                 }
1392                 else {
1393                         cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_byte"));
1394                 }
1395
1396
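                     /* Accumulated samples are averaged by scaling with 1/(sample + 1). */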
1397                 float sample_scale = 1.0f/(task.sample + 1);
1398
1399                 /* pass in parameters */
1400                 void *args[] = {&d_rgba,
1401                                 &d_buffer,
1402                                 &sample_scale,
1403                                 &task.x,
1404                                 &task.y,
1405                                 &task.w,
1406                                 &task.h,
1407                                 &task.offset,
1408                                 &task.stride};
1409
1410                 /* launch kernel */
1411                 int threads_per_block;
1412                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuFilmConvert));
1413
1414                 int xthreads = (int)sqrt(threads_per_block);
1415                 int ythreads = (int)sqrt(threads_per_block);
1416                 int xblocks = (task.w + xthreads - 1)/xthreads;
1417                 int yblocks = (task.h + ythreads - 1)/ythreads;
1418
1419                 cuda_assert(cuFuncSetCacheConfig(cuFilmConvert, CU_FUNC_CACHE_PREFER_L1));
1420
1421                 cuda_assert(cuLaunchKernel(cuFilmConvert,
1422                                            xblocks , yblocks, 1, /* blocks */
1423                                            xthreads, ythreads, 1, /* threads */
1424                                            0, 0, args, 0));
1425
1426                 unmap_pixels((rgba_byte)? rgba_byte: rgba_half);
1427
1428                 cuda_pop_context();
1429         }
1430
1431         void shader(DeviceTask& task)
1432         {
1433                 if(have_error())
1434                         return;
1435
1436                 cuda_push_context();
1437
1438                 CUfunction cuShader;
1439                 CUdeviceptr d_input = cuda_device_ptr(task.shader_input);
1440                 CUdeviceptr d_output = cuda_device_ptr(task.shader_output);
1441                 CUdeviceptr d_output_luma = cuda_device_ptr(task.shader_output_luma);
1442
1443                 /* get kernel function */
1444                 if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
1445                         cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_bake"));
1446                 }
1447                 else {
1448                         cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_shader"));
1449                 }
1450
1451                 /* do tasks in smaller chunks, so we can cancel them */
1452                 const int shader_chunk_size = 65536;
1453                 const int start = task.shader_x;
1454                 const int end = task.shader_x + task.shader_w;
1455                 int offset = task.offset;
1456
1457                 bool canceled = false;
1458                 for(int sample = 0; sample < task.num_samples && !canceled; sample++) {
1459                         for(int shader_x = start; shader_x < end; shader_x += shader_chunk_size) {
1460                                 int shader_w = min(shader_chunk_size, end - shader_x);
1461
1462                                 /* pass in parameters */
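                                     /* The bake kernel takes an extra filter argument, while the other
                                      * shader evaluation kernels write a separate luma output, so the
                                      * argument list is assembled conditionally. */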
1463                                 void *args[8];
1464                                 int arg = 0;
1465                                 args[arg++] = &d_input;
1466                                 args[arg++] = &d_output;
1467                                 if(task.shader_eval_type < SHADER_EVAL_BAKE) {
1468                                         args[arg++] = &d_output_luma;
1469                                 }
1470                                 args[arg++] = &task.shader_eval_type;
1471                                 if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
1472                                         args[arg++] = &task.shader_filter;
1473                                 }
1474                                 args[arg++] = &shader_x;
1475                                 args[arg++] = &shader_w;
1476                                 args[arg++] = &offset;
1477                                 args[arg++] = &sample;
1478
1479                                 /* launch kernel */
1480                                 int threads_per_block;
1481                                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuShader));
1482
1483                                 int xblocks = (shader_w + threads_per_block - 1)/threads_per_block;
1484
1485                                 cuda_assert(cuFuncSetCacheConfig(cuShader, CU_FUNC_CACHE_PREFER_L1));
1486                                 cuda_assert(cuLaunchKernel(cuShader,
1487                                                            xblocks , 1, 1, /* blocks */
1488                                                            threads_per_block, 1, 1, /* threads */
1489                                                            0, 0, args, 0));
1490
1491                                 cuda_assert(cuCtxSynchronize());
1492
1493                                 if(task.get_cancel()) {
1494                                         canceled = true;
1495                                         break;
1496                                 }
1497                         }
1498
1499                         task.update_progress(NULL);
1500                 }
1501
1502                 cuda_pop_context();
1503         }
1504
1505         CUdeviceptr map_pixels(device_ptr mem)
1506         {
1507                 if(!background) {
1508                         PixelMem pmem = pixel_mem_map[mem];
1509                         CUdeviceptr buffer;
1510
1511                         size_t bytes;
1512                         cuda_assert(cuGraphicsMapResources(1, &pmem.cuPBOresource, 0));
1513                         cuda_assert(cuGraphicsResourceGetMappedPointer(&buffer, &bytes, pmem.cuPBOresource));
1514
1515                         return buffer;
1516                 }
1517
1518                 return cuda_device_ptr(mem);
1519         }
1520
1521         void unmap_pixels(device_ptr mem)
1522         {
1523                 if(!background) {
1524                         PixelMem pmem = pixel_mem_map[mem];
1525
1526                         cuda_assert(cuGraphicsUnmapResources(1, &pmem.cuPBOresource, 0));
1527                 }
1528         }
1529
1530         void pixels_alloc(device_memory& mem)
1531         {
1532                 if(!background) {
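                             /* Interactive display path: allocate an OpenGL PBO and texture, then
                              * register the PBO with CUDA so kernels can write straight into
                              * GL-visible memory. */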
1533                         PixelMem pmem;
1534
1535                         pmem.w = mem.data_width;
1536                         pmem.h = mem.data_height;
1537
1538                         cuda_push_context();
1539
1540                         glGenBuffers(1, &pmem.cuPBO);
1541                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
1542                         if(mem.data_type == TYPE_HALF)
1543                                 glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(GLhalf)*4, NULL, GL_DYNAMIC_DRAW);
1544                         else
1545                                 glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(uint8_t)*4, NULL, GL_DYNAMIC_DRAW);
1546
1547                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1548
1549                         glGenTextures(1, &pmem.cuTexId);
1550                         glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
1551                         if(mem.data_type == TYPE_HALF)
1552                                 glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F_ARB, pmem.w, pmem.h, 0, GL_RGBA, GL_HALF_FLOAT, NULL);
1553                         else
1554                                 glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, pmem.w, pmem.h, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
1555                         glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
1556                         glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
1557                         glBindTexture(GL_TEXTURE_2D, 0);
1558
1559                         CUresult result = cuGraphicsGLRegisterBuffer(&pmem.cuPBOresource, pmem.cuPBO, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);
1560
1561                         if(result == CUDA_SUCCESS) {
1562                                 cuda_pop_context();
1563
1564                                 mem.device_pointer = pmem.cuTexId;
1565                                 pixel_mem_map[mem.device_pointer] = pmem;
1566
1567                                 mem.device_size = mem.memory_size();
1568                                 stats.mem_alloc(mem.device_size);
1569
1570                                 return;
1571                         }
1572                         else {
1573                                 /* failed to register buffer, fallback to no interop */
1574                                 glDeleteBuffers(1, &pmem.cuPBO);
1575                                 glDeleteTextures(1, &pmem.cuTexId);
1576
1577                                 cuda_pop_context();
1578
1579                                 background = true;
1580                         }
1581                 }
1582
1583                 Device::pixels_alloc(mem);
1584         }
1585
1586         void pixels_copy_from(device_memory& mem, int y, int w, int h)
1587         {
1588                 if(!background) {
1589                         PixelMem pmem = pixel_mem_map[mem.device_pointer];
1590
1591                         cuda_push_context();
1592
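                             /* Map the PBO and copy the requested rows back into host memory. */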
1593                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
1594                         uchar *pixels = (uchar*)glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_READ_ONLY);
1595                         size_t offset = sizeof(uchar)*4*y*w;
1596                         memcpy((uchar*)mem.data_pointer + offset, pixels + offset, sizeof(uchar)*4*w*h);
1597                         glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
1598                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1599
1600                         cuda_pop_context();
1601
1602                         return;
1603                 }
1604
1605                 Device::pixels_copy_from(mem, y, w, h);
1606         }
1607
1608         void pixels_free(device_memory& mem)
1609         {
1610                 if(mem.device_pointer) {
1611                         if(!background) {
1612                                 PixelMem pmem = pixel_mem_map[mem.device_pointer];
1613
1614                                 cuda_push_context();
1615
1616                                 cuda_assert(cuGraphicsUnregisterResource(pmem.cuPBOresource));
1617                                 glDeleteBuffers(1, &pmem.cuPBO);
1618                                 glDeleteTextures(1, &pmem.cuTexId);
1619
1620                                 cuda_pop_context();
1621
1622                                 pixel_mem_map.erase(pixel_mem_map.find(mem.device_pointer));
1623                                 mem.device_pointer = 0;
1624
1625                                 stats.mem_free(mem.device_size);
1626                                 mem.device_size = 0;
1627
1628                                 return;
1629                         }
1630
1631                         Device::pixels_free(mem);
1632                 }
1633         }
1634
1635         void draw_pixels(
1636             device_memory& mem, int y,
1637             int w, int h, int width, int height,
1638             int dx, int dy, int dw, int dh, bool transparent,
1639             const DeviceDrawParams &draw_params)
1640         {
1641                 if(!background) {
1642                         const bool use_fallback_shader = (draw_params.bind_display_space_shader_cb == NULL);
1643                         PixelMem pmem = pixel_mem_map[mem.device_pointer];
1644                         float *vpointer;
1645
1646                         cuda_push_context();
1647
1648                         /* for multi devices, this assumes the inefficient method of allocating
1649                          * all pixels on the device even though we only render to a subset */
1650                         size_t offset = 4*y*w;
1651
1652                         if(mem.data_type == TYPE_HALF)
1653                                 offset *= sizeof(GLhalf);
1654                         else
1655                                 offset *= sizeof(uint8_t);
1656
1657                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
1658                         glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
1659                         if(mem.data_type == TYPE_HALF) {
1660                                 glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_HALF_FLOAT, (void*)offset);
1661                         }
1662                         else {
1663                                 glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, (void*)offset);
1664                         }
1665                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1666
1667                         glEnable(GL_TEXTURE_2D);
1668
1669                         if(transparent) {
1670                                 glEnable(GL_BLEND);
1671                                 glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
1672                         }
1673
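                             /* Use the host application's display space shader when a callback is
                              * provided, otherwise bind the device's own fallback shader. */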
1674                         GLint shader_program;
1675                         if(use_fallback_shader) {
1676                                 if(!bind_fallback_display_space_shader(dw, dh)) {
                                             /* Balance the cuda_push_context() above before bailing out. */
                                             cuda_pop_context();
1677                                         return;
1678                                 }
1679                                 shader_program = fallback_shader_program;
1680                         }
1681                         else {
1682                                 draw_params.bind_display_space_shader_cb();
1683                                 glGetIntegerv(GL_CURRENT_PROGRAM, &shader_program);
1684                         }
1685
1686                         if(!vertex_buffer) {
1687                                 glGenBuffers(1, &vertex_buffer);
1688                         }
1689
1690                         glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
1691                         /* invalidate old contents - avoids stalling if buffer is still waiting in queue to be rendered */
1692                         glBufferData(GL_ARRAY_BUFFER, 16 * sizeof(float), NULL, GL_STREAM_DRAW);
1693
1694                         vpointer = (float *)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
1695
1696                         if(vpointer) {
1697                                 /* texture coordinate - vertex pair */
1698                                 vpointer[0] = 0.0f;
1699                                 vpointer[1] = 0.0f;
1700                                 vpointer[2] = dx;
1701                                 vpointer[3] = dy;
1702
1703                                 vpointer[4] = (float)w/(float)pmem.w;
1704                                 vpointer[5] = 0.0f;
1705                                 vpointer[6] = (float)width + dx;
1706                                 vpointer[7] = dy;
1707
1708                                 vpointer[8] = (float)w/(float)pmem.w;
1709                                 vpointer[9] = (float)h/(float)pmem.h;
1710                                 vpointer[10] = (float)width + dx;
1711                                 vpointer[11] = (float)height + dy;
1712
1713                                 vpointer[12] = 0.0f;
1714                                 vpointer[13] = (float)h/(float)pmem.h;
1715                                 vpointer[14] = dx;
1716                                 vpointer[15] = (float)height + dy;
1717
1718                                 glUnmapBuffer(GL_ARRAY_BUFFER);
1719                         }
1720
1721                         GLuint vertex_array_object;
1722                         GLuint position_attribute, texcoord_attribute;
1723
1724                         glGenVertexArrays(1, &vertex_array_object);
1725                         glBindVertexArray(vertex_array_object);
1726
1727                         texcoord_attribute = glGetAttribLocation(shader_program, "texCoord");
1728                         position_attribute = glGetAttribLocation(shader_program, "pos");
1729
1730                         glEnableVertexAttribArray(texcoord_attribute);
1731                         glEnableVertexAttribArray(position_attribute);
1732
1733                         glVertexAttribPointer(texcoord_attribute, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(float), (const GLvoid *)0);
1734                         glVertexAttribPointer(position_attribute, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(float), (const GLvoid *)(sizeof(float) * 2));
1735
1736                         glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
1737
1738                         if(use_fallback_shader) {
1739                                 glUseProgram(0);
1740                         }
1741                         else {
1742                                 draw_params.unbind_display_space_shader_cb();
1743                         }
1744
1745                         if(transparent) {
1746                                 glDisable(GL_BLEND);
1747                         }
1748
1749                         glBindTexture(GL_TEXTURE_2D, 0);
1750                         glDisable(GL_TEXTURE_2D);
1751
1752                         cuda_pop_context();
1753
1754                         return;
1755                 }
1756
1757                 Device::draw_pixels(mem, y, w, h, width, height, dx, dy, dw, dh, transparent, draw_params);
1758         }
1759
1760         void thread_run(DeviceTask *task)
1761         {
1762                 if(task->type == DeviceTask::RENDER) {
1763                         RenderTile tile;
1764
1765                         bool branched = task->integrator_branched;
1766
1767                         /* Upload Bindless Mapping */
1768                         load_bindless_mapping();
1769
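                             /* The split kernel is created lazily on first render; without adaptive
                              * compilation the maximum closure count is requested up front. */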
1770                         DeviceRequestedFeatures requested_features;
1771                         if(use_split_kernel()) {
1772                                 if(!use_adaptive_compilation()) {
1773                                         requested_features.max_closure = 64;
1774                                 }
1775
1776                                 if(split_kernel == NULL) {
1777                                         split_kernel = new CUDASplitKernel(this);
1778                                         split_kernel->load_kernels(requested_features);
1779                                 }
1780                         }
1781
1782                         /* keep rendering tiles until done */
1783                         while(task->acquire_tile(this, tile)) {
1784                                 if(tile.task == RenderTile::PATH_TRACE) {
1785                                         if(use_split_kernel()) {
1786                                                 device_memory void_buffer;
1787                                                 split_kernel->path_trace(task, tile, void_buffer, void_buffer);
1788                                         }
1789                                         else {
1790                                                 int start_sample = tile.start_sample;
1791                                                 int end_sample = tile.start_sample + tile.num_samples;
1792
1793                                                 for(int sample = start_sample; sample < end_sample; sample++) {
1794                                                         if(task->get_cancel()) {
1795                                                                 if(task->need_finish_queue == false)
1796                                                                         break;
1797                                                         }
1798
1799                                                         path_trace(tile, sample, branched);
1800
1801                                                         tile.sample = sample + 1;
1802
1803                                                         task->update_progress(&tile, tile.w*tile.h);
1804                                                 }
1805                                         }
1806                                 }
1807                                 else if(tile.task == RenderTile::DENOISE) {
1808                                         tile.sample = tile.start_sample + tile.num_samples;
1809
1810                                         denoise(tile, *task);
1811
1812                                         task->update_progress(&tile, tile.w*tile.h);
1813                                 }
1814
1815                                 task->release_tile(tile);
1816
1817                                 if(task->get_cancel()) {
1818                                         if(task->need_finish_queue == false)
1819                                                 break;
1820                                 }
1821                         }
1822                 }
1823                 else if(task->type == DeviceTask::SHADER) {
1824                         /* Upload Bindless Mapping */
1825                         load_bindless_mapping();
1826
1827                         shader(*task);
1828
1829                         cuda_push_context();
1830                         cuda_assert(cuCtxSynchronize());
1831                         cuda_pop_context();
1832                 }
1833         }
1834
1835         class CUDADeviceTask : public DeviceTask {
1836         public:
1837                 CUDADeviceTask(CUDADevice *device, DeviceTask& task)
1838                 : DeviceTask(task)
1839                 {
1840                         run = function_bind(&CUDADevice::thread_run, device, this);
1841                 }
1842         };
1843
1844         int get_split_task_count(DeviceTask& /*task*/)
1845         {
1846                 return 1;
1847         }
1848
1849         void task_add(DeviceTask& task)
1850         {
1851                 if(task.type == DeviceTask::FILM_CONVERT) {
1852                         /* must be done in main thread due to opengl access */
1853                         film_convert(task, task.buffer, task.rgba_byte, task.rgba_half);
1854
1855                         cuda_push_context();
1856                         cuda_assert(cuCtxSynchronize());
1857                         cuda_pop_context();
1858                 }
1859                 else {
1860                         task_pool.push(new CUDADeviceTask(this, task));
1861                 }
1862         }
1863
1864         void task_wait()
1865         {
1866                 task_pool.wait();
1867         }
1868
1869         void task_cancel()
1870         {
1871                 task_pool.cancel();
1872         }
1873
1874         friend class CUDASplitKernelFunction;
1875         friend class CUDASplitKernel;
1876 };
1877
1878 /* redefine the cuda_assert macro so it can be used outside of the CUDADevice class
1879  * now that the definition of that class is complete
1880  */
1881 #undef cuda_assert
1882 #define cuda_assert(stmt) \
1883         { \
1884                 CUresult result = stmt; \
1885                 \
1886                 if(result != CUDA_SUCCESS) { \
1887                         string message = string_printf("CUDA error: %s in %s", cuewErrorString(result), #stmt); \
1888                         if(device->error_msg == "") \
1889                                 device->error_msg = message; \
1890                         fprintf(stderr, "%s\n", message.c_str()); \
1891                         /*cuda_abort();*/ \
1892                         device->cuda_error_documentation(); \
1893                 } \
1894         } (void)0
1895
1896 /* split kernel */
1897
1898 class CUDASplitKernelFunction : public SplitKernelFunction {
1899         CUDADevice* device;
1900         CUfunction func;
1901 public:
1902         CUDASplitKernelFunction(CUDADevice *device, CUfunction func) : device(device), func(func) {}
1903
1904         /* enqueue the kernel, returns false if there is an error */
1905         bool enqueue(const KernelDimensions &dim, device_memory &/*kg*/, device_memory &/*data*/)
1906         {
1907                 return enqueue(dim, NULL);
1908         }
1909
1910         /* enqueue the kernel, returns false if there is an error */
1911         bool enqueue(const KernelDimensions &dim, void *args[])
1912         {
1913                 device->cuda_push_context();
1914
1915                 if(device->have_error())
1916                         return false;
1917
1918                 /* we ignore dim.local_size for now, as this is faster */
1919                 int threads_per_block;
1920                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, func));
1921
1922                 int xthreads = (int)sqrt(threads_per_block);
1923                 int ythreads = (int)sqrt(threads_per_block);
1924
1925                 int xblocks = (dim.global_size[0] + xthreads - 1)/xthreads;
1926                 int yblocks = (dim.global_size[1] + ythreads - 1)/ythreads;
1927
1928                 cuda_assert(cuFuncSetCacheConfig(func, CU_FUNC_CACHE_PREFER_L1));
1929
1930                 cuda_assert(cuLaunchKernel(func,
1931                                            xblocks , yblocks, 1, /* blocks */
1932                                            xthreads, ythreads, 1, /* threads */
1933                                            0, 0, args, 0));
1934
1935                 device->cuda_pop_context();
1936
1937                 return !device->have_error();
1938         }
1939 };
1940
1941 CUDASplitKernel::CUDASplitKernel(CUDADevice *device) : DeviceSplitKernel(device), device(device)
1942 {
1943 }
1944
1945 uint64_t CUDASplitKernel::state_buffer_size(device_memory& /*kg*/, device_memory& /*data*/, size_t num_threads)
1946 {
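             /* Query the state size from the device: a one-thread helper kernel writes
              * the value, which is then copied back to the host. */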
1947         device_vector<uint64_t> size_buffer;
1948         size_buffer.resize(1);
1949         device->mem_alloc(NULL, size_buffer, MEM_READ_WRITE);
1950
1951         device->cuda_push_context();
1952
1953         uint threads = num_threads;
1954         CUdeviceptr d_size = device->cuda_device_ptr(size_buffer.device_pointer);
1955
1956         struct args_t {
1957                 uint* num_threads;
1958                 CUdeviceptr* size;
1959         };
1960
1961         args_t args = {
1962                 &threads,
1963                 &d_size
1964         };
1965
1966         CUfunction state_buffer_size;
1967         cuda_assert(cuModuleGetFunction(&state_buffer_size, device->cuModule, "kernel_cuda_state_buffer_size"));
1968
1969         cuda_assert(cuLaunchKernel(state_buffer_size,
1970                                    1, 1, 1,
1971                                    1, 1, 1,
1972                                    0, 0, (void**)&args, 0));
1973
1974         device->cuda_pop_context();
1975
1976         device->mem_copy_from(size_buffer, 0, 1, 1, sizeof(uint64_t));
1977         device->mem_free(size_buffer);
1978
1979         return *size_buffer.get_data();
1980 }
1981
1982 bool CUDASplitKernel::enqueue_split_kernel_data_init(const KernelDimensions& dim,
1983                                     RenderTile& rtile,
1984                                     int num_global_elements,
1985                                     device_memory& /*kernel_globals*/,
1986                                     device_memory& /*kernel_data*/,
1987                                     device_memory& split_data,
1988                                     device_memory& ray_state,
1989                                     device_memory& queue_index,
1990                                     device_memory& use_queues_flag,
1991                                     device_memory& work_pool_wgs)
1992 {
1993         device->cuda_push_context();
1994
1995         CUdeviceptr d_split_data = device->cuda_device_ptr(split_data.device_pointer);
1996         CUdeviceptr d_ray_state = device->cuda_device_ptr(ray_state.device_pointer);
1997         CUdeviceptr d_queue_index = device->cuda_device_ptr(queue_index.device_pointer);
1998         CUdeviceptr d_use_queues_flag = device->cuda_device_ptr(use_queues_flag.device_pointer);
1999         CUdeviceptr d_work_pool_wgs = device->cuda_device_ptr(work_pool_wgs.device_pointer);
2000
2001         CUdeviceptr d_rng_state = device->cuda_device_ptr(rtile.rng_state);
2002         CUdeviceptr d_buffer = device->cuda_device_ptr(rtile.buffer);
2003
2004         int end_sample = rtile.start_sample + rtile.num_samples;
2005         int queue_size = dim.global_size[0] * dim.global_size[1];
2006
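             /* args_t mirrors the kernel parameter list; as a plain struct of pointers it
              * can be passed to cuLaunchKernel as the kernelParams array. */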
2007         struct args_t {
2008                 CUdeviceptr* split_data_buffer;
2009                 int* num_elements;
2010                 CUdeviceptr* ray_state;
2011                 CUdeviceptr* rng_state;
2012                 int* start_sample;
2013                 int* end_sample;
2014                 int* sx;
2015                 int* sy;
2016                 int* sw;
2017                 int* sh;
2018                 int* offset;
2019                 int* stride;
2020                 CUdeviceptr* queue_index;
2021                 int* queuesize;
2022                 CUdeviceptr* use_queues_flag;
2023                 CUdeviceptr* work_pool_wgs;
2024                 int* num_samples;
2025                 CUdeviceptr* buffer;
2026         };
2027
2028         args_t args = {
2029                 &d_split_data,
2030                 &num_global_elements,
2031                 &d_ray_state,
2032                 &d_rng_state,
2033                 &rtile.start_sample,
2034                 &end_sample,
2035                 &rtile.x,
2036                 &rtile.y,
2037                 &rtile.w,
2038                 &rtile.h,
2039                 &rtile.offset,
2040                 &rtile.stride,
2041                 &d_queue_index,
2042                 &queue_size,
2043                 &d_use_queues_flag,
2044                 &d_work_pool_wgs,
2045                 &rtile.num_samples,
2046                 &d_buffer
2047         };
2048
2049         CUfunction data_init;
2050         cuda_assert(cuModuleGetFunction(&data_init, device->cuModule, "kernel_cuda_path_trace_data_init"));
2051         if(device->have_error()) {
2052                 return false;
2053         }
2054
2055         CUDASplitKernelFunction(device, data_init).enqueue(dim, (void**)&args);
2056
2057         device->cuda_pop_context();
2058
2059         return !device->have_error();
2060 }
2061
2062 SplitKernelFunction* CUDASplitKernel::get_split_kernel_function(const string& kernel_name,
2063                                                                 const DeviceRequestedFeatures&)
2064 {
2065         CUfunction func;
2066
2067         device->cuda_push_context();
2068
2069         cuda_assert(cuModuleGetFunction(&func, device->cuModule, (string("kernel_cuda_") + kernel_name).data()));
2070         if(device->have_error()) {
2071                 device->cuda_error_message(string_printf("kernel \"kernel_cuda_%s\" not found in module", kernel_name.data()));
2072                 return NULL;
2073         }
2074
2075         device->cuda_pop_context();
2076
2077         return new CUDASplitKernelFunction(device, func);
2078 }
2079
2080 int2 CUDASplitKernel::split_kernel_local_size()
2081 {
2082         return make_int2(32, 1);
2083 }
2084
2085 int2 CUDASplitKernel::split_kernel_global_size(device_memory& kg, device_memory& data, DeviceTask * /*task*/)
2086 {
2087         size_t free;
2088         size_t total;
2089
2090         device->cuda_push_context();
2091         cuda_assert(cuMemGetInfo(&free, &total));
2092         device->cuda_pop_context();
2093
2094         VLOG(1) << "Maximum device allocation size: "
2095                 << string_human_readable_number(free) << " bytes. ("
2096                 << string_human_readable_size(free) << ").";
2097
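             /* Budget at most half of the free device memory for split kernel state, then
              * round the work size down to multiples of 32 and 16. */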
2098         size_t num_elements = max_elements_for_max_buffer_size(kg, data, free / 2);
2099         size_t side = round_down((int)sqrt(num_elements), 32);
2100         int2 global_size = make_int2(side, round_down(num_elements / side, 16));
2101         VLOG(1) << "Global size: " << global_size << ".";
2102         return global_size;
2103 }
2104
2105 bool device_cuda_init(void)
2106 {
2107 #ifdef WITH_CUDA_DYNLOAD
2108         static bool initialized = false;
2109         static bool result = false;
2110
2111         if(initialized)
2112                 return result;
2113
2114         initialized = true;
2115         int cuew_result = cuewInit();
2116         if(cuew_result == CUEW_SUCCESS) {
2117                 VLOG(1) << "CUEW initialization succeeded";
2118                 if(CUDADevice::have_precompiled_kernels()) {
2119                         VLOG(1) << "Found precompiled kernels";
2120                         result = true;
2121                 }
2122 #ifndef _WIN32
2123                 else if(cuewCompilerPath() != NULL) {
2124                         VLOG(1) << "Found CUDA compiler " << cuewCompilerPath();
2125                         result = true;
2126                 }
2127                 else {
2128                         VLOG(1) << "Neither precompiled kernels nor CUDA compiler was found,"
2129                                 << " unable to use CUDA";
2130                 }
2131 #endif
2132         }
2133         else {
2134                 VLOG(1) << "CUEW initialization failed: "
2135                         << ((cuew_result == CUEW_ERROR_ATEXIT_FAILED)
2136                             ? "Error setting up atexit() handler"
2137                             : "Error opening the library");
2138         }
2139
2140         return result;
2141 #else  /* WITH_CUDA_DYNLOAD */
2142         return true;
2143 #endif /* WITH_CUDA_DYNLOAD */
2144 }
2145
2146 Device *device_cuda_create(DeviceInfo& info, Stats &stats, bool background)
2147 {
2148         return new CUDADevice(info, stats, background);
2149 }
2150
2151 void device_cuda_info(vector<DeviceInfo>& devices)
2152 {
2153         CUresult result;
2154         int count = 0;
2155
2156         result = cuInit(0);
2157         if(result != CUDA_SUCCESS) {
2158                 if(result != CUDA_ERROR_NO_DEVICE)
2159                         fprintf(stderr, "CUDA cuInit: %s\n", cuewErrorString(result));
2160                 return;
2161         }
2162
2163         result = cuDeviceGetCount(&count);
2164         if(result != CUDA_SUCCESS) {
2165                 fprintf(stderr, "CUDA cuDeviceGetCount: %s\n", cuewErrorString(result));
2166                 return;
2167         }
2168
2169         vector<DeviceInfo> display_devices;
2170
2171         for(int num = 0; num < count; num++) {
2172                 char name[256];
2173                 int attr;
2174
2175                 if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS)
2176                         continue;
2177
2178                 int major;
2179                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, num);
2180                 if(major < 2) {
2181                         continue;
2182                 }
2183
2184                 DeviceInfo info;
2185
2186                 info.type = DEVICE_CUDA;
2187                 info.description = string(name);
2188                 info.num = num;
2189
2190                 info.advanced_shading = (major >= 2);
2191                 info.has_bindless_textures = (major >= 3);
2192                 info.pack_images = false;
2193
2194                 int pci_location[3] = {0, 0, 0};
2195                 cuDeviceGetAttribute(&pci_location[0], CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID, num);
2196                 cuDeviceGetAttribute(&pci_location[1], CU_DEVICE_ATTRIBUTE_PCI_BUS_ID, num);
2197                 cuDeviceGetAttribute(&pci_location[2], CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID, num);
2198                 info.id = string_printf("CUDA_%s_%04x:%02x:%02x",
2199                                         name,
2200                                         (unsigned int)pci_location[0],
2201                                         (unsigned int)pci_location[1],
2202                                         (unsigned int)pci_location[2]);
2203
2204                 /* if device has a kernel timeout, assume it is used for display */
2205                 if(cuDeviceGetAttribute(&attr, CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT, num) == CUDA_SUCCESS && attr == 1) {
2206                         info.description += " (Display)";
2207                         info.display_device = true;
2208                         display_devices.push_back(info);
2209                 }
2210                 else
2211                         devices.push_back(info);
2212         }
2213
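             /* Display devices are appended last so that compute-only devices come first
              * in the list. */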
2214         if(!display_devices.empty())
2215                 devices.insert(devices.end(), display_devices.begin(), display_devices.end());
2216 }
2217
2218 string device_cuda_capabilities(void)
2219 {
2220         CUresult result = cuInit(0);
2221         if(result != CUDA_SUCCESS) {
2222                 if(result != CUDA_ERROR_NO_DEVICE) {
2223                         return string("Error initializing CUDA: ") + cuewErrorString(result);
2224                 }
2225                 return "No CUDA device found\n";
2226         }
2227
2228         int count;
2229         result = cuDeviceGetCount(&count);
2230         if(result != CUDA_SUCCESS) {
2231                 return string("Error getting devices: ") + cuewErrorString(result);
2232         }
2233
2234         string capabilities = "";
2235         for(int num = 0; num < count; num++) {
2236                 char name[256];
2237                 if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS) {
2238                         continue;
2239                 }
2240                 capabilities += string("\t") + name + "\n";
2241                 int value;
2242 #define GET_ATTR(attr) \
2243                 { \
2244                         if(cuDeviceGetAttribute(&value, \
2245                                                 CU_DEVICE_ATTRIBUTE_##attr, \
2246                                                 num) == CUDA_SUCCESS) \
2247                         { \
2248                                 capabilities += string_printf("\t\tCU_DEVICE_ATTRIBUTE_" #attr "\t\t\t%d\n", \
2249                                                               value); \
2250                         } \
2251                 } (void)0
2252                 /* TODO(sergey): Strip all attributes which are not useful for us
2253                  * or do not depend on the driver.
2254                  */
2255                 GET_ATTR(MAX_THREADS_PER_BLOCK);
2256                 GET_ATTR(MAX_BLOCK_DIM_X);
2257                 GET_ATTR(MAX_BLOCK_DIM_Y);
2258                 GET_ATTR(MAX_BLOCK_DIM_Z);
2259                 GET_ATTR(MAX_GRID_DIM_X);
2260                 GET_ATTR(MAX_GRID_DIM_Y);
2261                 GET_ATTR(MAX_GRID_DIM_Z);
2262                 GET_ATTR(MAX_SHARED_MEMORY_PER_BLOCK);
2263                 GET_ATTR(SHARED_MEMORY_PER_BLOCK);
2264                 GET_ATTR(TOTAL_CONSTANT_MEMORY);
2265                 GET_ATTR(WARP_SIZE);
2266                 GET_ATTR(MAX_PITCH);
2267                 GET_ATTR(MAX_REGISTERS_PER_BLOCK);
2268                 GET_ATTR(REGISTERS_PER_BLOCK);
2269                 GET_ATTR(CLOCK_RATE);
2270                 GET_ATTR(TEXTURE_ALIGNMENT);
2271                 GET_ATTR(GPU_OVERLAP);
2272                 GET_ATTR(MULTIPROCESSOR_COUNT);
2273                 GET_ATTR(KERNEL_EXEC_TIMEOUT);
2274                 GET_ATTR(INTEGRATED);
2275                 GET_ATTR(CAN_MAP_HOST_MEMORY);
2276                 GET_ATTR(COMPUTE_MODE);
2277                 GET_ATTR(MAXIMUM_TEXTURE1D_WIDTH);
2278                 GET_ATTR(MAXIMUM_TEXTURE2D_WIDTH);
2279                 GET_ATTR(MAXIMUM_TEXTURE2D_HEIGHT);
2280                 GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH);
2281                 GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT);
2282                 GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH);
2283                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_WIDTH);
2284                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_HEIGHT);
2285                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_LAYERS);
2286                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_WIDTH);
2287                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_HEIGHT);
2288                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES);
2289                 GET_ATTR(SURFACE_ALIGNMENT);
2290                 GET_ATTR(CONCURRENT_KERNELS);
2291                 GET_ATTR(ECC_ENABLED);
2292                 GET_ATTR(TCC_DRIVER);
2293                 GET_ATTR(MEMORY_CLOCK_RATE);
2294                 GET_ATTR(GLOBAL_MEMORY_BUS_WIDTH);
2295                 GET_ATTR(L2_CACHE_SIZE);
2296                 GET_ATTR(MAX_THREADS_PER_MULTIPROCESSOR);
2297                 GET_ATTR(ASYNC_ENGINE_COUNT);
2298                 GET_ATTR(UNIFIED_ADDRESSING);
2299                 GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_WIDTH);
2300                 GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_LAYERS);
2301                 GET_ATTR(CAN_TEX2D_GATHER);
2302                 GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_WIDTH);
2303                 GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_HEIGHT);
2304                 GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE);
2305                 GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE);
2306                 GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE);
2307                 GET_ATTR(TEXTURE_PITCH_ALIGNMENT);
2308                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_WIDTH);
2309                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH);
2310                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS);
2311                 GET_ATTR(MAXIMUM_SURFACE1D_WIDTH);
2312                 GET_ATTR(MAXIMUM_SURFACE2D_WIDTH);
2313                 GET_ATTR(MAXIMUM_SURFACE2D_HEIGHT);
2314                 GET_ATTR(MAXIMUM_SURFACE3D_WIDTH);
2315                 GET_ATTR(MAXIMUM_SURFACE3D_HEIGHT);
2316                 GET_ATTR(MAXIMUM_SURFACE3D_DEPTH);
2317                 GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_WIDTH);
2318                 GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_LAYERS);
2319                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_WIDTH);
2320                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_HEIGHT);
2321                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_LAYERS);
2322                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_WIDTH);
2323                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH);
2324                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS);
2325                 GET_ATTR(MAXIMUM_TEXTURE1D_LINEAR_WIDTH);
2326                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_WIDTH);
2327                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_HEIGHT);
2328                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_PITCH);
2329                 GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH);
2330                 GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT);
2331                 GET_ATTR(COMPUTE_CAPABILITY_MAJOR);
2332                 GET_ATTR(COMPUTE_CAPABILITY_MINOR);
2333                 GET_ATTR(MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH);
2334                 GET_ATTR(STREAM_PRIORITIES_SUPPORTED);
2335                 GET_ATTR(GLOBAL_L1_CACHE_SUPPORTED);
2336                 GET_ATTR(LOCAL_L1_CACHE_SUPPORTED);
2337                 GET_ATTR(MAX_SHARED_MEMORY_PER_MULTIPROCESSOR);
2338                 GET_ATTR(MAX_REGISTERS_PER_MULTIPROCESSOR);
2339                 GET_ATTR(MANAGED_MEMORY);
2340                 GET_ATTR(MULTI_GPU_BOARD);
2341                 GET_ATTR(MULTI_GPU_BOARD_GROUP_ID);
2342 #undef GET_ATTR
2343                 capabilities += "\n";
2344         }
2345
2346         return capabilities;
2347 }
2348
2349 CCL_NAMESPACE_END