Code refactor: move more memory allocation logic into device API.
[blender-staging.git] / intern / cycles / device / device_cuda.cpp
1 /*
2  * Copyright 2011-2013 Blender Foundation
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16
17 #include <climits>
18 #include <limits.h>
19 #include <stdio.h>
20 #include <stdlib.h>
21 #include <string.h>
22
23 #include "device/device.h"
24 #include "device/device_denoising.h"
25 #include "device/device_intern.h"
26 #include "device/device_split_kernel.h"
27
28 #include "render/buffers.h"
29
30 #include "kernel/filter/filter_defines.h"
31
32 #ifdef WITH_CUDA_DYNLOAD
33 #  include "cuew.h"
34 #else
35 #  include "util/util_opengl.h"
36 #  include <cuda.h>
37 #  include <cudaGL.h>
38 #endif
39 #include "util/util_debug.h"
40 #include "util/util_logging.h"
41 #include "util/util_map.h"
42 #include "util/util_md5.h"
43 #include "util/util_opengl.h"
44 #include "util/util_path.h"
45 #include "util/util_string.h"
46 #include "util/util_system.h"
47 #include "util/util_types.h"
48 #include "util/util_time.h"
49
50 #include "kernel/split/kernel_split_data_types.h"
51
52 CCL_NAMESPACE_BEGIN
53
54 #ifndef WITH_CUDA_DYNLOAD
55
56 /* Transparently implement some functions, so the majority of the file does not
57  * need to worry about the difference between dynamically loaded and linked CUDA.
58  */
59
60 namespace {
61
62 const char *cuewErrorString(CUresult result)
63 {
64         /* We can only give the error code here without major code duplication, which
65          * should be enough since dynamic loading is only disabled by folks who know
66          * what they're doing anyway.
67          *
68          * NOTE: Avoid calling this from several threads.
69          */
70         static string error;
71         error = string_printf("%d", result);
72         return error.c_str();
73 }
74
75 const char *cuewCompilerPath(void)
76 {
77         return CYCLES_CUDA_NVCC_EXECUTABLE;
78 }
79
80 int cuewCompilerVersion(void)
81 {
82         return (CUDA_VERSION / 100) + (CUDA_VERSION % 100 / 10);
83 }
84
85 }  /* namespace */
86 #endif  /* WITH_CUDA_DYNLOAD */
87
88 class CUDADevice;
89
90 class CUDASplitKernel : public DeviceSplitKernel {
91         CUDADevice *device;
92 public:
93         explicit CUDASplitKernel(CUDADevice *device);
94
95         virtual uint64_t state_buffer_size(device_memory& kg, device_memory& data, size_t num_threads);
96
97         virtual bool enqueue_split_kernel_data_init(const KernelDimensions& dim,
98                                                     RenderTile& rtile,
99                                                     int num_global_elements,
100                                                     device_memory& kernel_globals,
101                                                     device_memory& kernel_data_,
102                                                     device_memory& split_data,
103                                                     device_memory& ray_state,
104                                                     device_memory& queue_index,
105                                                     device_memory& use_queues_flag,
106                                                     device_memory& work_pool_wgs);
107
108         virtual SplitKernelFunction* get_split_kernel_function(const string& kernel_name,
109                                                                const DeviceRequestedFeatures&);
110         virtual int2 split_kernel_local_size();
111         virtual int2 split_kernel_global_size(device_memory& kg, device_memory& data, DeviceTask *task);
112 };
113
114 /* Utility to push/pop CUDA context. */
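/* The constructor pushes the device's CUcontext and the destructor pops it
 * again, so driver API calls are simply bracketed on the stack, roughly:
 *
 *   {
 *           CUDAContextScope scope(this);
 *           cuda_assert(cuCtxSynchronize());
 *   }
 */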
115 class CUDAContextScope {
116 public:
117         CUDAContextScope(CUDADevice *device);
118         ~CUDAContextScope();
119
120 private:
121         CUDADevice *device;
122 };
123
124 class CUDADevice : public Device
125 {
126 public:
127         DedicatedTaskPool task_pool;
128         CUdevice cuDevice;
129         CUcontext cuContext;
130         CUmodule cuModule, cuFilterModule;
131         map<device_ptr, bool> tex_interp_map;
132         map<device_ptr, CUtexObject> tex_bindless_map;
133         int cuDevId;
134         int cuDevArchitecture;
135         bool first_error;
136         CUDASplitKernel *split_kernel;
137
138         struct PixelMem {
139                 GLuint cuPBO;
140                 CUgraphicsResource cuPBOresource;
141                 GLuint cuTexId;
142                 int w, h;
143         };
144
145         map<device_ptr, PixelMem> pixel_mem_map;
146
147         /* Bindless Textures */
148         device_vector<TextureInfo> texture_info;
149         bool need_texture_info;
150
151         CUdeviceptr cuda_device_ptr(device_ptr mem)
152         {
153                 return (CUdeviceptr)mem;
154         }
155
156         static bool have_precompiled_kernels()
157         {
158                 string cubins_path = path_get("lib");
159                 return path_exists(cubins_path);
160         }
161
162         virtual bool show_samples() const
163         {
164                 /* The CUDADevice only processes one tile at a time, so showing samples is fine. */
165                 return true;
166         }
167
168 /*#ifdef NDEBUG
169 #define cuda_abort()
170 #else
171 #define cuda_abort() abort()
172 #endif*/
173         void cuda_error_documentation()
174         {
175                 if(first_error) {
176                         fprintf(stderr, "\nRefer to the Cycles GPU rendering documentation for possible solutions:\n");
177                         fprintf(stderr, "https://docs.blender.org/manual/en/dev/render/cycles/gpu_rendering.html\n\n");
178                         first_error = false;
179                 }
180         }
181
182 #define cuda_assert(stmt) \
183         { \
184                 CUresult result = stmt; \
185                 \
186                 if(result != CUDA_SUCCESS) { \
187                         string message = string_printf("CUDA error: %s in %s, line %d", cuewErrorString(result), #stmt, __LINE__); \
188                         if(error_msg == "") \
189                                 error_msg = message; \
190                         fprintf(stderr, "%s\n", message.c_str()); \
191                         /*cuda_abort();*/ \
192                         cuda_error_documentation(); \
193                 } \
194         } (void)0
195
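        /* Every CUDA driver call below goes through cuda_assert() or cuda_error_(),
         * so a failure records error_msg and prints a message without aborting the
         * render; a minimal sketch of the pattern:
         *
         *   CUdeviceptr ptr;
         *   cuda_assert(cuMemAlloc(&ptr, size));
         *   cuda_assert(cuMemFree(ptr));
         */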
196         bool cuda_error_(CUresult result, const string& stmt)
197         {
198                 if(result == CUDA_SUCCESS)
199                         return false;
200
201                 string message = string_printf("CUDA error at %s: %s", stmt.c_str(), cuewErrorString(result));
202                 if(error_msg == "")
203                         error_msg = message;
204                 fprintf(stderr, "%s\n", message.c_str());
205                 cuda_error_documentation();
206                 return true;
207         }
208
209 #define cuda_error(stmt) cuda_error_(stmt, #stmt)
210
211         void cuda_error_message(const string& message)
212         {
213                 if(error_msg == "")
214                         error_msg = message;
215                 fprintf(stderr, "%s\n", message.c_str());
216                 cuda_error_documentation();
217         }
218
219         CUDADevice(DeviceInfo& info, Stats &stats, bool background_)
220         : Device(info, stats, background_),
221           texture_info(this, "__texture_info", MEM_TEXTURE)
222         {
223                 first_error = true;
224                 background = background_;
225
226                 cuDevId = info.num;
227                 cuDevice = 0;
228                 cuContext = 0;
229
230                 cuModule = 0;
231                 cuFilterModule = 0;
232
233                 split_kernel = NULL;
234
235                 need_texture_info = false;
236
237                 /* initialize */
238                 if(cuda_error(cuInit(0)))
239                         return;
240
241                 /* setup device and context */
242                 if(cuda_error(cuDeviceGet(&cuDevice, cuDevId)))
243                         return;
244
245                 CUresult result;
246
247                 if(background) {
248                         result = cuCtxCreate(&cuContext, 0, cuDevice);
249                 }
250                 else {
251                         result = cuGLCtxCreate(&cuContext, 0, cuDevice);
252
253                         if(result != CUDA_SUCCESS) {
254                                 result = cuCtxCreate(&cuContext, 0, cuDevice);
255                                 background = true;
256                         }
257                 }
258
259                 if(cuda_error_(result, "cuCtxCreate"))
260                         return;
261
262                 int major, minor;
263                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
264                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
265                 cuDevArchitecture = major*100 + minor*10;
266
267                 /* Pop context set by cuCtxCreate. */
268                 cuCtxPopCurrent(NULL);
269         }
270
271         ~CUDADevice()
272         {
273                 task_pool.stop();
274
275                 delete split_kernel;
276
277                 if(info.has_bindless_textures) {
278                         texture_info.free();
279                 }
280
281                 cuda_assert(cuCtxDestroy(cuContext));
282         }
283
284         bool support_device(const DeviceRequestedFeatures& /*requested_features*/)
285         {
286                 int major, minor;
287                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
288                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
289
290                 /* We only support sm_20 and above */
291                 if(major < 2) {
292                         cuda_error_message(string_printf("CUDA device supported only with compute capability 2.0 or up, found %d.%d.", major, minor));
293                         return false;
294                 }
295
296                 return true;
297         }
298
299         bool use_adaptive_compilation()
300         {
301                 return DebugFlags().cuda.adaptive_compile;
302         }
303
304         bool use_split_kernel()
305         {
306                 return DebugFlags().cuda.split_kernel;
307         }
308
309         /* Common NVCC flags which stay the same regardless of shading model or
310          * kernel sources md5, and depend only on the compiler and compilation settings.
311          */
312         string compile_kernel_get_common_cflags(
313                 const DeviceRequestedFeatures& requested_features,
314                 bool filter=false, bool split=false)
315         {
316                 const int cuda_version = cuewCompilerVersion();
317                 const int machine = system_cpu_bits();
318                 const string source_path = path_get("source");
319                 const string include_path = source_path;
320                 string cflags = string_printf("-m%d "
321                                               "--ptxas-options=\"-v\" "
322                                               "--use_fast_math "
323                                               "-DNVCC "
324                                               "-D__KERNEL_CUDA_VERSION__=%d "
325                                                "-I\"%s\"",
326                                               machine,
327                                               cuda_version,
328                                               include_path.c_str());
329                 if(!filter && use_adaptive_compilation()) {
330                         cflags += " " + requested_features.get_build_options();
331                 }
332                 const char *extra_cflags = getenv("CYCLES_CUDA_EXTRA_CFLAGS");
333                 if(extra_cflags) {
334                         cflags += string(" ") + string(extra_cflags);
335                 }
336 #ifdef WITH_CYCLES_DEBUG
337                 cflags += " -D__KERNEL_DEBUG__";
338 #endif
339
340                 if(split) {
341                         cflags += " -D__SPLIT__";
342                 }
343
344                 return cflags;
345         }
346
347         bool compile_check_compiler() {
348                 const char *nvcc = cuewCompilerPath();
349                 if(nvcc == NULL) {
350                         cuda_error_message("CUDA nvcc compiler not found. "
351                                            "Install CUDA toolkit in default location.");
352                         return false;
353                 }
354                 const int cuda_version = cuewCompilerVersion();
355                 VLOG(1) << "Found nvcc " << nvcc
356                         << ", CUDA version " << cuda_version
357                         << ".";
358                 const int major = cuda_version / 10, minor = cuda_version % 10;
359                 if(cuda_version == 0) {
360                         cuda_error_message("CUDA nvcc compiler version could not be parsed.");
361                         return false;
362                 }
363                 if(cuda_version < 80) {
364                         printf("Unsupported CUDA version %d.%d detected, "
365                                "you need CUDA 8.0 or newer.\n",
366                                major, minor);
367                         return false;
368                 }
369                 else if(cuda_version != 80) {
370                         printf("CUDA version %d.%d detected, build may succeed but only "
371                                "CUDA 8.0 is officially supported.\n",
372                                major, minor);
373                 }
374                 return true;
375         }
376
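        /* Pick the cubin to load, in order of preference: a pre-compiled kernel
         * shipped in lib/ (unless adaptive compilation is enabled), then a locally
         * compiled kernel cached under a name derived from the md5 of the kernel
         * sources and the nvcc cflags, and finally a fresh nvcc compile into that
         * cache location. Returns an empty string on failure.
         */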
377         string compile_kernel(const DeviceRequestedFeatures& requested_features,
378                               bool filter=false, bool split=false)
379         {
380                 const char *name, *source;
381                 if(filter) {
382                         name = "filter";
383                         source = "filter.cu";
384                 }
385                 else if(split) {
386                         name = "kernel_split";
387                         source = "kernel_split.cu";
388                 }
389                 else {
390                         name = "kernel";
391                         source = "kernel.cu";
392                 }
393                 /* Compute cubin name. */
394                 int major, minor;
395                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
396                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
397
398                 /* Attempt to use kernel provided with Blender. */
399                 if(!use_adaptive_compilation()) {
400                         const string cubin = path_get(string_printf("lib/%s_sm_%d%d.cubin",
401                                                                     name, major, minor));
402                         VLOG(1) << "Testing for pre-compiled kernel " << cubin << ".";
403                         if(path_exists(cubin)) {
404                                 VLOG(1) << "Using precompiled kernel.";
405                                 return cubin;
406                         }
407                 }
408
409                 const string common_cflags =
410                         compile_kernel_get_common_cflags(requested_features, filter, split);
411
412                 /* Try to use locally compiled kernel. */
413                 const string source_path = path_get("source");
414                 const string kernel_md5 = path_files_md5_hash(source_path);
415
416                 /* We include cflags in the md5 so that changing the CUDA toolkit or other
417                  * compiler command line arguments makes sure the cubin gets re-built.
418                  */
419                 const string cubin_md5 = util_md5_string(kernel_md5 + common_cflags);
420
421                 const string cubin_file = string_printf("cycles_%s_sm%d%d_%s.cubin",
422                                                         name, major, minor,
423                                                         cubin_md5.c_str());
424                 const string cubin = path_cache_get(path_join("kernels", cubin_file));
425                 VLOG(1) << "Testing for locally compiled kernel " << cubin << ".";
426                 if(path_exists(cubin)) {
427                         VLOG(1) << "Using locally compiled kernel.";
428                         return cubin;
429                 }
430
431 #ifdef _WIN32
432                 if(have_precompiled_kernels()) {
433                         if(major < 2) {
434                                 cuda_error_message(string_printf(
435                                         "CUDA device requires compute capability 2.0 or up, "
436                                         "found %d.%d. Your GPU is not supported.",
437                                         major, minor));
438                         }
439                         else {
440                                 cuda_error_message(string_printf(
441                                         "CUDA binary kernel for this graphics card compute "
442                                         "capability (%d.%d) not found.",
443                                         major, minor));
444                         }
445                         return "";
446                 }
447 #endif
448
449                 /* Compile. */
450                 if(!compile_check_compiler()) {
451                         return "";
452                 }
453                 const char *nvcc = cuewCompilerPath();
454                 const string kernel = path_join(
455                         path_join(source_path, "kernel"),
456                         path_join("kernels",
457                                   path_join("cuda", source)));
458                 double starttime = time_dt();
459                 printf("Compiling CUDA kernel ...\n");
460
461                 path_create_directories(cubin);
462
463                 string command = string_printf("\"%s\" "
464                                                "-arch=sm_%d%d "
465                                                "--cubin \"%s\" "
466                                                "-o \"%s\" "
467                                                "%s ",
468                                                nvcc,
469                                                major, minor,
470                                                kernel.c_str(),
471                                                cubin.c_str(),
472                                                common_cflags.c_str());
473
474                 printf("%s\n", command.c_str());
475
476                 if(system(command.c_str()) == -1) {
477                         cuda_error_message("Failed to execute compilation command, "
478                                            "see console for details.");
479                         return "";
480                 }
481
482                 /* Verify if compilation succeeded */
483                 if(!path_exists(cubin)) {
484                         cuda_error_message("CUDA kernel compilation failed, "
485                                            "see console for details.");
486                         return "";
487                 }
488
489                 printf("Kernel compilation finished in %.2lfs.\n", time_dt() - starttime);
490
491                 return cubin;
492         }
493
494         bool load_kernels(const DeviceRequestedFeatures& requested_features)
495         {
496                 /* TODO(sergey): Support kernel re-load for CUDA devices.
497                  *
498                  * Currently re-loading the kernel will invalidate memory pointers,
499                  * causing problems in cuCtxSynchronize.
500                  */
501                 if(cuFilterModule && cuModule) {
502                         VLOG(1) << "Skipping kernel reload, not currently supported.";
503                         return true;
504                 }
505
506                 /* check if cuda init succeeded */
507                 if(cuContext == 0)
508                         return false;
509
510                 /* check if GPU is supported */
511                 if(!support_device(requested_features))
512                         return false;
513
514                 /* get kernel */
515                 string cubin = compile_kernel(requested_features, false, use_split_kernel());
516                 if(cubin == "")
517                         return false;
518
519                 string filter_cubin = compile_kernel(requested_features, true, false);
520                 if(filter_cubin == "")
521                         return false;
522
523                 /* open module */
524                 CUDAContextScope scope(this);
525
526                 string cubin_data;
527                 CUresult result;
528
529                 if(path_read_text(cubin, cubin_data))
530                         result = cuModuleLoadData(&cuModule, cubin_data.c_str());
531                 else
532                         result = CUDA_ERROR_FILE_NOT_FOUND;
533
534                 if(cuda_error_(result, "cuModuleLoad"))
535                         cuda_error_message(string_printf("Failed loading CUDA kernel %s.", cubin.c_str()));
536
537                 if(path_read_text(filter_cubin, cubin_data))
538                         result = cuModuleLoadData(&cuFilterModule, cubin_data.c_str());
539                 else
540                         result = CUDA_ERROR_FILE_NOT_FOUND;
541
542                 if(cuda_error_(result, "cuModuleLoad"))
543                         cuda_error_message(string_printf("Failed loading CUDA kernel %s.", filter_cubin.c_str()));
544
545                 return (result == CUDA_SUCCESS);
546         }
547
548         void load_texture_info()
549         {
550                 if(info.has_bindless_textures && need_texture_info) {
551                         texture_info.copy_to_device();
552                         need_texture_info = false;
553                 }
554         }
555
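        /* Generic device-only buffers. These helpers implement the plain
         * cuMemAlloc/cuMemcpyHtoD/cuMemFree path that the mem_* entry points below
         * fall back to for anything that is not a texture or a pixel buffer, keeping
         * device_pointer, device_size and the memory statistics in sync.
         */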
556         void generic_alloc(device_memory& mem)
557         {
558                 CUDAContextScope scope(this);
559
560                 if(mem.name) {
561                         VLOG(1) << "Buffer allocate: " << mem.name << ", "
562                                         << string_human_readable_number(mem.memory_size()) << " bytes. ("
563                                         << string_human_readable_size(mem.memory_size()) << ")";
564                 }
565
566                 CUdeviceptr device_pointer;
567                 size_t size = mem.memory_size();
568                 cuda_assert(cuMemAlloc(&device_pointer, size));
569                 mem.device_pointer = (device_ptr)device_pointer;
570                 mem.device_size = size;
571                 stats.mem_alloc(size);
572         }
573
574         void generic_copy_to(device_memory& mem)
575         {
576                 if(mem.device_pointer) {
577                         CUDAContextScope scope(this);
578                         cuda_assert(cuMemcpyHtoD(cuda_device_ptr(mem.device_pointer), (void*)mem.data_pointer, mem.memory_size()));
579                 }
580         }
581
582         void generic_free(device_memory& mem)
583         {
584                 if(mem.device_pointer) {
585                         CUDAContextScope scope(this);
586
587                         cuda_assert(cuMemFree(cuda_device_ptr(mem.device_pointer)));
588
589                         mem.device_pointer = 0;
590
591                         stats.mem_free(mem.device_size);
592                         mem.device_size = 0;
593                 }
594         }
595
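        /* Device API entry points, dispatching on the memory type: MEM_PIXELS uses
         * the GL pixel buffer path when drawing interactively, MEM_TEXTURE goes
         * through tex_alloc()/tex_free(), and everything else uses the generic_*
         * helpers above. From the caller's side the flow is roughly as follows
         * (buffer name and element type here are hypothetical):
         *
         *   device_vector<float> buffer(device, "my_buffer", MEM_READ_WRITE);
         *   buffer.resize(n);
         *   buffer.copy_to_device();  // mem_copy_to(): generic_alloc() + generic_copy_to()
         *   buffer.free();            // mem_free(): generic_free()
         */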
596         void mem_alloc(device_memory& mem)
597         {
598                 if(mem.type == MEM_PIXELS && !background) {
599                         pixels_alloc(mem);
600                 }
601                 else if(mem.type == MEM_TEXTURE) {
602                         assert(!"mem_alloc not supported for textures.");
603                 }
604                 else {
605                         generic_alloc(mem);
606                 }
607         }
608
609         void mem_copy_to(device_memory& mem)
610         {
611                 if(mem.type == MEM_PIXELS) {
612                         assert(!"mem_copy_to not supported for pixels.");
613                 }
614                 else if(mem.type == MEM_TEXTURE) {
615                         tex_free(mem);
616                         tex_alloc(mem);
617                 }
618                 else {
619                         if(!mem.device_pointer) {
620                                 generic_alloc(mem);
621                         }
622
623                         generic_copy_to(mem);
624                 }
625         }
626
627         void mem_copy_from(device_memory& mem, int y, int w, int h, int elem)
628         {
629                 if(mem.type == MEM_PIXELS && !background) {
630                         pixels_copy_from(mem, y, w, h);
631                 }
632                 else if(mem.type == MEM_TEXTURE) {
633                         assert(!"mem_copy_from not supported for textures.");
634                 }
635                 else {
636                         CUDAContextScope scope(this);
637                         size_t offset = elem*y*w;
638                         size_t size = elem*w*h;
639
640                         if(mem.device_pointer) {
641                                 cuda_assert(cuMemcpyDtoH((uchar*)mem.data_pointer + offset,
642                                                          (CUdeviceptr)(mem.device_pointer + offset), size));
643                         }
644                         else {
645                                 memset((char*)mem.data_pointer + offset, 0, size);
646                         }
647                 }
648         }
649
650         void mem_zero(device_memory& mem)
651         {
652                 if(!mem.device_pointer) {
653                         mem_alloc(mem);
654                 }
655
656                 if(mem.data_pointer) {
657                         memset((void*)mem.data_pointer, 0, mem.memory_size());
658                 }
659
660                 if(mem.device_pointer) {
661                         CUDAContextScope scope(this);
662                         cuda_assert(cuMemsetD8(cuda_device_ptr(mem.device_pointer), 0, mem.memory_size()));
663                 }
664         }
665
666         void mem_free(device_memory& mem)
667         {
668                 if(mem.type == MEM_PIXELS && !background) {
669                         pixels_free(mem);
670                 }
671                 else if(mem.type == MEM_TEXTURE) {
672                         tex_free(mem);
673                 }
674                 else {
675                         generic_free(mem);
676                 }
677         }
678
679         virtual device_ptr mem_alloc_sub_ptr(device_memory& mem, int offset, int /*size*/)
680         {
681                 return (device_ptr) (((char*) mem.device_pointer) + mem.memory_elements_size(offset));
682         }
683
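        /* Copies a host side struct into a global declared in the kernel module,
         * looked up by name with cuModuleGetGlobal(). This is how per-render
         * constants such as the kernel data reach the GPU, e.g. (caller-side
         * variable name hypothetical):
         *
         *   const_copy_to("__data", &kernel_data, sizeof(kernel_data));
         */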
684         void const_copy_to(const char *name, void *host, size_t size)
685         {
686                 CUDAContextScope scope(this);
687                 CUdeviceptr mem;
688                 size_t bytes;
689
690                 cuda_assert(cuModuleGetGlobal(&mem, &bytes, cuModule, name));
691                 //assert(bytes == size);
692                 cuda_assert(cuMemcpyHtoD(mem, host, size));
693         }
694
695         void tex_alloc(device_memory& mem)
696         {
697                 CUDAContextScope scope(this);
698
699                 VLOG(1) << "Texture allocate: " << mem.name << ", "
700                         << string_human_readable_number(mem.memory_size()) << " bytes. ("
701                         << string_human_readable_size(mem.memory_size()) << ")";
702
703                 /* Check if we are on sm_30 or above, for bindless textures. */
704                 bool has_bindless_textures = info.has_bindless_textures;
705
706                 /* General variables for both architectures */
707                 string bind_name = mem.name;
708                 size_t dsize = datatype_size(mem.data_type);
709                 size_t size = mem.memory_size();
710
711                 CUaddress_mode address_mode = CU_TR_ADDRESS_MODE_WRAP;
712                 switch(mem.extension) {
713                         case EXTENSION_REPEAT:
714                                 address_mode = CU_TR_ADDRESS_MODE_WRAP;
715                                 break;
716                         case EXTENSION_EXTEND:
717                                 address_mode = CU_TR_ADDRESS_MODE_CLAMP;
718                                 break;
719                         case EXTENSION_CLIP:
720                                 address_mode = CU_TR_ADDRESS_MODE_BORDER;
721                                 break;
722                         default:
723                                 assert(0);
724                                 break;
725                 }
726
727                 CUfilter_mode filter_mode;
728                 if(mem.interpolation == INTERPOLATION_CLOSEST) {
729                         filter_mode = CU_TR_FILTER_MODE_POINT;
730                 }
731                 else {
732                         filter_mode = CU_TR_FILTER_MODE_LINEAR;
733                 }
734
735                 /* General variables for Fermi */
736                 CUtexref texref = NULL;
737
738                 if(!has_bindless_textures && mem.interpolation != INTERPOLATION_NONE) {
739                         if(mem.data_depth > 1) {
740                                 /* Kernel uses different bind names for 2d and 3d float textures,
741                                  * so we have to adjust a couple of things here.
742                                  */
743                                 vector<string> tokens;
744                                 string_split(tokens, mem.name, "_");
745                                 bind_name = string_printf("__tex_image_%s_3d_%s",
746                                                           tokens[2].c_str(),
747                                                           tokens[3].c_str());
748                         }
749
750                         cuda_assert(cuModuleGetTexRef(&texref, cuModule, bind_name.c_str()));
751
752                         if(!texref) {
753                                 return;
754                         }
755                 }
756
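                /* Two storage paths from here on: data without interpolation is kept in a
                 * plain global memory buffer whose device pointer is written into the module
                 * global named bind_name, while interpolated data goes into a CUDA array
                 * bound either to a texture reference (Fermi) or to a bindless texture
                 * object plus a TextureInfo slot (Kepler and newer).
                 */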
757                 if(mem.interpolation == INTERPOLATION_NONE) {
758                         /* Data Storage */
759                         generic_alloc(mem);
760                         generic_copy_to(mem);
761
762                         CUdeviceptr cumem;
763                         size_t cubytes;
764
765                         cuda_assert(cuModuleGetGlobal(&cumem, &cubytes, cuModule, bind_name.c_str()));
766
767                         if(cubytes == 8) {
768                                 /* 64 bit device pointer */
769                                 uint64_t ptr = mem.device_pointer;
770                                 cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
771                         }
772                         else {
773                                 /* 32 bit device pointer */
774                                 uint32_t ptr = (uint32_t)mem.device_pointer;
775                                 cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
776                         }
777                 }
778                 else {
779                         /* Texture Storage */
780                         CUarray handle = NULL;
781
782                         CUarray_format_enum format;
783                         switch(mem.data_type) {
784                                 case TYPE_UCHAR: format = CU_AD_FORMAT_UNSIGNED_INT8; break;
785                                 case TYPE_UINT: format = CU_AD_FORMAT_UNSIGNED_INT32; break;
786                                 case TYPE_INT: format = CU_AD_FORMAT_SIGNED_INT32; break;
787                                 case TYPE_FLOAT: format = CU_AD_FORMAT_FLOAT; break;
788                                 case TYPE_HALF: format = CU_AD_FORMAT_HALF; break;
789                                 default: assert(0); return;
790                         }
791
792                         if(mem.data_depth > 1) {
793                                 CUDA_ARRAY3D_DESCRIPTOR desc;
794
795                                 desc.Width = mem.data_width;
796                                 desc.Height = mem.data_height;
797                                 desc.Depth = mem.data_depth;
798                                 desc.Format = format;
799                                 desc.NumChannels = mem.data_elements;
800                                 desc.Flags = 0;
801
802                                 cuda_assert(cuArray3DCreate(&handle, &desc));
803                         }
804                         else {
805                                 CUDA_ARRAY_DESCRIPTOR desc;
806
807                                 desc.Width = mem.data_width;
808                                 desc.Height = mem.data_height;
809                                 desc.Format = format;
810                                 desc.NumChannels = mem.data_elements;
811
812                                 cuda_assert(cuArrayCreate(&handle, &desc));
813                         }
814
815                         if(!handle) {
816                                 return;
817                         }
818
819                         /* Allocate 3D, 2D or 1D memory */
820                         if(mem.data_depth > 1) {
821                                 CUDA_MEMCPY3D param;
822                                 memset(&param, 0, sizeof(param));
823                                 param.dstMemoryType = CU_MEMORYTYPE_ARRAY;
824                                 param.dstArray = handle;
825                                 param.srcMemoryType = CU_MEMORYTYPE_HOST;
826                                 param.srcHost = (void*)mem.data_pointer;
827                                 param.srcPitch = mem.data_width*dsize*mem.data_elements;
828                                 param.WidthInBytes = param.srcPitch;
829                                 param.Height = mem.data_height;
830                                 param.Depth = mem.data_depth;
831
832                                 cuda_assert(cuMemcpy3D(&param));
833                         }
834                         else if(mem.data_height > 1) {
835                                 CUDA_MEMCPY2D param;
836                                 memset(&param, 0, sizeof(param));
837                                 param.dstMemoryType = CU_MEMORYTYPE_ARRAY;
838                                 param.dstArray = handle;
839                                 param.srcMemoryType = CU_MEMORYTYPE_HOST;
840                                 param.srcHost = (void*)mem.data_pointer;
841                                 param.srcPitch = mem.data_width*dsize*mem.data_elements;
842                                 param.WidthInBytes = param.srcPitch;
843                                 param.Height = mem.data_height;
844
845                                 cuda_assert(cuMemcpy2D(&param));
846                         }
847                         else
848                                 cuda_assert(cuMemcpyHtoA(handle, 0, (void*)mem.data_pointer, size));
849
850                         /* Fermi and Kepler */
851                         mem.device_pointer = (device_ptr)handle;
852                         mem.device_size = size;
853
854                         stats.mem_alloc(size);
855
856                         if(has_bindless_textures) {
857                                 /* Bindless Textures - Kepler */
858                                 int flat_slot = 0;
859                                 if(string_startswith(mem.name, "__tex_image")) {
860                                         int pos = string(mem.name).rfind("_");
861                                         flat_slot = atoi(mem.name + pos + 1);
862                                 }
863                                 else {
864                                         assert(0);
865                                 }
866
867                                 CUDA_RESOURCE_DESC resDesc;
868                                 memset(&resDesc, 0, sizeof(resDesc));
869                                 resDesc.resType = CU_RESOURCE_TYPE_ARRAY;
870                                 resDesc.res.array.hArray = handle;
871                                 resDesc.flags = 0;
872
873                                 CUDA_TEXTURE_DESC texDesc;
874                                 memset(&texDesc, 0, sizeof(texDesc));
875                                 texDesc.addressMode[0] = address_mode;
876                                 texDesc.addressMode[1] = address_mode;
877                                 texDesc.addressMode[2] = address_mode;
878                                 texDesc.filterMode = filter_mode;
879                                 texDesc.flags = CU_TRSF_NORMALIZED_COORDINATES;
880
881                                 CUtexObject tex = 0;
882                                 cuda_assert(cuTexObjectCreate(&tex, &resDesc, &texDesc, NULL));
883
884                                 /* Safety check */
885                                 if((uint)tex > UINT_MAX) {
886                                         assert(0);
887                                 }
888
889                                 /* Resize once */
890                                 if(flat_slot >= texture_info.size()) {
891                                         /* Allocate some slots in advance, to reduce the
892                                          * number of re-allocations. */
893                                         texture_info.resize(flat_slot + 128);
894                                 }
895
896                                 /* Set Mapping and tag that we need to (re-)upload to device */
897                                 TextureInfo& info = texture_info[flat_slot];
898                                 info.data = (uint64_t)tex;
899                                 info.cl_buffer = 0;
900                                 info.interpolation = mem.interpolation;
901                                 info.extension = mem.extension;
902                                 info.width = mem.data_width;
903                                 info.height = mem.data_height;
904                                 info.depth = mem.data_depth;
905
906                                 tex_bindless_map[mem.device_pointer] = tex;
907                                 need_texture_info = true;
908                         }
909                         else {
910                                 /* Regular Textures - Fermi */
911                                 cuda_assert(cuTexRefSetArray(texref, handle, CU_TRSA_OVERRIDE_FORMAT));
912                                 cuda_assert(cuTexRefSetFilterMode(texref, filter_mode));
913                                 cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_NORMALIZED_COORDINATES));
914
915                                 cuda_assert(cuTexRefSetAddressMode(texref, 0, address_mode));
916                                 cuda_assert(cuTexRefSetAddressMode(texref, 1, address_mode));
917                                 if(mem.data_depth > 1) {
918                                         cuda_assert(cuTexRefSetAddressMode(texref, 2, address_mode));
919                                 }
920
921                                 cuda_assert(cuTexRefSetFormat(texref, format, mem.data_elements));
922                         }
923                 }
924
925                 /* Fermi and Kepler */
926                 tex_interp_map[mem.device_pointer] = (mem.interpolation != INTERPOLATION_NONE);
927         }
928
929         void tex_free(device_memory& mem)
930         {
931                 if(mem.device_pointer) {
932                         if(tex_interp_map[mem.device_pointer]) {
933                                 CUDAContextScope scope(this);
934                                 cuArrayDestroy((CUarray)mem.device_pointer);
935
936                                 /* Free CUtexObject (Bindless Textures) */
937                                 if(info.has_bindless_textures && tex_bindless_map[mem.device_pointer]) {
938                                         CUtexObject tex = tex_bindless_map[mem.device_pointer];
939                                         cuTexObjectDestroy(tex);
940                                 }
941
942                                 tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
943                                 mem.device_pointer = 0;
944
945                                 stats.mem_free(mem.device_size);
946                                 mem.device_size = 0;
947                         }
948                         else {
949                                 tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
950                                 generic_free(mem);
951                         }
952                 }
953         }
954
955         bool denoising_set_tiles(device_ptr *buffers, DenoisingTask *task)
956         {
957                 TilesInfo *tiles = (TilesInfo*) task->tiles_mem.data_pointer;
958                 for(int i = 0; i < 9; i++) {
959                         tiles->buffers[i] = buffers[i];
960                 }
961
962                 task->tiles_mem.copy_to_device();
963
964                 return !have_error();
965         }
966
967 #define CUDA_GET_BLOCKSIZE(func, w, h)                                                                          \
968                         int threads_per_block;                                                                              \
969                         cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, func)); \
970                         int threads = (int)sqrt((float)threads_per_block);                                                  \
971                         int xblocks = ((w) + threads - 1)/threads;                                                          \
972                         int yblocks = ((h) + threads - 1)/threads;
973
974 #define CUDA_LAUNCH_KERNEL(func, args)                      \
975                         cuda_assert(cuLaunchKernel(func,                \
976                                                    xblocks, yblocks, 1, \
977                                                    threads, threads, 1, \
978                                                    0, 0, args, 0));
979
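/* Both macros assume a roughly square 2D launch: the block edge is the square
 * root of the kernel's maximum threads per block and the grid covers the full
 * (w, h) range. With a 1024 thread limit, for example, this expands to 32x32
 * thread blocks and ceil(w/32) x ceil(h/32) blocks in the grid.
 */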
980         bool denoising_non_local_means(device_ptr image_ptr, device_ptr guide_ptr, device_ptr variance_ptr, device_ptr out_ptr,
981                                        DenoisingTask *task)
982         {
983                 if(have_error())
984                         return false;
985
986                 CUDAContextScope scope(this);
987
988                 int4 rect = task->rect;
989                 int w = align_up(rect.z-rect.x, 4);
990                 int h = rect.w-rect.y;
991                 int r = task->nlm_state.r;
992                 int f = task->nlm_state.f;
993                 float a = task->nlm_state.a;
994                 float k_2 = task->nlm_state.k_2;
995
996                 CUdeviceptr difference     = task->nlm_state.temporary_1_ptr;
997                 CUdeviceptr blurDifference = task->nlm_state.temporary_2_ptr;
998                 CUdeviceptr weightAccum    = task->nlm_state.temporary_3_ptr;
999
1000                 cuda_assert(cuMemsetD8(weightAccum, 0, sizeof(float)*w*h));
1001                 cuda_assert(cuMemsetD8(out_ptr, 0, sizeof(float)*w*h));
1002
1003                 CUfunction cuNLMCalcDifference, cuNLMBlur, cuNLMCalcWeight, cuNLMUpdateOutput, cuNLMNormalize;
1004                 cuda_assert(cuModuleGetFunction(&cuNLMCalcDifference, cuFilterModule, "kernel_cuda_filter_nlm_calc_difference"));
1005                 cuda_assert(cuModuleGetFunction(&cuNLMBlur,           cuFilterModule, "kernel_cuda_filter_nlm_blur"));
1006                 cuda_assert(cuModuleGetFunction(&cuNLMCalcWeight,     cuFilterModule, "kernel_cuda_filter_nlm_calc_weight"));
1007                 cuda_assert(cuModuleGetFunction(&cuNLMUpdateOutput,   cuFilterModule, "kernel_cuda_filter_nlm_update_output"));
1008                 cuda_assert(cuModuleGetFunction(&cuNLMNormalize,      cuFilterModule, "kernel_cuda_filter_nlm_normalize"));
1009
1010                 cuda_assert(cuFuncSetCacheConfig(cuNLMCalcDifference, CU_FUNC_CACHE_PREFER_L1));
1011                 cuda_assert(cuFuncSetCacheConfig(cuNLMBlur,           CU_FUNC_CACHE_PREFER_L1));
1012                 cuda_assert(cuFuncSetCacheConfig(cuNLMCalcWeight,     CU_FUNC_CACHE_PREFER_L1));
1013                 cuda_assert(cuFuncSetCacheConfig(cuNLMUpdateOutput,   CU_FUNC_CACHE_PREFER_L1));
1014                 cuda_assert(cuFuncSetCacheConfig(cuNLMNormalize,      CU_FUNC_CACHE_PREFER_L1));
1015
1016                 CUDA_GET_BLOCKSIZE(cuNLMCalcDifference, rect.z-rect.x, rect.w-rect.y);
1017
1018                 int dx, dy;
1019                 int4 local_rect;
1020                 int channel_offset = 0;
1021                 void *calc_difference_args[] = {&dx, &dy, &guide_ptr, &variance_ptr, &difference, &local_rect, &w, &channel_offset, &a, &k_2};
1022                 void *blur_args[]            = {&difference, &blurDifference, &local_rect, &w, &f};
1023                 void *calc_weight_args[]     = {&blurDifference, &difference, &local_rect, &w, &f};
1024                 void *update_output_args[]   = {&dx, &dy, &blurDifference, &image_ptr, &out_ptr, &weightAccum, &local_rect, &w, &f};
1025
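                /* Visit every offset (dx, dy) in the (2*r+1) x (2*r+1) search window; for
                 * each offset the per-pixel difference is computed, blurred, converted to
                 * weights, blurred again and accumulated into out_ptr and weightAccum. */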
1026                 for(int i = 0; i < (2*r+1)*(2*r+1); i++) {
1027                         dy = i / (2*r+1) - r;
1028                         dx = i % (2*r+1) - r;
1029                         local_rect = make_int4(max(0, -dx), max(0, -dy), rect.z-rect.x - max(0, dx), rect.w-rect.y - max(0, dy));
1030
1031                         CUDA_LAUNCH_KERNEL(cuNLMCalcDifference, calc_difference_args);
1032                         CUDA_LAUNCH_KERNEL(cuNLMBlur, blur_args);
1033                         CUDA_LAUNCH_KERNEL(cuNLMCalcWeight, calc_weight_args);
1034                         CUDA_LAUNCH_KERNEL(cuNLMBlur, blur_args);
1035                         CUDA_LAUNCH_KERNEL(cuNLMUpdateOutput, update_output_args);
1036                 }
1037
1038                 local_rect = make_int4(0, 0, rect.z-rect.x, rect.w-rect.y);
1039                 void *normalize_args[] = {&out_ptr, &weightAccum, &local_rect, &w};
1040                 CUDA_LAUNCH_KERNEL(cuNLMNormalize, normalize_args);
1041                 cuda_assert(cuCtxSynchronize());
1042
1043                 return !have_error();
1044         }
1045
1046         bool denoising_construct_transform(DenoisingTask *task)
1047         {
1048                 if(have_error())
1049                         return false;
1050
1051                 CUDAContextScope scope(this);
1052
1053                 CUfunction cuFilterConstructTransform;
1054                 cuda_assert(cuModuleGetFunction(&cuFilterConstructTransform, cuFilterModule, "kernel_cuda_filter_construct_transform"));
1055                 cuda_assert(cuFuncSetCacheConfig(cuFilterConstructTransform, CU_FUNC_CACHE_PREFER_SHARED));
1056                 CUDA_GET_BLOCKSIZE(cuFilterConstructTransform,
1057                                    task->storage.w,
1058                                    task->storage.h);
1059
1060                 void *args[] = {&task->buffer.mem.device_pointer,
1061                                 &task->storage.transform.device_pointer,
1062                                 &task->storage.rank.device_pointer,
1063                                 &task->filter_area,
1064                                 &task->rect,
1065                                 &task->radius,
1066                                 &task->pca_threshold,
1067                                 &task->buffer.pass_stride};
1068                 CUDA_LAUNCH_KERNEL(cuFilterConstructTransform, args);
1069                 cuda_assert(cuCtxSynchronize());
1070
1071                 return !have_error();
1072         }
1073
1074         bool denoising_reconstruct(device_ptr color_ptr,
1075                                    device_ptr color_variance_ptr,
1076                                    device_ptr output_ptr,
1077                                    DenoisingTask *task)
1078         {
1079                 if(have_error())
1080                         return false;
1081
1082                 CUDAContextScope scope(this);
1083
1084                 mem_zero(task->storage.XtWX);
1085                 mem_zero(task->storage.XtWY);
1086
1087                 CUfunction cuNLMCalcDifference, cuNLMBlur, cuNLMCalcWeight, cuNLMConstructGramian, cuFinalize;
1088                 cuda_assert(cuModuleGetFunction(&cuNLMCalcDifference,   cuFilterModule, "kernel_cuda_filter_nlm_calc_difference"));
1089                 cuda_assert(cuModuleGetFunction(&cuNLMBlur,             cuFilterModule, "kernel_cuda_filter_nlm_blur"));
1090                 cuda_assert(cuModuleGetFunction(&cuNLMCalcWeight,       cuFilterModule, "kernel_cuda_filter_nlm_calc_weight"));
1091                 cuda_assert(cuModuleGetFunction(&cuNLMConstructGramian, cuFilterModule, "kernel_cuda_filter_nlm_construct_gramian"));
1092                 cuda_assert(cuModuleGetFunction(&cuFinalize,            cuFilterModule, "kernel_cuda_filter_finalize"));
1093
1094                 cuda_assert(cuFuncSetCacheConfig(cuNLMCalcDifference,   CU_FUNC_CACHE_PREFER_L1));
1095                 cuda_assert(cuFuncSetCacheConfig(cuNLMBlur,             CU_FUNC_CACHE_PREFER_L1));
1096                 cuda_assert(cuFuncSetCacheConfig(cuNLMCalcWeight,       CU_FUNC_CACHE_PREFER_L1));
1097                 cuda_assert(cuFuncSetCacheConfig(cuNLMConstructGramian, CU_FUNC_CACHE_PREFER_SHARED));
1098                 cuda_assert(cuFuncSetCacheConfig(cuFinalize,            CU_FUNC_CACHE_PREFER_L1));
1099
1100                 CUDA_GET_BLOCKSIZE(cuNLMCalcDifference,
1101                                    task->reconstruction_state.source_w,
1102                                    task->reconstruction_state.source_h);
1103
1104                 CUdeviceptr difference     = task->reconstruction_state.temporary_1_ptr;
1105                 CUdeviceptr blurDifference = task->reconstruction_state.temporary_2_ptr;
1106
1107                 int r = task->radius;
1108                 int f = 4;
1109                 float a = 1.0f;
1110                 for(int i = 0; i < (2*r+1)*(2*r+1); i++) {
1111                         int dy = i / (2*r+1) - r;
1112                         int dx = i % (2*r+1) - r;
1113
1114                         int local_rect[4] = {max(0, -dx), max(0, -dy),
1115                                              task->reconstruction_state.source_w - max(0, dx),
1116                                              task->reconstruction_state.source_h - max(0, dy)};
1117
1118                         void *calc_difference_args[] = {&dx, &dy,
1119                                                         &color_ptr,
1120                                                         &color_variance_ptr,
1121                                                         &difference,
1122                                                         &local_rect,
1123                                                         &task->buffer.w,
1124                                                         &task->buffer.pass_stride,
1125                                                         &a,
1126                                                         &task->nlm_k_2};
1127                         CUDA_LAUNCH_KERNEL(cuNLMCalcDifference, calc_difference_args);
1128
1129                         void *blur_args[] = {&difference,
1130                                              &blurDifference,
1131                                              &local_rect,
1132                                              &task->buffer.w,
1133                                              &f};
1134                         CUDA_LAUNCH_KERNEL(cuNLMBlur, blur_args);
1135
1136                         void *calc_weight_args[] = {&blurDifference,
1137                                                     &difference,
1138                                                     &local_rect,
1139                                                     &task->buffer.w,
1140                                                     &f};
1141                         CUDA_LAUNCH_KERNEL(cuNLMCalcWeight, calc_weight_args);
1142
1143                         /* Reuse previous arguments. */
1144                         CUDA_LAUNCH_KERNEL(cuNLMBlur, blur_args);
1145
1146                         void *construct_gramian_args[] = {&dx, &dy,
1147                                                           &blurDifference,
1148                                                           &task->buffer.mem.device_pointer,
1149                                                           &task->storage.transform.device_pointer,
1150                                                           &task->storage.rank.device_pointer,
1151                                                           &task->storage.XtWX.device_pointer,
1152                                                           &task->storage.XtWY.device_pointer,
1153                                                           &local_rect,
1154                                                           &task->reconstruction_state.filter_rect,
1155                                                           &task->buffer.w,
1156                                                           &task->buffer.h,
1157                                                           &f,
1158                                                           &task->buffer.pass_stride};
1159                         CUDA_LAUNCH_KERNEL(cuNLMConstructGramian, construct_gramian_args);
1160                 }
1161
1162                 void *finalize_args[] = {&task->buffer.w,
1163                                          &task->buffer.h,
1164                                          &output_ptr,
1165                                          &task->storage.rank.device_pointer,
1166                                          &task->storage.XtWX.device_pointer,
1167                                          &task->storage.XtWY.device_pointer,
1168                                          &task->filter_area,
1169                                          &task->reconstruction_state.buffer_params.x,
1170                                          &task->render_buffer.samples};
1171                 CUDA_LAUNCH_KERNEL(cuFinalize, finalize_args);
1172                 cuda_assert(cuCtxSynchronize());
1173
1174                 return !have_error();
1175         }
1176
1177         bool denoising_combine_halves(device_ptr a_ptr, device_ptr b_ptr,
1178                                       device_ptr mean_ptr, device_ptr variance_ptr,
1179                                       int r, int4 rect, DenoisingTask *task)
1180         {
1181                 if(have_error())
1182                         return false;
1183
1184                 CUDAContextScope scope(this);
1185
1186                 CUfunction cuFilterCombineHalves;
1187                 cuda_assert(cuModuleGetFunction(&cuFilterCombineHalves, cuFilterModule, "kernel_cuda_filter_combine_halves"));
1188                 cuda_assert(cuFuncSetCacheConfig(cuFilterCombineHalves, CU_FUNC_CACHE_PREFER_L1));
1189                 CUDA_GET_BLOCKSIZE(cuFilterCombineHalves,
1190                                    task->rect.z-task->rect.x,
1191                                    task->rect.w-task->rect.y);
1192
1193                 void *args[] = {&mean_ptr,
1194                                 &variance_ptr,
1195                                 &a_ptr,
1196                                 &b_ptr,
1197                                 &rect,
1198                                 &r};
1199                 CUDA_LAUNCH_KERNEL(cuFilterCombineHalves, args);
1200                 cuda_assert(cuCtxSynchronize());
1201
1202                 return !have_error();
1203         }
1204
1205         bool denoising_divide_shadow(device_ptr a_ptr, device_ptr b_ptr,
1206                                      device_ptr sample_variance_ptr, device_ptr sv_variance_ptr,
1207                                      device_ptr buffer_variance_ptr, DenoisingTask *task)
1208         {
1209                 if(have_error())
1210                         return false;
1211
1212                 CUDAContextScope scope(this);
1213
1214                 CUfunction cuFilterDivideShadow;
1215                 cuda_assert(cuModuleGetFunction(&cuFilterDivideShadow, cuFilterModule, "kernel_cuda_filter_divide_shadow"));
1216                 cuda_assert(cuFuncSetCacheConfig(cuFilterDivideShadow, CU_FUNC_CACHE_PREFER_L1));
1217                 CUDA_GET_BLOCKSIZE(cuFilterDivideShadow,
1218                                    task->rect.z-task->rect.x,
1219                                    task->rect.w-task->rect.y);
1220
1221                 void *args[] = {&task->render_buffer.samples,
1222                                 &task->tiles_mem.device_pointer,
1223                                 &a_ptr,
1224                                 &b_ptr,
1225                                 &sample_variance_ptr,
1226                                 &sv_variance_ptr,
1227                                 &buffer_variance_ptr,
1228                                 &task->rect,
1229                                 &task->render_buffer.pass_stride,
1230                                 &task->render_buffer.denoising_data_offset};
1231                 CUDA_LAUNCH_KERNEL(cuFilterDivideShadow, args);
1232                 cuda_assert(cuCtxSynchronize());
1233
1234                 return !have_error();
1235         }
1236
1237         bool denoising_get_feature(int mean_offset,
1238                                    int variance_offset,
1239                                    device_ptr mean_ptr,
1240                                    device_ptr variance_ptr,
1241                                    DenoisingTask *task)
1242         {
1243                 if(have_error())
1244                         return false;
1245
1246                 CUDAContextScope scope(this);
1247
1248                 CUfunction cuFilterGetFeature;
1249                 cuda_assert(cuModuleGetFunction(&cuFilterGetFeature, cuFilterModule, "kernel_cuda_filter_get_feature"));
1250                 cuda_assert(cuFuncSetCacheConfig(cuFilterGetFeature, CU_FUNC_CACHE_PREFER_L1));
1251                 CUDA_GET_BLOCKSIZE(cuFilterGetFeature,
1252                                    task->rect.z-task->rect.x,
1253                                    task->rect.w-task->rect.y);
1254
1255                 void *args[] = {&task->render_buffer.samples,
1256                                 &task->tiles_mem.device_pointer,
1257                                 &mean_offset,
1258                                 &variance_offset,
1259                                 &mean_ptr,
1260                                 &variance_ptr,
1261                                 &task->rect,
1262                                 &task->render_buffer.pass_stride,
1263                                 &task->render_buffer.denoising_data_offset};
1264                 CUDA_LAUNCH_KERNEL(cuFilterGetFeature, args);
1265                 cuda_assert(cuCtxSynchronize());
1266
1267                 return !have_error();
1268         }
1269
1270         bool denoising_detect_outliers(device_ptr image_ptr,
1271                                        device_ptr variance_ptr,
1272                                        device_ptr depth_ptr,
1273                                        device_ptr output_ptr,
1274                                        DenoisingTask *task)
1275         {
1276                 if(have_error())
1277                         return false;
1278
1279                 CUDAContextScope scope(this);
1280
1281                 CUfunction cuFilterDetectOutliers;
1282                 cuda_assert(cuModuleGetFunction(&cuFilterDetectOutliers, cuFilterModule, "kernel_cuda_filter_detect_outliers"));
1283                 cuda_assert(cuFuncSetCacheConfig(cuFilterDetectOutliers, CU_FUNC_CACHE_PREFER_L1));
1284                 CUDA_GET_BLOCKSIZE(cuFilterDetectOutliers,
1285                                    task->rect.z-task->rect.x,
1286                                    task->rect.w-task->rect.y);
1287
1288                 void *args[] = {&image_ptr,
1289                                 &variance_ptr,
1290                                 &depth_ptr,
1291                                 &output_ptr,
1292                                 &task->rect,
1293                                 &task->buffer.pass_stride};
1294
1295                 CUDA_LAUNCH_KERNEL(cuFilterDetectOutliers, args);
1296                 cuda_assert(cuCtxSynchronize());
1297
1298                 return !have_error();
1299         }
1300
1301         void denoise(RenderTile &rtile, const DeviceTask &task)
1302         {
1303                 DenoisingTask denoising(this);
1304
1305                 denoising.functions.construct_transform = function_bind(&CUDADevice::denoising_construct_transform, this, &denoising);
1306                 denoising.functions.reconstruct = function_bind(&CUDADevice::denoising_reconstruct, this, _1, _2, _3, &denoising);
1307                 denoising.functions.divide_shadow = function_bind(&CUDADevice::denoising_divide_shadow, this, _1, _2, _3, _4, _5, &denoising);
1308                 denoising.functions.non_local_means = function_bind(&CUDADevice::denoising_non_local_means, this, _1, _2, _3, _4, &denoising);
1309                 denoising.functions.combine_halves = function_bind(&CUDADevice::denoising_combine_halves, this, _1, _2, _3, _4, _5, _6, &denoising);
1310                 denoising.functions.get_feature = function_bind(&CUDADevice::denoising_get_feature, this, _1, _2, _3, _4, &denoising);
1311                 denoising.functions.detect_outliers = function_bind(&CUDADevice::denoising_detect_outliers, this, _1, _2, _3, _4, &denoising);
1312                 denoising.functions.set_tiles = function_bind(&CUDADevice::denoising_set_tiles, this, _1, &denoising);
1313
1314                 denoising.filter_area = make_int4(rtile.x, rtile.y, rtile.w, rtile.h);
1315                 denoising.render_buffer.samples = rtile.sample;
1316
1317                 RenderTile rtiles[9];
1318                 rtiles[4] = rtile;
1319                 task.map_neighbor_tiles(rtiles, this);
1320                 denoising.tiles_from_rendertiles(rtiles);
1321
1322                 denoising.init_from_devicetask(task);
1323
1324                 denoising.run_denoising();
1325
1326                 task.unmap_neighbor_tiles(rtiles, this);
1327         }
1328
1329         void path_trace(DeviceTask& task, RenderTile& rtile, device_vector<WorkTile>& work_tiles)
1330         {
1331                 if(have_error())
1332                         return;
1333
1334                 CUDAContextScope scope(this);
1335                 CUfunction cuPathTrace;
1336
1337                 /* Get kernel function. */
1338                 if(task.integrator_branched) {
1339                         cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_branched_path_trace"));
1340                 }
1341                 else {
1342                         cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_path_trace"));
1343                 }
1344
1345                 if(have_error()) {
1346                         return;
1347                 }
1348
1349                 cuda_assert(cuFuncSetCacheConfig(cuPathTrace, CU_FUNC_CACHE_PREFER_L1));
1350
1351                 /* Allocate work tile. */
1352                 work_tiles.alloc(1);
1353
1354                 WorkTile *wtile = work_tiles.get_data();
1355                 wtile->x = rtile.x;
1356                 wtile->y = rtile.y;
1357                 wtile->w = rtile.w;
1358                 wtile->h = rtile.h;
1359                 wtile->offset = rtile.offset;
1360                 wtile->stride = rtile.stride;
1361                 wtile->buffer = (float*)cuda_device_ptr(rtile.buffer);
1362
1363                 /* Prepare work size. More step samples render faster, but for now we
1364                  * remain conservative for GPUs connected to a display to avoid driver
1365                  * timeouts and display freezing. */
1366                 int min_blocks, num_threads_per_block;
1367                 cuda_assert(cuOccupancyMaxPotentialBlockSize(&min_blocks, &num_threads_per_block, cuPathTrace, NULL, 0, 0));
1368                 if(!info.display_device) {
1369                         min_blocks *= 8;
1370                 }
1371
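                     /* Pick a sample step large enough to cover min_blocks * num_threads_per_block work items across the tile. */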
1372                 uint step_samples = divide_up(min_blocks * num_threads_per_block, wtile->w * wtile->h);
1373
1374                 /* Render all samples. */
1375                 int start_sample = rtile.start_sample;
1376                 int end_sample = rtile.start_sample + rtile.num_samples;
1377
1378                 for(int sample = start_sample; sample < end_sample; sample += step_samples) {
1379                         /* Setup and copy work tile to device. */
1380                         wtile->start_sample = sample;
1381                         wtile->num_samples = min(step_samples, end_sample - sample);
1382                         work_tiles.copy_to_device();
1383
1384                         CUdeviceptr d_work_tiles = cuda_device_ptr(work_tiles.device_pointer);
1385                         uint total_work_size = wtile->w * wtile->h * wtile->num_samples;
1386                         uint num_blocks = divide_up(total_work_size, num_threads_per_block);
1387
1388                         /* Launch kernel. */
1389                         void *args[] = {&d_work_tiles,
1390                                         &total_work_size};
1391
1392                         cuda_assert(cuLaunchKernel(cuPathTrace,
1393                                                    num_blocks, 1, 1,
1394                                                    num_threads_per_block, 1, 1,
1395                                                    0, 0, args, 0));
1396
1397                         cuda_assert(cuCtxSynchronize());
1398
1399                         /* Update progress. */
1400                         rtile.sample = sample + wtile->num_samples;
1401                         task.update_progress(&rtile, rtile.w*rtile.h*wtile->num_samples);
1402
1403                         if(task.get_cancel()) {
1404                                 if(task.need_finish_queue == false)
1405                                         break;
1406                         }
1407                 }
1408         }
1409
1410         void film_convert(DeviceTask& task, device_ptr buffer, device_ptr rgba_byte, device_ptr rgba_half)
1411         {
1412                 if(have_error())
1413                         return;
1414
1415                 CUDAContextScope scope(this);
1416
1417                 CUfunction cuFilmConvert;
1418                 CUdeviceptr d_rgba = map_pixels((rgba_byte)? rgba_byte: rgba_half);
1419                 CUdeviceptr d_buffer = cuda_device_ptr(buffer);
1420
1421                 /* get kernel function */
1422                 if(rgba_half) {
1423                         cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_half_float"));
1424                 }
1425                 else {
1426                         cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_byte"));
1427                 }
1428
1429
1430                 float sample_scale = 1.0f/(task.sample + 1);
1431
1432                 /* pass in parameters */
1433                 void *args[] = {&d_rgba,
1434                                 &d_buffer,
1435                                 &sample_scale,
1436                                 &task.x,
1437                                 &task.y,
1438                                 &task.w,
1439                                 &task.h,
1440                                 &task.offset,
1441                                 &task.stride};
1442
1443                 /* launch kernel */
1444                 int threads_per_block;
1445                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuFilmConvert));
1446
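                     /* Use a roughly square 2D block; sqrt keeps xthreads * ythreads within the kernel's thread limit. */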
1447                 int xthreads = (int)sqrt(threads_per_block);
1448                 int ythreads = (int)sqrt(threads_per_block);
1449                 int xblocks = (task.w + xthreads - 1)/xthreads;
1450                 int yblocks = (task.h + ythreads - 1)/ythreads;
1451
1452                 cuda_assert(cuFuncSetCacheConfig(cuFilmConvert, CU_FUNC_CACHE_PREFER_L1));
1453
1454                 cuda_assert(cuLaunchKernel(cuFilmConvert,
1455                                            xblocks, yblocks, 1, /* blocks */
1456                                            xthreads, ythreads, 1, /* threads */
1457                                            0, 0, args, 0));
1458
1459                 unmap_pixels((rgba_byte)? rgba_byte: rgba_half);
1460         }
1461
1462         void shader(DeviceTask& task)
1463         {
1464                 if(have_error())
1465                         return;
1466
1467                 CUDAContextScope scope(this);
1468
1469                 CUfunction cuShader;
1470                 CUdeviceptr d_input = cuda_device_ptr(task.shader_input);
1471                 CUdeviceptr d_output = cuda_device_ptr(task.shader_output);
1472
1473                 /* get kernel function */
1474                 if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
1475                         cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_bake"));
1476                 }
1477                 else if(task.shader_eval_type == SHADER_EVAL_DISPLACE) {
1478                         cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_displace"));
1479                 }
1480                 else {
1481                         cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_background"));
1482                 }
1483
1484                 /* do tasks in smaller chunks, so we can cancel it */
1485                 const int shader_chunk_size = 65536;
1486                 const int start = task.shader_x;
1487                 const int end = task.shader_x + task.shader_w;
1488                 int offset = task.offset;
1489
1490                 bool canceled = false;
1491                 for(int sample = 0; sample < task.num_samples && !canceled; sample++) {
1492                         for(int shader_x = start; shader_x < end; shader_x += shader_chunk_size) {
1493                                 int shader_w = min(shader_chunk_size, end - shader_x);
1494
1495                                 /* pass in parameters */
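                                     /* The bake kernel takes an extra filter argument, so the argument list is assembled conditionally. */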
1496                                 void *args[8];
1497                                 int arg = 0;
1498                                 args[arg++] = &d_input;
1499                                 args[arg++] = &d_output;
1500                                 args[arg++] = &task.shader_eval_type;
1501                                 if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
1502                                         args[arg++] = &task.shader_filter;
1503                                 }
1504                                 args[arg++] = &shader_x;
1505                                 args[arg++] = &shader_w;
1506                                 args[arg++] = &offset;
1507                                 args[arg++] = &sample;
1508
1509                                 /* launch kernel */
1510                                 int threads_per_block;
1511                                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuShader));
1512
1513                                 int xblocks = (shader_w + threads_per_block - 1)/threads_per_block;
1514
1515                                 cuda_assert(cuFuncSetCacheConfig(cuShader, CU_FUNC_CACHE_PREFER_L1));
1516                                 cuda_assert(cuLaunchKernel(cuShader,
1517                                                            xblocks, 1, 1, /* blocks */
1518                                                            threads_per_block, 1, 1, /* threads */
1519                                                            0, 0, args, 0));
1520
1521                                 cuda_assert(cuCtxSynchronize());
1522
1523                                 if(task.get_cancel()) {
1524                                         canceled = true;
1525                                         break;
1526                                 }
1527                         }
1528
1529                         task.update_progress(NULL);
1530                 }
1531         }
1532
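             /* With OpenGL interop the pixel memory is a PBO that must be mapped through the CUDA graphics API; in background mode it is a regular device allocation. */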
1533         CUdeviceptr map_pixels(device_ptr mem)
1534         {
1535                 if(!background) {
1536                         PixelMem pmem = pixel_mem_map[mem];
1537                         CUdeviceptr buffer;
1538
1539                         size_t bytes;
1540                         cuda_assert(cuGraphicsMapResources(1, &pmem.cuPBOresource, 0));
1541                         cuda_assert(cuGraphicsResourceGetMappedPointer(&buffer, &bytes, pmem.cuPBOresource));
1542
1543                         return buffer;
1544                 }
1545
1546                 return cuda_device_ptr(mem);
1547         }
1548
1549         void unmap_pixels(device_ptr mem)
1550         {
1551                 if(!background) {
1552                         PixelMem pmem = pixel_mem_map[mem];
1553
1554                         cuda_assert(cuGraphicsUnmapResources(1, &pmem.cuPBOresource, 0));
1555                 }
1556         }
1557
1558         void pixels_alloc(device_memory& mem)
1559         {
1560                 PixelMem pmem;
1561
1562                 pmem.w = mem.data_width;
1563                 pmem.h = mem.data_height;
1564
1565                 CUDAContextScope scope(this);
1566
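                     /* Create a PBO for CUDA to write pixels into and a texture used to draw them. */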
1567                 glGenBuffers(1, &pmem.cuPBO);
1568                 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
1569                 if(mem.data_type == TYPE_HALF)
1570                         glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(GLhalf)*4, NULL, GL_DYNAMIC_DRAW);
1571                 else
1572                         glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(uint8_t)*4, NULL, GL_DYNAMIC_DRAW);
1573
1574                 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1575
1576                 glGenTextures(1, &pmem.cuTexId);
1577                 glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
1578                 if(mem.data_type == TYPE_HALF)
1579                         glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F_ARB, pmem.w, pmem.h, 0, GL_RGBA, GL_HALF_FLOAT, NULL);
1580                 else
1581                         glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, pmem.w, pmem.h, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
1582                 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
1583                 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
1584                 glBindTexture(GL_TEXTURE_2D, 0);
1585
1586                 CUresult result = cuGraphicsGLRegisterBuffer(&pmem.cuPBOresource, pmem.cuPBO, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);
1587
1588                 if(result == CUDA_SUCCESS) {
1589                         mem.device_pointer = pmem.cuTexId;
1590                         pixel_mem_map[mem.device_pointer] = pmem;
1591
1592                         mem.device_size = mem.memory_size();
1593                         stats.mem_alloc(mem.device_size);
1594
1595                         return;
1596                 }
1597                 else {
1598                         /* Failed to register buffer, fall back to no interop. */
1599                         glDeleteBuffers(1, &pmem.cuPBO);
1600                         glDeleteTextures(1, &pmem.cuTexId);
1601
1602                         background = true;
1603                 }
1604         }
1605
1606         void pixels_copy_from(device_memory& mem, int y, int w, int h)
1607         {
1608                 PixelMem pmem = pixel_mem_map[mem.device_pointer];
1609
1610                 CUDAContextScope scope(this);
1611
1612                 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
1613                 uchar *pixels = (uchar*)glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_READ_ONLY);
1614                 size_t offset = sizeof(uchar)*4*y*w;
1615                 memcpy((uchar*)mem.data_pointer + offset, pixels + offset, sizeof(uchar)*4*w*h);
1616                 glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
1617                 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1618         }
1619
1620         void pixels_free(device_memory& mem)
1621         {
1622                 if(mem.device_pointer) {
1623                         PixelMem pmem = pixel_mem_map[mem.device_pointer];
1624
1625                         CUDAContextScope scope(this);
1626
1627                         cuda_assert(cuGraphicsUnregisterResource(pmem.cuPBOresource));
1628                         glDeleteBuffers(1, &pmem.cuPBO);
1629                         glDeleteTextures(1, &pmem.cuTexId);
1630
1631                         pixel_mem_map.erase(pixel_mem_map.find(mem.device_pointer));
1632                         mem.device_pointer = 0;
1633
1634                         stats.mem_free(mem.device_size);
1635                         mem.device_size = 0;
1636                 }
1637         }
1638
1639         void draw_pixels(device_memory& mem, int y, int w, int h, int dx, int dy, int width, int height, bool transparent,
1640                 const DeviceDrawParams &draw_params)
1641         {
1642                 assert(mem.type == MEM_PIXELS);
1643
1644                 if(!background) {
1645                         PixelMem pmem = pixel_mem_map[mem.device_pointer];
1646                         float *vpointer;
1647
1648                         CUDAContextScope scope(this);
1649
1650                         /* For multi devices, this assumes the inefficient approach of allocating
1651                          * all pixels on the device even though we only render to a subset. */
1652                         size_t offset = 4*y*w;
1653
1654                         if(mem.data_type == TYPE_HALF)
1655                                 offset *= sizeof(GLhalf);
1656                         else
1657                                 offset *= sizeof(uint8_t);
1658
1659                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
1660                         glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
1661                         if(mem.data_type == TYPE_HALF)
1662                                 glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_HALF_FLOAT, (void*)offset);
1663                         else
1664                                 glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, (void*)offset);
1665                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1666
1667                         glEnable(GL_TEXTURE_2D);
1668
1669                         if(transparent) {
1670                                 glEnable(GL_BLEND);
1671                                 glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
1672                         }
1673
1674                         glColor3f(1.0f, 1.0f, 1.0f);
1675
1676                         if(draw_params.bind_display_space_shader_cb) {
1677                                 draw_params.bind_display_space_shader_cb();
1678                         }
1679
1680                         if(!vertex_buffer)
1681                                 glGenBuffers(1, &vertex_buffer);
1682
1683                         glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
1684                         /* invalidate old contents - avoids stalling if buffer is still waiting in queue to be rendered */
1685                         glBufferData(GL_ARRAY_BUFFER, 16 * sizeof(float), NULL, GL_STREAM_DRAW);
1686
1687                         vpointer = (float *)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
1688
1689                         if(vpointer) {
1690                                 /* texture coordinate - vertex pair */
1691                                 vpointer[0] = 0.0f;
1692                                 vpointer[1] = 0.0f;
1693                                 vpointer[2] = dx;
1694                                 vpointer[3] = dy;
1695
1696                                 vpointer[4] = (float)w/(float)pmem.w;
1697                                 vpointer[5] = 0.0f;
1698                                 vpointer[6] = (float)width + dx;
1699                                 vpointer[7] = dy;
1700
1701                                 vpointer[8] = (float)w/(float)pmem.w;
1702                                 vpointer[9] = (float)h/(float)pmem.h;
1703                                 vpointer[10] = (float)width + dx;
1704                                 vpointer[11] = (float)height + dy;
1705
1706                                 vpointer[12] = 0.0f;
1707                                 vpointer[13] = (float)h/(float)pmem.h;
1708                                 vpointer[14] = dx;
1709                                 vpointer[15] = (float)height + dy;
1710
1711                                 glUnmapBuffer(GL_ARRAY_BUFFER);
1712                         }
1713
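                             /* Interleaved layout: two texture coordinates followed by two vertex coordinates per corner, with a stride of four floats. */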
1714                         glTexCoordPointer(2, GL_FLOAT, 4 * sizeof(float), 0);
1715                         glVertexPointer(2, GL_FLOAT, 4 * sizeof(float), (char *)NULL + 2 * sizeof(float));
1716
1717                         glEnableClientState(GL_VERTEX_ARRAY);
1718                         glEnableClientState(GL_TEXTURE_COORD_ARRAY);
1719
1720                         glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
1721
1722                         glDisableClientState(GL_TEXTURE_COORD_ARRAY);
1723                         glDisableClientState(GL_VERTEX_ARRAY);
1724
1725                         glBindBuffer(GL_ARRAY_BUFFER, 0);
1726
1727                         if(draw_params.unbind_display_space_shader_cb) {
1728                                 draw_params.unbind_display_space_shader_cb();
1729                         }
1730
1731                         if(transparent)
1732                                 glDisable(GL_BLEND);
1733
1734                         glBindTexture(GL_TEXTURE_2D, 0);
1735                         glDisable(GL_TEXTURE_2D);
1736
1737                         return;
1738                 }
1739
1740                 Device::draw_pixels(mem, y, w, h, dx, dy, width, height, transparent, draw_params);
1741         }
1742
1743         void thread_run(DeviceTask *task)
1744         {
1745                 CUDAContextScope scope(this);
1746
1747                 if(task->type == DeviceTask::RENDER) {
1748                         RenderTile tile;
1749
1750                         DeviceRequestedFeatures requested_features;
1751                         if(use_split_kernel()) {
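                                     /* Without adaptive compilation, the split kernel is built for the maximum closure count. */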
1752                                 if(!use_adaptive_compilation()) {
1753                                         requested_features.max_closure = 64;
1754                                 }
1755
1756                                 if(split_kernel == NULL) {
1757                                         split_kernel = new CUDASplitKernel(this);
1758                                         split_kernel->load_kernels(requested_features);
1759                                 }
1760                         }
1761
1762                         device_vector<WorkTile> work_tiles(this, "work_tiles", MEM_READ_ONLY);
1763
1764                         /* keep rendering tiles until done */
1765                         while(task->acquire_tile(this, tile)) {
1766                                 if(tile.task == RenderTile::PATH_TRACE) {
1767                                         if(use_split_kernel()) {
1768                                                 device_only_memory<uchar> void_buffer(this, "void_buffer");
1769                                                 split_kernel->path_trace(task, tile, void_buffer, void_buffer);
1770                                         }
1771                                         else {
1772                                                 path_trace(*task, tile, work_tiles);
1773                                         }
1774                                 }
1775                                 else if(tile.task == RenderTile::DENOISE) {
1776                                         tile.sample = tile.start_sample + tile.num_samples;
1777
1778                                         denoise(tile, *task);
1779
1780                                         task->update_progress(&tile, tile.w*tile.h);
1781                                 }
1782
1783                                 task->release_tile(tile);
1784
1785                                 if(task->get_cancel()) {
1786                                         if(task->need_finish_queue == false)
1787                                                 break;
1788                                 }
1789                         }
1790
1791                         work_tiles.free();
1792                 }
1793                 else if(task->type == DeviceTask::SHADER) {
1794                         shader(*task);
1795
1796                         cuda_assert(cuCtxSynchronize());
1797                 }
1798         }
1799
1800         class CUDADeviceTask : public DeviceTask {
1801         public:
1802                 CUDADeviceTask(CUDADevice *device, DeviceTask& task)
1803                 : DeviceTask(task)
1804                 {
1805                         run = function_bind(&CUDADevice::thread_run, device, this);
1806                 }
1807         };
1808
1809         int get_split_task_count(DeviceTask& /*task*/)
1810         {
1811                 return 1;
1812         }
1813
1814         void task_add(DeviceTask& task)
1815         {
1816                 CUDAContextScope scope(this);
1817
1818                 /* Load texture info. */
1819                 load_texture_info();
1820
1821                 if(task.type == DeviceTask::FILM_CONVERT) {
1822                         /* Must be done in the main thread due to OpenGL access. */
1823                         film_convert(task, task.buffer, task.rgba_byte, task.rgba_half);
1824                         cuda_assert(cuCtxSynchronize());
1825                 }
1826                 else {
1827                         task_pool.push(new CUDADeviceTask(this, task));
1828                 }
1829         }
1830
1831         void task_wait()
1832         {
1833                 task_pool.wait();
1834         }
1835
1836         void task_cancel()
1837         {
1838                 task_pool.cancel();
1839         }
1840
1841         friend class CUDASplitKernelFunction;
1842         friend class CUDASplitKernel;
1843         friend class CUDAContextScope;
1844 };
1845
1846 /* Redefine the cuda_assert macro so it can be used outside of the CUDADevice class,
1847  * now that the definition of that class is complete.
1848  */
1849 #undef cuda_assert
1850 #define cuda_assert(stmt) \
1851         { \
1852                 CUresult result = stmt; \
1853                 \
1854                 if(result != CUDA_SUCCESS) { \
1855                         string message = string_printf("CUDA error: %s in %s", cuewErrorString(result), #stmt); \
1856                         if(device->error_msg == "") \
1857                                 device->error_msg = message; \
1858                         fprintf(stderr, "%s\n", message.c_str()); \
1859                         /*cuda_abort();*/ \
1860                         device->cuda_error_documentation(); \
1861                 } \
1862         } (void)0
1863
1864
1865 /* CUDA context scope. */
1866
1867 CUDAContextScope::CUDAContextScope(CUDADevice *device)
1868 : device(device)
1869 {
1870         cuda_assert(cuCtxPushCurrent(device->cuContext));
1871 }
1872
1873 CUDAContextScope::~CUDAContextScope()
1874 {
1875         cuda_assert(cuCtxPopCurrent(NULL));
1876 }
1877
1878 /* split kernel */
1879
1880 class CUDASplitKernelFunction : public SplitKernelFunction {
1881         CUDADevice* device;
1882         CUfunction func;
1883 public:
1884         CUDASplitKernelFunction(CUDADevice *device, CUfunction func) : device(device), func(func) {}
1885
1886         /* enqueue the kernel, returns false if there is an error */
1887         bool enqueue(const KernelDimensions &dim, device_memory &/*kg*/, device_memory &/*data*/)
1888         {
1889                 return enqueue(dim, NULL);
1890         }
1891
1892         /* enqueue the kernel, returns false if there is an error */
1893         bool enqueue(const KernelDimensions &dim, void *args[])
1894         {
1895                 if(device->have_error())
1896                         return false;
1897
1898                 CUDAContextScope scope(device);
1899
1900                 /* we ignore dim.local_size for now, as this is faster */
1901                 int threads_per_block;
1902                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, func));
1903
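                     /* Flatten the 2D global size into a 1D grid sized by the kernel's maximum block size. */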
1904                 int xblocks = (dim.global_size[0]*dim.global_size[1] + threads_per_block - 1)/threads_per_block;
1905
1906                 cuda_assert(cuFuncSetCacheConfig(func, CU_FUNC_CACHE_PREFER_L1));
1907
1908                 cuda_assert(cuLaunchKernel(func,
1909                                            xblocks, 1, 1, /* blocks */
1910                                            threads_per_block, 1, 1, /* threads */
1911                                            0, 0, args, 0));
1912
1913                 return !device->have_error();
1914         }
1915 };
1916
1917 CUDASplitKernel::CUDASplitKernel(CUDADevice *device) : DeviceSplitKernel(device), device(device)
1918 {
1919 }
1920
1921 uint64_t CUDASplitKernel::state_buffer_size(device_memory& /*kg*/, device_memory& /*data*/, size_t num_threads)
1922 {
1923         CUDAContextScope scope(device);
1924
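             /* The state buffer size is computed on the device: a single-thread helper kernel writes the size required for num_threads threads into size_buffer. */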
1925         device_vector<uint64_t> size_buffer(device, "size_buffer", MEM_READ_WRITE);
1926         size_buffer.alloc(1);
1927         size_buffer.zero_to_device();
1928
1929         uint threads = num_threads;
1930         CUdeviceptr d_size = device->cuda_device_ptr(size_buffer.device_pointer);
1931
1932         struct args_t {
1933                 uint* num_threads;
1934                 CUdeviceptr* size;
1935         };
1936
1937         args_t args = {
1938                 &threads,
1939                 &d_size
1940         };
1941
1942         CUfunction state_buffer_size;
1943         cuda_assert(cuModuleGetFunction(&state_buffer_size, device->cuModule, "kernel_cuda_state_buffer_size"));
1944
1945         cuda_assert(cuLaunchKernel(state_buffer_size,
1946                                    1, 1, 1,
1947                                    1, 1, 1,
1948                                    0, 0, (void**)&args, 0));
1949
1950         size_buffer.copy_from_device(0, 1, 1);
1951         size_t size = size_buffer[0];
1952         size_buffer.free();
1953
1954         return size;
1955 }
1956
1957 bool CUDASplitKernel::enqueue_split_kernel_data_init(const KernelDimensions& dim,
1958                                     RenderTile& rtile,
1959                                     int num_global_elements,
1960                                     device_memory& /*kernel_globals*/,
1961                                     device_memory& /*kernel_data*/,
1962                                     device_memory& split_data,
1963                                     device_memory& ray_state,
1964                                     device_memory& queue_index,
1965                                     device_memory& use_queues_flag,
1966                                     device_memory& work_pool_wgs)
1967 {
1968         CUDAContextScope scope(device);
1969
1970         CUdeviceptr d_split_data = device->cuda_device_ptr(split_data.device_pointer);
1971         CUdeviceptr d_ray_state = device->cuda_device_ptr(ray_state.device_pointer);
1972         CUdeviceptr d_queue_index = device->cuda_device_ptr(queue_index.device_pointer);
1973         CUdeviceptr d_use_queues_flag = device->cuda_device_ptr(use_queues_flag.device_pointer);
1974         CUdeviceptr d_work_pool_wgs = device->cuda_device_ptr(work_pool_wgs.device_pointer);
1975
1976         CUdeviceptr d_buffer = device->cuda_device_ptr(rtile.buffer);
1977
1978         int end_sample = rtile.start_sample + rtile.num_samples;
1979         int queue_size = dim.global_size[0] * dim.global_size[1];
1980
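             /* Argument block passed to cuLaunchKernel; member order must match the parameter order of kernel_cuda_path_trace_data_init. */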
1981         struct args_t {
1982                 CUdeviceptr* split_data_buffer;
1983                 int* num_elements;
1984                 CUdeviceptr* ray_state;
1985                 int* start_sample;
1986                 int* end_sample;
1987                 int* sx;
1988                 int* sy;
1989                 int* sw;
1990                 int* sh;
1991                 int* offset;
1992                 int* stride;
1993                 CUdeviceptr* queue_index;
1994                 int* queuesize;
1995                 CUdeviceptr* use_queues_flag;
1996                 CUdeviceptr* work_pool_wgs;
1997                 int* num_samples;
1998                 CUdeviceptr* buffer;
1999         };
2000
2001         args_t args = {
2002                 &d_split_data,
2003                 &num_global_elements,
2004                 &d_ray_state,
2005                 &rtile.start_sample,
2006                 &end_sample,
2007                 &rtile.x,
2008                 &rtile.y,
2009                 &rtile.w,
2010                 &rtile.h,
2011                 &rtile.offset,
2012                 &rtile.stride,
2013                 &d_queue_index,
2014                 &queue_size,
2015                 &d_use_queues_flag,
2016                 &d_work_pool_wgs,
2017                 &rtile.num_samples,
2018                 &d_buffer
2019         };
2020
2021         CUfunction data_init;
2022         cuda_assert(cuModuleGetFunction(&data_init, device->cuModule, "kernel_cuda_path_trace_data_init"));
2023         if(device->have_error()) {
2024                 return false;
2025         }
2026
2027         CUDASplitKernelFunction(device, data_init).enqueue(dim, (void**)&args);
2028
2029         return !device->have_error();
2030 }
2031
2032 SplitKernelFunction* CUDASplitKernel::get_split_kernel_function(const string& kernel_name,
2033                                                                 const DeviceRequestedFeatures&)
2034 {
2035         CUDAContextScope scope(device);
2036         CUfunction func;
2037
2038         cuda_assert(cuModuleGetFunction(&func, device->cuModule, (string("kernel_cuda_") + kernel_name).data()));
2039         if(device->have_error()) {
2040                 device->cuda_error_message(string_printf("kernel \"kernel_cuda_%s\" not found in module", kernel_name.data()));
2041                 return NULL;
2042         }
2043
2044         return new CUDASplitKernelFunction(device, func);
2045 }
2046
2047 int2 CUDASplitKernel::split_kernel_local_size()
2048 {
2049         return make_int2(32, 1);
2050 }
2051
2052 int2 CUDASplitKernel::split_kernel_global_size(device_memory& kg, device_memory& data, DeviceTask * /*task*/)
2053 {
2054         CUDAContextScope scope(device);
2055         size_t free;
2056         size_t total;
2057
2058         cuda_assert(cuMemGetInfo(&free, &total));
2059
2060         VLOG(1) << "Maximum device allocation size: "
2061                 << string_human_readable_number(free) << " bytes. ("
2062                 << string_human_readable_size(free) << ").";
2063
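             /* Use at most half of the free device memory for the split kernel state, and round the work size down to multiples of 32 and 16. */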
2064         size_t num_elements = max_elements_for_max_buffer_size(kg, data, free / 2);
2065         size_t side = round_down((int)sqrt(num_elements), 32);
2066         int2 global_size = make_int2(side, round_down(num_elements / side, 16));
2067         VLOG(1) << "Global size: " << global_size << ".";
2068         return global_size;
2069 }
2070
2071 bool device_cuda_init(void)
2072 {
2073 #ifdef WITH_CUDA_DYNLOAD
2074         static bool initialized = false;
2075         static bool result = false;
2076
2077         if(initialized)
2078                 return result;
2079
2080         initialized = true;
2081         int cuew_result = cuewInit();
2082         if(cuew_result == CUEW_SUCCESS) {
2083                 VLOG(1) << "CUEW initialization succeeded";
2084                 if(CUDADevice::have_precompiled_kernels()) {
2085                         VLOG(1) << "Found precompiled kernels";
2086                         result = true;
2087                 }
2088 #ifndef _WIN32
2089                 else if(cuewCompilerPath() != NULL) {
2090                         VLOG(1) << "Found CUDA compiler " << cuewCompilerPath();
2091                         result = true;
2092                 }
2093                 else {
2094                         VLOG(1) << "Neither precompiled kernels nor CUDA compiler was found,"
2095                                 << " unable to use CUDA";
2096                 }
2097 #endif
2098         }
2099         else {
2100                 VLOG(1) << "CUEW initialization failed: "
2101                         << ((cuew_result == CUEW_ERROR_ATEXIT_FAILED)
2102                             ? "Error setting up atexit() handler"
2103                             : "Error opening the library");
2104         }
2105
2106         return result;
2107 #else  /* WITH_CUDA_DYNLOAD */
2108         return true;
2109 #endif /* WITH_CUDA_DYNLOAD */
2110 }
2111
2112 Device *device_cuda_create(DeviceInfo& info, Stats &stats, bool background)
2113 {
2114         return new CUDADevice(info, stats, background);
2115 }
2116
2117 static CUresult device_cuda_safe_init()
2118 {
2119 #ifdef _WIN32
2120         __try {
2121                 return cuInit(0);
2122         }
2123         __except(EXCEPTION_EXECUTE_HANDLER) {
2124                 /* Ignore crashes inside the CUDA driver and hope we can
2125                  * survive even with corrupted CUDA installs. */
2126                 fprintf(stderr, "Cycles CUDA: driver crashed, continuing without CUDA.\n");
2127         }
2128
2129         return CUDA_ERROR_NO_DEVICE;
2130 #else
2131         return cuInit(0);
2132 #endif
2133 }
2134
2135 void device_cuda_info(vector<DeviceInfo>& devices)
2136 {
2137         CUresult result = device_cuda_safe_init();
2138         if(result != CUDA_SUCCESS) {
2139                 if(result != CUDA_ERROR_NO_DEVICE)
2140                         fprintf(stderr, "CUDA cuInit: %s\n", cuewErrorString(result));
2141                 return;
2142         }
2143
2144         int count = 0;
2145         result = cuDeviceGetCount(&count);
2146         if(result != CUDA_SUCCESS) {
2147                 fprintf(stderr, "CUDA cuDeviceGetCount: %s\n", cuewErrorString(result));
2148                 return;
2149         }
2150
2151         vector<DeviceInfo> display_devices;
2152
2153         for(int num = 0; num < count; num++) {
2154                 char name[256];
2155
2156                 result = cuDeviceGetName(name, 256, num);
2157                 if(result != CUDA_SUCCESS) {
2158                         fprintf(stderr, "CUDA cuDeviceGetName: %s\n", cuewErrorString(result));
2159                         continue;
2160                 }
2161
2162                 int major;
2163                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, num);
2164                 if(major < 2) {
2165                         VLOG(1) << "Ignoring device \"" << name
2166                                 << "\", compute capability is too low.";
2167                         continue;
2168                 }
2169
2170                 DeviceInfo info;
2171
2172                 info.type = DEVICE_CUDA;
2173                 info.description = string(name);
2174                 info.num = num;
2175
2176                 info.advanced_shading = (major >= 2);
2177                 info.has_bindless_textures = (major >= 3);
2178                 info.has_volume_decoupled = false;
2179                 info.has_qbvh = false;
2180
2181                 int pci_location[3] = {0, 0, 0};
2182                 cuDeviceGetAttribute(&pci_location[0], CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID, num);
2183                 cuDeviceGetAttribute(&pci_location[1], CU_DEVICE_ATTRIBUTE_PCI_BUS_ID, num);
2184                 cuDeviceGetAttribute(&pci_location[2], CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID, num);
2185                 info.id = string_printf("CUDA_%s_%04x:%02x:%02x",
2186                                         name,
2187                                         (unsigned int)pci_location[0],
2188                                         (unsigned int)pci_location[1],
2189                                         (unsigned int)pci_location[2]);
2190
2191                 /* If device has a kernel timeout and no compute preemption, we assume
2192                  * it is connected to a display and will freeze the display while doing
2193                  * computations. */
2194                 int timeout_attr = 0, preempt_attr = 0;
2195                 cuDeviceGetAttribute(&timeout_attr, CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT, num);
2196                 cuDeviceGetAttribute(&preempt_attr, CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED, num);
2197
2198                 if(timeout_attr && !preempt_attr) {
2199                         VLOG(1) << "Device is recognized as display.";
2200                         info.description += " (Display)";
2201                         info.display_device = true;
2202                         display_devices.push_back(info);
2203                 }
2204                 else {
2205                         devices.push_back(info);
2206                 }
2207                 VLOG(1) << "Added device \"" << name << "\" with id \"" << info.id << "\".";
2208         }
2209
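             /* List display devices last, so non-display GPUs come first in the device list. */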
2210         if(!display_devices.empty())
2211                 devices.insert(devices.end(), display_devices.begin(), display_devices.end());
2212 }
2213
2214 string device_cuda_capabilities(void)
2215 {
2216         CUresult result = device_cuda_safe_init();
2217         if(result != CUDA_SUCCESS) {
2218                 if(result != CUDA_ERROR_NO_DEVICE) {
2219                         return string("Error initializing CUDA: ") + cuewErrorString(result);
2220                 }
2221                 return "No CUDA device found\n";
2222         }
2223
2224         int count;
2225         result = cuDeviceGetCount(&count);
2226         if(result != CUDA_SUCCESS) {
2227                 return string("Error getting devices: ") + cuewErrorString(result);
2228         }
2229
2230         string capabilities = "";
2231         for(int num = 0; num < count; num++) {
2232                 char name[256];
2233                 if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS) {
2234                         continue;
2235                 }
2236                 capabilities += string("\t") + name + "\n";
2237                 int value;
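                     /* Query a single device attribute and append it to the capabilities string when the query succeeds. */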
2238 #define GET_ATTR(attr) \
2239                 { \
2240                         if(cuDeviceGetAttribute(&value, \
2241                                                 CU_DEVICE_ATTRIBUTE_##attr, \
2242                                                 num) == CUDA_SUCCESS) \
2243                         { \
2244                                 capabilities += string_printf("\t\tCU_DEVICE_ATTRIBUTE_" #attr "\t\t\t%d\n", \
2245                                                               value); \
2246                         } \
2247                 } (void)0
2248                 /* TODO(sergey): Strip all attributes which are not useful for us
2249                  * or do not depend on the driver.
2250                  */
2251                 GET_ATTR(MAX_THREADS_PER_BLOCK);
2252                 GET_ATTR(MAX_BLOCK_DIM_X);
2253                 GET_ATTR(MAX_BLOCK_DIM_Y);
2254                 GET_ATTR(MAX_BLOCK_DIM_Z);
2255                 GET_ATTR(MAX_GRID_DIM_X);
2256                 GET_ATTR(MAX_GRID_DIM_Y);
2257                 GET_ATTR(MAX_GRID_DIM_Z);
2258                 GET_ATTR(MAX_SHARED_MEMORY_PER_BLOCK);
2259                 GET_ATTR(SHARED_MEMORY_PER_BLOCK);
2260                 GET_ATTR(TOTAL_CONSTANT_MEMORY);
2261                 GET_ATTR(WARP_SIZE);
2262                 GET_ATTR(MAX_PITCH);
2263                 GET_ATTR(MAX_REGISTERS_PER_BLOCK);
2264                 GET_ATTR(REGISTERS_PER_BLOCK);
2265                 GET_ATTR(CLOCK_RATE);
2266                 GET_ATTR(TEXTURE_ALIGNMENT);
2267                 GET_ATTR(GPU_OVERLAP);
2268                 GET_ATTR(MULTIPROCESSOR_COUNT);
2269                 GET_ATTR(KERNEL_EXEC_TIMEOUT);
2270                 GET_ATTR(INTEGRATED);
2271                 GET_ATTR(CAN_MAP_HOST_MEMORY);
2272                 GET_ATTR(COMPUTE_MODE);
2273                 GET_ATTR(MAXIMUM_TEXTURE1D_WIDTH);
2274                 GET_ATTR(MAXIMUM_TEXTURE2D_WIDTH);
2275                 GET_ATTR(MAXIMUM_TEXTURE2D_HEIGHT);
2276                 GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH);
2277                 GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT);
2278                 GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH);
2279                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_WIDTH);
2280                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_HEIGHT);
2281                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_LAYERS);
2282                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_WIDTH);
2283                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_HEIGHT);
2284                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES);
2285                 GET_ATTR(SURFACE_ALIGNMENT);
2286                 GET_ATTR(CONCURRENT_KERNELS);
2287                 GET_ATTR(ECC_ENABLED);
2288                 GET_ATTR(TCC_DRIVER);
2289                 GET_ATTR(MEMORY_CLOCK_RATE);
2290                 GET_ATTR(GLOBAL_MEMORY_BUS_WIDTH);
2291                 GET_ATTR(L2_CACHE_SIZE);
2292                 GET_ATTR(MAX_THREADS_PER_MULTIPROCESSOR);
2293                 GET_ATTR(ASYNC_ENGINE_COUNT);
2294                 GET_ATTR(UNIFIED_ADDRESSING);
2295                 GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_WIDTH);
2296                 GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_LAYERS);
2297                 GET_ATTR(CAN_TEX2D_GATHER);
2298                 GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_WIDTH);
2299                 GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_HEIGHT);
2300                 GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE);
2301                 GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE);
2302                 GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE);
2303                 GET_ATTR(TEXTURE_PITCH_ALIGNMENT);
2304                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_WIDTH);
2305                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH);
2306                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS);
2307                 GET_ATTR(MAXIMUM_SURFACE1D_WIDTH);
2308                 GET_ATTR(MAXIMUM_SURFACE2D_WIDTH);
2309                 GET_ATTR(MAXIMUM_SURFACE2D_HEIGHT);
2310                 GET_ATTR(MAXIMUM_SURFACE3D_WIDTH);
2311                 GET_ATTR(MAXIMUM_SURFACE3D_HEIGHT);
2312                 GET_ATTR(MAXIMUM_SURFACE3D_DEPTH);
2313                 GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_WIDTH);
2314                 GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_LAYERS);
2315                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_WIDTH);
2316                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_HEIGHT);
2317                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_LAYERS);
2318                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_WIDTH);
2319                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH);
2320                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS);
2321                 GET_ATTR(MAXIMUM_TEXTURE1D_LINEAR_WIDTH);
2322                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_WIDTH);
2323                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_HEIGHT);
2324                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_PITCH);
2325                 GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH);
2326                 GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT);
2327                 GET_ATTR(COMPUTE_CAPABILITY_MAJOR);
2328                 GET_ATTR(COMPUTE_CAPABILITY_MINOR);
2329                 GET_ATTR(MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH);
2330                 GET_ATTR(STREAM_PRIORITIES_SUPPORTED);
2331                 GET_ATTR(GLOBAL_L1_CACHE_SUPPORTED);
2332                 GET_ATTR(LOCAL_L1_CACHE_SUPPORTED);
2333                 GET_ATTR(MAX_SHARED_MEMORY_PER_MULTIPROCESSOR);
2334                 GET_ATTR(MAX_REGISTERS_PER_MULTIPROCESSOR);
2335                 GET_ATTR(MANAGED_MEMORY);
2336                 GET_ATTR(MULTI_GPU_BOARD);
2337                 GET_ATTR(MULTI_GPU_BOARD_GROUP_ID);
2338 #undef GET_ATTR
2339                 capabilities += "\n";
2340         }
2341
2342         return capabilities;
2343 }
2344
2345 CCL_NAMESPACE_END