1 /*
2  * Copyright 2011-2013 Blender Foundation
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16
17 #include <climits>
18 #include <limits.h>
19 #include <stdio.h>
20 #include <stdlib.h>
21 #include <string.h>
22
23 #include "device/device.h"
24 #include "device/device_intern.h"
25 #include "device/device_split_kernel.h"
26
27 #include "render/buffers.h"
28
29 #ifdef WITH_CUDA_DYNLOAD
30 #  include "cuew.h"
31 #else
32 #  include "util/util_opengl.h"
33 #  include <cuda.h>
34 #  include <cudaGL.h>
35 #endif
36 #include "util/util_debug.h"
37 #include "util/util_logging.h"
38 #include "util/util_map.h"
39 #include "util/util_md5.h"
40 #include "util/util_opengl.h"
41 #include "util/util_path.h"
42 #include "util/util_string.h"
43 #include "util/util_system.h"
44 #include "util/util_types.h"
45 #include "util/util_time.h"
46
47 #include "kernel/split/kernel_split_data_types.h"
48
49 CCL_NAMESPACE_BEGIN
50
51 #ifndef WITH_CUDA_DYNLOAD
52
53 /* Transparently implement some functions, so the majority of the file does not need
54  * to worry about the difference between dynamically loaded and linked CUDA at all.
55  */
56
57 namespace {
58
59 const char *cuewErrorString(CUresult result)
60 {
61         /* We can only give the error code here without major code duplication; that
62          * should be enough, since dynamic loading is only disabled by folks who
63          * know what they're doing anyway.
64          *
65          * NOTE: Avoid calling this from several threads.
66          */
67         static string error;
68         error = string_printf("%d", result);
69         return error.c_str();
70 }
71
72 const char *cuewCompilerPath(void)
73 {
74         return CYCLES_CUDA_NVCC_EXECUTABLE;
75 }
76
77 int cuewCompilerVersion(void)
78 {
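        /* CUDA_VERSION encodes the toolkit version as major * 1000 + minor * 10
         * (e.g. 8000 for CUDA 8.0); collapse it into the two-digit major*10 + minor
         * form used elsewhere in this file (e.g. 80). */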
79         return (CUDA_VERSION / 100) + (CUDA_VERSION % 100 / 10);
80 }
81
82 }  /* namespace */
83 #endif  /* WITH_CUDA_DYNLOAD */
84
85 class CUDADevice;
86
87 class CUDASplitKernel : public DeviceSplitKernel {
88         CUDADevice *device;
89 public:
90         explicit CUDASplitKernel(CUDADevice *device);
91
92         virtual uint64_t state_buffer_size(device_memory& kg, device_memory& data, size_t num_threads);
93
94         virtual bool enqueue_split_kernel_data_init(const KernelDimensions& dim,
95                                                     RenderTile& rtile,
96                                                     int num_global_elements,
97                                                     device_memory& kernel_globals,
98                                                     device_memory& kernel_data_,
99                                                     device_memory& split_data,
100                                                     device_memory& ray_state,
101                                                     device_memory& queue_index,
102                                                     device_memory& use_queues_flag,
103                                                     device_memory& work_pool_wgs);
104
105         virtual SplitKernelFunction* get_split_kernel_function(string kernel_name, const DeviceRequestedFeatures&);
106         virtual int2 split_kernel_local_size();
107         virtual int2 split_kernel_global_size(device_memory& kg, device_memory& data, DeviceTask *task);
108 };
109
110 class CUDADevice : public Device
111 {
112 public:
113         DedicatedTaskPool task_pool;
114         CUdevice cuDevice;
115         CUcontext cuContext;
116         CUmodule cuModule;
117         map<device_ptr, bool> tex_interp_map;
118         map<device_ptr, uint> tex_bindless_map;
119         int cuDevId;
120         int cuDevArchitecture;
121         bool first_error;
122
123         struct PixelMem {
124                 GLuint cuPBO;
125                 CUgraphicsResource cuPBOresource;
126                 GLuint cuTexId;
127                 int w, h;
128         };
129
130         map<device_ptr, PixelMem> pixel_mem_map;
131
132         /* Bindless Textures */
133         device_vector<uint> bindless_mapping;
134         bool need_bindless_mapping;
135
136         CUdeviceptr cuda_device_ptr(device_ptr mem)
137         {
138                 return (CUdeviceptr)mem;
139         }
140
141         static bool have_precompiled_kernels()
142         {
143                 string cubins_path = path_get("lib");
144                 return path_exists(cubins_path);
145         }
146
147         virtual bool show_samples() const
148         {
149                 /* The CUDADevice only processes one tile at a time, so showing samples is fine. */
150                 return true;
151         }
152
153 /*#ifdef NDEBUG
154 #define cuda_abort()
155 #else
156 #define cuda_abort() abort()
157 #endif*/
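        /* Print a pointer to the GPU rendering documentation, but only for the first
         * error encountered, to avoid repeating it for every subsequent CUDA error. */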
158         void cuda_error_documentation()
159         {
160                 if(first_error) {
161                         fprintf(stderr, "\nRefer to the Cycles GPU rendering documentation for possible solutions:\n");
162                         fprintf(stderr, "https://docs.blender.org/manual/en/dev/render/cycles/gpu_rendering.html\n\n");
163                         first_error = false;
164                 }
165         }
166
167 #define cuda_assert(stmt) \
168         { \
169                 CUresult result = stmt; \
170                 \
171                 if(result != CUDA_SUCCESS) { \
172                         string message = string_printf("CUDA error: %s in %s", cuewErrorString(result), #stmt); \
173                         if(error_msg == "") \
174                                 error_msg = message; \
175                         fprintf(stderr, "%s\n", message.c_str()); \
176                         /*cuda_abort();*/ \
177                         cuda_error_documentation(); \
178                 } \
179         } (void)0
180
181         bool cuda_error_(CUresult result, const string& stmt)
182         {
183                 if(result == CUDA_SUCCESS)
184                         return false;
185
186                 string message = string_printf("CUDA error at %s: %s", stmt.c_str(), cuewErrorString(result));
187                 if(error_msg == "")
188                         error_msg = message;
189                 fprintf(stderr, "%s\n", message.c_str());
190                 cuda_error_documentation();
191                 return true;
192         }
193
194 #define cuda_error(stmt) cuda_error_(stmt, #stmt)
195
196         void cuda_error_message(const string& message)
197         {
198                 if(error_msg == "")
199                         error_msg = message;
200                 fprintf(stderr, "%s\n", message.c_str());
201                 cuda_error_documentation();
202         }
203
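        /* Make this device's context current on the calling thread, and release it
         * again with cuda_pop_context(); the CUDA driver API tracks the current
         * context per thread. */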
204         void cuda_push_context()
205         {
206                 cuda_assert(cuCtxSetCurrent(cuContext));
207         }
208
209         void cuda_pop_context()
210         {
211                 cuda_assert(cuCtxSetCurrent(NULL));
212         }
213
214         CUDADevice(DeviceInfo& info, Stats &stats, bool background_)
215         : Device(info, stats, background_)
216         {
217                 first_error = true;
218                 background = background_;
219
220                 cuDevId = info.num;
221                 cuDevice = 0;
222                 cuContext = 0;
223
224                 need_bindless_mapping = false;
225
226                 /* initialize */
227                 if(cuda_error(cuInit(0)))
228                         return;
229
230                 /* set up device and context */
231                 if(cuda_error(cuDeviceGet(&cuDevice, cuDevId)))
232                         return;
233
234                 CUresult result;
235
236                 if(background) {
237                         result = cuCtxCreate(&cuContext, 0, cuDevice);
238                 }
239                 else {
240                         result = cuGLCtxCreate(&cuContext, 0, cuDevice);
241
242                         if(result != CUDA_SUCCESS) {
243                                 result = cuCtxCreate(&cuContext, 0, cuDevice);
244                                 background = true;
245                         }
246                 }
247
248                 if(cuda_error_(result, "cuCtxCreate"))
249                         return;
250
251                 int major, minor;
252                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
253                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
254                 cuDevArchitecture = major*100 + minor*10;
255
256                 cuda_pop_context();
257         }
258
259         ~CUDADevice()
260         {
261                 task_pool.stop();
262
263                 if(info.has_bindless_textures) {
264                         tex_free(bindless_mapping);
265                 }
266
267                 cuda_assert(cuCtxDestroy(cuContext));
268         }
269
270         bool support_device(const DeviceRequestedFeatures& /*requested_features*/)
271         {
272                 int major, minor;
273                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
274                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
275
276                 /* We only support sm_20 and above */
277                 if(major < 2) {
278                         cuda_error_message(string_printf("CUDA device is only supported with compute capability 2.0 or higher, found %d.%d.", major, minor));
279                         return false;
280                 }
281
282                 return true;
283         }
284
285         bool use_adaptive_compilation()
286         {
287                 return DebugFlags().cuda.adaptive_compile;
288         }
289
290         bool use_split_kernel()
291         {
292                 return DebugFlags().cuda.split_kernel;
293         }
294
295         /* Common NVCC flags which stay the same regardless of shading model or
296          * kernel sources MD5, and depend only on the compiler or compilation settings.
297          */
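        /* Example result (a sketch, assuming a 64-bit build with CUDA 8.0 and no
         * adaptive compilation, extra or debug flags):
         *   -m64 --ptxas-options="-v" --use_fast_math -DNVCC
         *   -D__KERNEL_CUDA_VERSION__=80 -I"<source dir>"
         */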
298         string compile_kernel_get_common_cflags(
299                 const DeviceRequestedFeatures& requested_features, bool split=false)
300         {
301                 const int cuda_version = cuewCompilerVersion();
302                 const int machine = system_cpu_bits();
303                 const string source_path = path_get("source");
304                 const string include_path = source_path;
305                 string cflags = string_printf("-m%d "
306                                               "--ptxas-options=\"-v\" "
307                                               "--use_fast_math "
308                                               "-DNVCC "
309                                               "-D__KERNEL_CUDA_VERSION__=%d "
310                                                "-I\"%s\"",
311                                               machine,
312                                               cuda_version,
313                                               include_path.c_str());
314                 if(use_adaptive_compilation()) {
315                         cflags += " " + requested_features.get_build_options();
316                 }
317                 const char *extra_cflags = getenv("CYCLES_CUDA_EXTRA_CFLAGS");
318                 if(extra_cflags) {
319                         cflags += string(" ") + string(extra_cflags);
320                 }
321 #ifdef WITH_CYCLES_DEBUG
322                 cflags += " -D__KERNEL_DEBUG__";
323 #endif
324
325                 if(split) {
326                         cflags += " -D__SPLIT__";
327                 }
328
329                 return cflags;
330         }
331
332         bool compile_check_compiler() {
333                 const char *nvcc = cuewCompilerPath();
334                 if(nvcc == NULL) {
335                         cuda_error_message("CUDA nvcc compiler not found. "
336                                            "Install CUDA toolkit in default location.");
337                         return false;
338                 }
339                 const int cuda_version = cuewCompilerVersion();
340                 VLOG(1) << "Found nvcc " << nvcc
341                         << ", CUDA version " << cuda_version
342                         << ".";
343                 const int major = cuda_version / 10, minor = cuda_version % 10;
344                 if(cuda_version == 0) {
345                         cuda_error_message("CUDA nvcc compiler version could not be parsed.");
346                         return false;
347                 }
348                 if(cuda_version < 80) {
349                         printf("Unsupported CUDA version %d.%d detected, "
350                                "you need CUDA 8.0 or newer.\n",
351                                major, minor);
352                         return false;
353                 }
354                 else if(cuda_version != 80) {
355                         printf("CUDA version %d.%d detected, build may succeed but only "
356                                "CUDA 8.0 is officially supported.\n",
357                                major, minor);
358                 }
359                 return true;
360         }
361
362         string compile_kernel(const DeviceRequestedFeatures& requested_features, bool split=false)
363         {
364                 /* Compute cubin name. */
365                 int major, minor;
366                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
367                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
368
369                 /* Attempt to use kernel provided with Blender. */
370                 if(!use_adaptive_compilation()) {
371                         const string cubin = path_get(string_printf(split ? "lib/kernel_split_sm_%d%d.cubin"
372                                                                           : "lib/kernel_sm_%d%d.cubin",
373                                                                     major, minor));
374                         VLOG(1) << "Testing for pre-compiled kernel " << cubin << ".";
375                         if(path_exists(cubin)) {
376                                 VLOG(1) << "Using precompiled kernel.";
377                                 return cubin;
378                         }
379                 }
380
381                 const string common_cflags =
382                         compile_kernel_get_common_cflags(requested_features, split);
383
384                 /* Try to use locally compiled kernel. */
385                 const string source_path = path_get("source");
386                 const string kernel_md5 = path_files_md5_hash(source_path);
387
388                 /* We include cflags in the md5, so that changing the CUDA toolkit or other
389                  * compiler command line arguments ensures the cubin gets re-built.
390                  */
391                 const string cubin_md5 = util_md5_string(kernel_md5 + common_cflags);
392
393                 const string cubin_file = string_printf(split ? "cycles_kernel_split_sm%d%d_%s.cubin"
394                                                               : "cycles_kernel_sm%d%d_%s.cubin",
395                                                         major, minor,
396                                                         cubin_md5.c_str());
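                /* e.g. "cycles_kernel_sm52_<md5 of sources and flags>.cubin" for an
                 * sm_52 device with the non-split kernel. */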
397                 const string cubin = path_cache_get(path_join("kernels", cubin_file));
398                 VLOG(1) << "Testing for locally compiled kernel " << cubin << ".";
399                 if(path_exists(cubin)) {
400                         VLOG(1) << "Using locally compiled kernel.";
401                         return cubin;
402                 }
403
404 #ifdef _WIN32
405                 if(have_precompiled_kernels()) {
406                         if(major < 2) {
407                                 cuda_error_message(string_printf(
408                                         "CUDA device requires compute capability 2.0 or up, "
409                                         "found %d.%d. Your GPU is not supported.",
410                                         major, minor));
411                         }
412                         else {
413                                 cuda_error_message(string_printf(
414                                         "CUDA binary kernel for this graphics card compute "
415                                         "capability (%d.%d) not found.",
416                                         major, minor));
417                         }
418                         return "";
419                 }
420 #endif
421
422                 /* Compile. */
423                 if(!compile_check_compiler()) {
424                         return "";
425                 }
426                 const char *nvcc = cuewCompilerPath();
427                 const string kernel = path_join(
428                         path_join(source_path, "kernel"),
429                         path_join("kernels",
430                                   path_join("cuda", split ? "kernel_split.cu" : "kernel.cu")));
431                 double starttime = time_dt();
432                 printf("Compiling CUDA kernel ...\n");
433
434                 path_create_directories(cubin);
435
436                 string command = string_printf("\"%s\" "
437                                                "-arch=sm_%d%d "
438                                                "--cubin \"%s\" "
439                                                "-o \"%s\" "
440                                                "%s ",
441                                                nvcc,
442                                                major, minor,
443                                                kernel.c_str(),
444                                                cubin.c_str(),
445                                                common_cflags.c_str());
446
447                 printf("%s\n", command.c_str());
448
449                 if(system(command.c_str()) == -1) {
450                         cuda_error_message("Failed to execute compilation command, "
451                                            "see console for details.");
452                         return "";
453                 }
454
455                 /* Verify that compilation succeeded. */
456                 if(!path_exists(cubin)) {
457                         cuda_error_message("CUDA kernel compilation failed, "
458                                            "see console for details.");
459                         return "";
460                 }
461
462                 printf("Kernel compilation finished in %.2lfs.\n", time_dt() - starttime);
463
464                 return cubin;
465         }
466
467         bool load_kernels(const DeviceRequestedFeatures& requested_features)
468         {
469                 /* check if cuda init succeeded */
470                 if(cuContext == 0)
471                         return false;
472
473                 /* check if GPU is supported */
474                 if(!support_device(requested_features))
475                         return false;
476
477                 /* get kernel */
478                 string cubin = compile_kernel(requested_features, use_split_kernel());
479
480                 if(cubin == "")
481                         return false;
482
483                 /* open module */
484                 cuda_push_context();
485
486                 string cubin_data;
487                 CUresult result;
488
489                 if(path_read_text(cubin, cubin_data))
490                         result = cuModuleLoadData(&cuModule, cubin_data.c_str());
491                 else
492                         result = CUDA_ERROR_FILE_NOT_FOUND;
493
494                 if(cuda_error_(result, "cuModuleLoad"))
495                         cuda_error_message(string_printf("Failed loading CUDA kernel %s.", cubin.c_str()));
496
497                 cuda_pop_context();
498
499                 return (result == CUDA_SUCCESS);
500         }
501
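        /* (Re-)upload the flat slot -> texture object mapping to the device after
         * textures changed. This is a no-op on devices without bindless texture
         * support (Fermi), which use hardware texture references instead. */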
502         void load_bindless_mapping()
503         {
504                 if(info.has_bindless_textures && need_bindless_mapping) {
505                         tex_free(bindless_mapping);
506                         tex_alloc("__bindless_mapping", bindless_mapping, INTERPOLATION_NONE, EXTENSION_REPEAT);
507                         need_bindless_mapping = false;
508                 }
509         }
510
511         void mem_alloc(const char *name, device_memory& mem, MemoryType /*type*/)
512         {
513                 if(name) {
514                         VLOG(1) << "Buffer allocate: " << name << ", "
515                                 << string_human_readable_number(mem.memory_size()) << " bytes. ("
516                                 << string_human_readable_size(mem.memory_size()) << ")";
517                 }
518
519                 cuda_push_context();
520                 CUdeviceptr device_pointer;
521                 size_t size = mem.memory_size();
522                 cuda_assert(cuMemAlloc(&device_pointer, size));
523                 mem.device_pointer = (device_ptr)device_pointer;
524                 mem.device_size = size;
525                 stats.mem_alloc(size);
526                 cuda_pop_context();
527         }
528
529         void mem_copy_to(device_memory& mem)
530         {
531                 cuda_push_context();
532                 if(mem.device_pointer)
533                         cuda_assert(cuMemcpyHtoD(cuda_device_ptr(mem.device_pointer), (void*)mem.data_pointer, mem.memory_size()));
534                 cuda_pop_context();
535         }
536
537         void mem_copy_from(device_memory& mem, int y, int w, int h, int elem)
538         {
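                /* Copy back rows [y, y+h) of a buffer that is w pixels wide, with elem
                 * bytes per pixel; if there is no device memory, zero the host range instead. */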
539                 size_t offset = elem*y*w;
540                 size_t size = elem*w*h;
541
542                 cuda_push_context();
543                 if(mem.device_pointer) {
544                         cuda_assert(cuMemcpyDtoH((uchar*)mem.data_pointer + offset,
545                                                  (CUdeviceptr)(mem.device_pointer + offset), size));
546                 }
547                 else {
548                         memset((char*)mem.data_pointer + offset, 0, size);
549                 }
550                 cuda_pop_context();
551         }
552
553         void mem_zero(device_memory& mem)
554         {
555                 if(mem.data_pointer) {
556                         memset((void*)mem.data_pointer, 0, mem.memory_size());
557                 }
558
559                 cuda_push_context();
560                 if(mem.device_pointer)
561                         cuda_assert(cuMemsetD8(cuda_device_ptr(mem.device_pointer), 0, mem.memory_size()));
562                 cuda_pop_context();
563         }
564
565         void mem_free(device_memory& mem)
566         {
567                 if(mem.device_pointer) {
568                         cuda_push_context();
569                         cuda_assert(cuMemFree(cuda_device_ptr(mem.device_pointer)));
570                         cuda_pop_context();
571
572                         mem.device_pointer = 0;
573
574                         stats.mem_free(mem.device_size);
575                         mem.device_size = 0;
576                 }
577         }
578
579         void const_copy_to(const char *name, void *host, size_t size)
580         {
581                 CUdeviceptr mem;
582                 size_t bytes;
583
584                 cuda_push_context();
585                 cuda_assert(cuModuleGetGlobal(&mem, &bytes, cuModule, name));
586                 //assert(bytes == size);
587                 cuda_assert(cuMemcpyHtoD(mem, host, size));
588                 cuda_pop_context();
589         }
590
591         void tex_alloc(const char *name,
592                        device_memory& mem,
593                        InterpolationType interpolation,
594                        ExtensionType extension)
595         {
596                 VLOG(1) << "Texture allocate: " << name << ", "
597                         << string_human_readable_number(mem.memory_size()) << " bytes. ("
598                         << string_human_readable_size(mem.memory_size()) << ")";
599
600                 /* Check if we are on sm_30 or above.
601                  * We use arrays and bindless textures for storage there. */
602                 bool has_bindless_textures = info.has_bindless_textures;
603
604                 /* General variables for both architectures */
605                 string bind_name = name;
606                 size_t dsize = datatype_size(mem.data_type);
607                 size_t size = mem.memory_size();
608
609                 CUaddress_mode address_mode = CU_TR_ADDRESS_MODE_WRAP;
610                 switch(extension) {
611                         case EXTENSION_REPEAT:
612                                 address_mode = CU_TR_ADDRESS_MODE_WRAP;
613                                 break;
614                         case EXTENSION_EXTEND:
615                                 address_mode = CU_TR_ADDRESS_MODE_CLAMP;
616                                 break;
617                         case EXTENSION_CLIP:
618                                 address_mode = CU_TR_ADDRESS_MODE_BORDER;
619                                 break;
620                         default:
621                                 assert(0);
622                                 break;
623                 }
624
625                 CUfilter_mode filter_mode;
626                 if(interpolation == INTERPOLATION_CLOSEST) {
627                         filter_mode = CU_TR_FILTER_MODE_POINT;
628                 }
629                 else {
630                         filter_mode = CU_TR_FILTER_MODE_LINEAR;
631                 }
632
633                 CUarray_format_enum format;
634                 switch(mem.data_type) {
635                         case TYPE_UCHAR: format = CU_AD_FORMAT_UNSIGNED_INT8; break;
636                         case TYPE_UINT: format = CU_AD_FORMAT_UNSIGNED_INT32; break;
637                         case TYPE_INT: format = CU_AD_FORMAT_SIGNED_INT32; break;
638                         case TYPE_FLOAT: format = CU_AD_FORMAT_FLOAT; break;
639                         case TYPE_HALF: format = CU_AD_FORMAT_HALF; break;
640                         default: assert(0); return;
641                 }
642
643                 /* General variables for Fermi */
644                 CUtexref texref = NULL;
645
646                 if(!has_bindless_textures) {
647                         if(mem.data_depth > 1) {
648                                 /* The kernel uses different bind names for 2d and 3d float textures,
649                                  * so we have to adjust a couple of things here.
650                                  */
651                                 vector<string> tokens;
652                                 string_split(tokens, name, "_");
653                                 bind_name = string_printf("__tex_image_%s_3d_%s",
654                                                           tokens[2].c_str(),
655                                                           tokens[3].c_str());
656                         }
657
658                         cuda_push_context();
659                         cuda_assert(cuModuleGetTexRef(&texref, cuModule, bind_name.c_str()));
660                         cuda_pop_context();
661
662                         if(!texref) {
663                                 return;
664                         }
665                 }
666
667                 /* Data Storage */
668                 if(interpolation == INTERPOLATION_NONE) {
669                         if(has_bindless_textures) {
670                                 mem_alloc(NULL, mem, MEM_READ_ONLY);
671                                 mem_copy_to(mem);
672
673                                 cuda_push_context();
674
675                                 CUdeviceptr cumem;
676                                 size_t cubytes;
677
678                                 cuda_assert(cuModuleGetGlobal(&cumem, &cubytes, cuModule, bind_name.c_str()));
679
680                                 if(cubytes == 8) {
681                                         /* 64 bit device pointer */
682                                         uint64_t ptr = mem.device_pointer;
683                                         cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
684                                 }
685                                 else {
686                                         /* 32 bit device pointer */
687                                         uint32_t ptr = (uint32_t)mem.device_pointer;
688                                         cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
689                                 }
690
691                                 cuda_pop_context();
692                         }
693                         else {
694                                 mem_alloc(NULL, mem, MEM_READ_ONLY);
695                                 mem_copy_to(mem);
696
697                                 cuda_push_context();
698
699                                 cuda_assert(cuTexRefSetAddress(NULL, texref, cuda_device_ptr(mem.device_pointer), size));
700                                 cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_POINT));
701                                 cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_READ_AS_INTEGER));
702
703                                 cuda_pop_context();
704                         }
705                 }
706                 /* Texture Storage */
707                 else {
708                         CUarray handle = NULL;
709
710                         cuda_push_context();
711
712                         if(mem.data_depth > 1) {
713                                 CUDA_ARRAY3D_DESCRIPTOR desc;
714
715                                 desc.Width = mem.data_width;
716                                 desc.Height = mem.data_height;
717                                 desc.Depth = mem.data_depth;
718                                 desc.Format = format;
719                                 desc.NumChannels = mem.data_elements;
720                                 desc.Flags = 0;
721
722                                 cuda_assert(cuArray3DCreate(&handle, &desc));
723                         }
724                         else {
725                                 CUDA_ARRAY_DESCRIPTOR desc;
726
727                                 desc.Width = mem.data_width;
728                                 desc.Height = mem.data_height;
729                                 desc.Format = format;
730                                 desc.NumChannels = mem.data_elements;
731
732                                 cuda_assert(cuArrayCreate(&handle, &desc));
733                         }
734
735                         if(!handle) {
736                                 cuda_pop_context();
737                                 return;
738                         }
739
740                         /* Copy 3D, 2D or 1D data into the array */
741                         if(mem.data_depth > 1) {
742                                 CUDA_MEMCPY3D param;
743                                 memset(&param, 0, sizeof(param));
744                                 param.dstMemoryType = CU_MEMORYTYPE_ARRAY;
745                                 param.dstArray = handle;
746                                 param.srcMemoryType = CU_MEMORYTYPE_HOST;
747                                 param.srcHost = (void*)mem.data_pointer;
748                                 param.srcPitch = mem.data_width*dsize*mem.data_elements;
749                                 param.WidthInBytes = param.srcPitch;
750                                 param.Height = mem.data_height;
751                                 param.Depth = mem.data_depth;
752
753                                 cuda_assert(cuMemcpy3D(&param));
754                         }
755                         else if(mem.data_height > 1) {
756                                 CUDA_MEMCPY2D param;
757                                 memset(&param, 0, sizeof(param));
758                                 param.dstMemoryType = CU_MEMORYTYPE_ARRAY;
759                                 param.dstArray = handle;
760                                 param.srcMemoryType = CU_MEMORYTYPE_HOST;
761                                 param.srcHost = (void*)mem.data_pointer;
762                                 param.srcPitch = mem.data_width*dsize*mem.data_elements;
763                                 param.WidthInBytes = param.srcPitch;
764                                 param.Height = mem.data_height;
765
766                                 cuda_assert(cuMemcpy2D(&param));
767                         }
768                         else
769                                 cuda_assert(cuMemcpyHtoA(handle, 0, (void*)mem.data_pointer, size));
770
771                         /* Fermi and Kepler */
772                         mem.device_pointer = (device_ptr)handle;
773                         mem.device_size = size;
774
775                         stats.mem_alloc(size);
776
777                         /* Bindless Textures - Kepler */
778                         if(has_bindless_textures) {
779                                 int flat_slot = 0;
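                                /* The flat slot index is parsed from the trailing number of the texture
                                 * name, e.g. a name like "__tex_image_float4_005" (hypothetical) yields slot 5. */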
780                                 if(string_startswith(name, "__tex_image")) {
781                                         int pos = string(name).rfind("_");
782                                         flat_slot = atoi(name + pos + 1);
783                                 }
784                                 else {
785                                         assert(0);
786                                 }
787
788                                 CUDA_RESOURCE_DESC resDesc;
789                                 memset(&resDesc, 0, sizeof(resDesc));
790                                 resDesc.resType = CU_RESOURCE_TYPE_ARRAY;
791                                 resDesc.res.array.hArray = handle;
792                                 resDesc.flags = 0;
793
794                                 CUDA_TEXTURE_DESC texDesc;
795                                 memset(&texDesc, 0, sizeof(texDesc));
796                                 texDesc.addressMode[0] = address_mode;
797                                 texDesc.addressMode[1] = address_mode;
798                                 texDesc.addressMode[2] = address_mode;
799                                 texDesc.filterMode = filter_mode;
800                                 texDesc.flags = CU_TRSF_NORMALIZED_COORDINATES;
801
802                                 CUtexObject tex = 0;
803                                 cuda_assert(cuTexObjectCreate(&tex, &resDesc, &texDesc, NULL));
804
805                                 /* Safety check: the 64-bit texture object handle must fit in the uint slot. */
806                                 if(tex > UINT_MAX) {
807                                         assert(0);
808                                 }
809
810                                 /* Resize once */
811                                 if(flat_slot >= bindless_mapping.size()) {
812                                         /* Allocate some slots in advance, to reduce the number
813                                          * of re-allocations.
814                                          */
815                                         bindless_mapping.resize(flat_slot + 128);
816                                 }
817
818                                 /* Set Mapping and tag that we need to (re-)upload to device */
819                                 bindless_mapping.get_data()[flat_slot] = (uint)tex;
820                                 tex_bindless_map[mem.device_pointer] = (uint)tex;
821                                 need_bindless_mapping = true;
822                         }
823                         /* Regular Textures - Fermi */
824                         else {
825                                 cuda_assert(cuTexRefSetArray(texref, handle, CU_TRSA_OVERRIDE_FORMAT));
826                                 cuda_assert(cuTexRefSetFilterMode(texref, filter_mode));
827                                 cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_NORMALIZED_COORDINATES));
828                         }
829
830                         cuda_pop_context();
831                 }
832
833                 /* Fermi, Data and Image Textures */
834                 if(!has_bindless_textures) {
835                         cuda_push_context();
836
837                         cuda_assert(cuTexRefSetAddressMode(texref, 0, address_mode));
838                         cuda_assert(cuTexRefSetAddressMode(texref, 1, address_mode));
839                         if(mem.data_depth > 1) {
840                                 cuda_assert(cuTexRefSetAddressMode(texref, 2, address_mode));
841                         }
842
843                         cuda_assert(cuTexRefSetFormat(texref, format, mem.data_elements));
844
845                         cuda_pop_context();
846                 }
847
848                 /* Fermi and Kepler */
849                 tex_interp_map[mem.device_pointer] = (interpolation != INTERPOLATION_NONE);
850         }
851
852         void tex_free(device_memory& mem)
853         {
854                 if(mem.device_pointer) {
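                        /* Textures allocated with interpolation use CUDA arrays (and possibly a
                         * bindless texture object); everything else is plain linear memory that
                         * goes through mem_free(). */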
855                         if(tex_interp_map[mem.device_pointer]) {
856                                 cuda_push_context();
857                                 cuArrayDestroy((CUarray)mem.device_pointer);
858                                 cuda_pop_context();
859
860                                 /* Free CUtexObject (Bindless Textures) */
861                                 if(info.has_bindless_textures && tex_bindless_map[mem.device_pointer]) {
862                                         uint flat_slot = tex_bindless_map[mem.device_pointer];
863                                         cuTexObjectDestroy(flat_slot);
864                                 }
865
866                                 tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
867                                 mem.device_pointer = 0;
868
869                                 stats.mem_free(mem.device_size);
870                                 mem.device_size = 0;
871                         }
872                         else {
873                                 tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
874                                 mem_free(mem);
875                         }
876                 }
877         }
878
879         void path_trace(RenderTile& rtile, int sample, bool branched)
880         {
881                 if(have_error())
882                         return;
883
884                 cuda_push_context();
885
886                 CUfunction cuPathTrace;
887                 CUdeviceptr d_buffer = cuda_device_ptr(rtile.buffer);
888                 CUdeviceptr d_rng_state = cuda_device_ptr(rtile.rng_state);
889
890                 /* get kernel function */
891                 if(branched) {
892                         cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_branched_path_trace"));
893                 }
894                 else {
895                         cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_path_trace"));
896                 }
897
898                 if(have_error())
899                         return;
900
901                 /* pass in parameters */
902                 void *args[] = {&d_buffer,
903                                 &d_rng_state,
904                                 &sample,
905                                 &rtile.x,
906                                 &rtile.y,
907                                 &rtile.w,
908                                 &rtile.h,
909                                 &rtile.offset,
910                                 &rtile.stride};
911
912                 /* launch kernel */
913                 int threads_per_block;
914                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuPathTrace));
915
916                 /*int num_registers;
917                 cuda_assert(cuFuncGetAttribute(&num_registers, CU_FUNC_ATTRIBUTE_NUM_REGS, cuPathTrace));
918
919                 printf("threads_per_block %d\n", threads_per_block);
920                 printf("num_registers %d\n", num_registers);*/
921
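                /* Use a square 2D thread block: e.g. threads_per_block = 256 gives 16x16
                 * threads, so the tile is covered by ceil(w/16) x ceil(h/16) blocks. */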
922                 int xthreads = (int)sqrt(threads_per_block);
923                 int ythreads = (int)sqrt(threads_per_block);
924                 int xblocks = (rtile.w + xthreads - 1)/xthreads;
925                 int yblocks = (rtile.h + ythreads - 1)/ythreads;
926
927                 cuda_assert(cuFuncSetCacheConfig(cuPathTrace, CU_FUNC_CACHE_PREFER_L1));
928
929                 cuda_assert(cuLaunchKernel(cuPathTrace,
930                                            xblocks , yblocks, 1, /* blocks */
931                                            xthreads, ythreads, 1, /* threads */
932                                            0, 0, args, 0));
933
934                 cuda_assert(cuCtxSynchronize());
935
936                 cuda_pop_context();
937         }
938
939         void film_convert(DeviceTask& task, device_ptr buffer, device_ptr rgba_byte, device_ptr rgba_half)
940         {
941                 if(have_error())
942                         return;
943
944                 cuda_push_context();
945
946                 CUfunction cuFilmConvert;
947                 CUdeviceptr d_rgba = map_pixels((rgba_byte)? rgba_byte: rgba_half);
948                 CUdeviceptr d_buffer = cuda_device_ptr(buffer);
949
950                 /* get kernel function */
951                 if(rgba_half) {
952                         cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_half_float"));
953                 }
954                 else {
955                         cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_byte"));
956                 }
957
958
959                 float sample_scale = 1.0f/(task.sample + 1);
960
961                 /* pass in parameters */
962                 void *args[] = {&d_rgba,
963                                 &d_buffer,
964                                 &sample_scale,
965                                 &task.x,
966                                 &task.y,
967                                 &task.w,
968                                 &task.h,
969                                 &task.offset,
970                                 &task.stride};
971
972                 /* launch kernel */
973                 int threads_per_block;
974                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuFilmConvert));
975
976                 int xthreads = (int)sqrt(threads_per_block);
977                 int ythreads = (int)sqrt(threads_per_block);
978                 int xblocks = (task.w + xthreads - 1)/xthreads;
979                 int yblocks = (task.h + ythreads - 1)/ythreads;
980
981                 cuda_assert(cuFuncSetCacheConfig(cuFilmConvert, CU_FUNC_CACHE_PREFER_L1));
982
983                 cuda_assert(cuLaunchKernel(cuFilmConvert,
984                                            xblocks , yblocks, 1, /* blocks */
985                                            xthreads, ythreads, 1, /* threads */
986                                            0, 0, args, 0));
987
988                 unmap_pixels((rgba_byte)? rgba_byte: rgba_half);
989
990                 cuda_pop_context();
991         }
992
993         void shader(DeviceTask& task)
994         {
995                 if(have_error())
996                         return;
997
998                 cuda_push_context();
999
1000                 CUfunction cuShader;
1001                 CUdeviceptr d_input = cuda_device_ptr(task.shader_input);
1002                 CUdeviceptr d_output = cuda_device_ptr(task.shader_output);
1003                 CUdeviceptr d_output_luma = cuda_device_ptr(task.shader_output_luma);
1004
1005                 /* get kernel function */
1006                 if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
1007                         cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_bake"));
1008                 }
1009                 else {
1010                         cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_shader"));
1011                 }
1012
1013                 /* do the task in smaller chunks, so we can cancel it */
1014                 const int shader_chunk_size = 65536;
1015                 const int start = task.shader_x;
1016                 const int end = task.shader_x + task.shader_w;
1017                 int offset = task.offset;
1018
1019                 bool canceled = false;
1020                 for(int sample = 0; sample < task.num_samples && !canceled; sample++) {
1021                         for(int shader_x = start; shader_x < end; shader_x += shader_chunk_size) {
1022                                 int shader_w = min(shader_chunk_size, end - shader_x);
1023
1024                                 /* pass in parameters */
1025                                 void *args[8];
1026                                 int arg = 0;
1027                                 args[arg++] = &d_input;
1028                                 args[arg++] = &d_output;
1029                                 if(task.shader_eval_type < SHADER_EVAL_BAKE) {
1030                                         args[arg++] = &d_output_luma;
1031                                 }
1032                                 args[arg++] = &task.shader_eval_type;
1033                                 if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
1034                                         args[arg++] = &task.shader_filter;
1035                                 }
1036                                 args[arg++] = &shader_x;
1037                                 args[arg++] = &shader_w;
1038                                 args[arg++] = &offset;
1039                                 args[arg++] = &sample;
1040
1041                                 /* launch kernel */
1042                                 int threads_per_block;
1043                                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuShader));
1044
1045                                 int xblocks = (shader_w + threads_per_block - 1)/threads_per_block;
1046
1047                                 cuda_assert(cuFuncSetCacheConfig(cuShader, CU_FUNC_CACHE_PREFER_L1));
1048                                 cuda_assert(cuLaunchKernel(cuShader,
1049                                                            xblocks , 1, 1, /* blocks */
1050                                                            threads_per_block, 1, 1, /* threads */
1051                                                            0, 0, args, 0));
1052
1053                                 cuda_assert(cuCtxSynchronize());
1054
1055                                 if(task.get_cancel()) {
1056                                         canceled = true;
1057                                         break;
1058                                 }
1059                         }
1060
1061                         task.update_progress(NULL);
1062                 }
1063
1064                 cuda_pop_context();
1065         }
1066
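        /* With GL interop, map the registered PBO to obtain a CUDA device pointer for
         * the display buffer; in background mode the buffer is already a plain device
         * allocation. */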
1067         CUdeviceptr map_pixels(device_ptr mem)
1068         {
1069                 if(!background) {
1070                         PixelMem pmem = pixel_mem_map[mem];
1071                         CUdeviceptr buffer;
1072
1073                         size_t bytes;
1074                         cuda_assert(cuGraphicsMapResources(1, &pmem.cuPBOresource, 0));
1075                         cuda_assert(cuGraphicsResourceGetMappedPointer(&buffer, &bytes, pmem.cuPBOresource));
1076
1077                         return buffer;
1078                 }
1079
1080                 return cuda_device_ptr(mem);
1081         }
1082
1083         void unmap_pixels(device_ptr mem)
1084         {
1085                 if(!background) {
1086                         PixelMem pmem = pixel_mem_map[mem];
1087
1088                         cuda_assert(cuGraphicsUnmapResources(1, &pmem.cuPBOresource, 0));
1089                 }
1090         }
1091
1092         void pixels_alloc(device_memory& mem)
1093         {
1094                 if(!background) {
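                        /* GL interop path: create a PBO and a display texture, and register the
                         * PBO with CUDA; if registration fails we fall back to plain pixel memory
                         * below. */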
1095                         PixelMem pmem;
1096
1097                         pmem.w = mem.data_width;
1098                         pmem.h = mem.data_height;
1099
1100                         cuda_push_context();
1101
1102                         glGenBuffers(1, &pmem.cuPBO);
1103                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
1104                         if(mem.data_type == TYPE_HALF)
1105                                 glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(GLhalf)*4, NULL, GL_DYNAMIC_DRAW);
1106                         else
1107                                 glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(uint8_t)*4, NULL, GL_DYNAMIC_DRAW);
1108
1109                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1110
1111                         glGenTextures(1, &pmem.cuTexId);
1112                         glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
1113                         if(mem.data_type == TYPE_HALF)
1114                                 glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F_ARB, pmem.w, pmem.h, 0, GL_RGBA, GL_HALF_FLOAT, NULL);
1115                         else
1116                                 glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, pmem.w, pmem.h, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
1117                         glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
1118                         glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
1119                         glBindTexture(GL_TEXTURE_2D, 0);
1120
1121                         CUresult result = cuGraphicsGLRegisterBuffer(&pmem.cuPBOresource, pmem.cuPBO, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);
1122
1123                         if(result == CUDA_SUCCESS) {
1124                                 cuda_pop_context();
1125
1126                                 mem.device_pointer = pmem.cuTexId;
1127                                 pixel_mem_map[mem.device_pointer] = pmem;
1128
1129                                 mem.device_size = mem.memory_size();
1130                                 stats.mem_alloc(mem.device_size);
1131
1132                                 return;
1133                         }
1134                         else {
1135                                 /* failed to register buffer, fallback to no interop */
1136                                 glDeleteBuffers(1, &pmem.cuPBO);
1137                                 glDeleteTextures(1, &pmem.cuTexId);
1138
1139                                 cuda_pop_context();
1140
1141                                 background = true;
1142                         }
1143                 }
1144
1145                 Device::pixels_alloc(mem);
1146         }
1147
1148         void pixels_copy_from(device_memory& mem, int y, int w, int h)
1149         {
1150                 if(!background) {
1151                         PixelMem pmem = pixel_mem_map[mem.device_pointer];
1152
1153                         cuda_push_context();
1154
1155                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
1156                         uchar *pixels = (uchar*)glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_READ_ONLY);
1157                         size_t offset = sizeof(uchar)*4*y*w;
1158                         memcpy((uchar*)mem.data_pointer + offset, pixels + offset, sizeof(uchar)*4*w*h);
1159                         glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
1160                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1161
1162                         cuda_pop_context();
1163
1164                         return;
1165                 }
1166
1167                 Device::pixels_copy_from(mem, y, w, h);
1168         }
1169
1170         void pixels_free(device_memory& mem)
1171         {
1172                 if(mem.device_pointer) {
1173                         if(!background) {
1174                                 PixelMem pmem = pixel_mem_map[mem.device_pointer];
1175
1176                                 cuda_push_context();
1177
1178                                 cuda_assert(cuGraphicsUnregisterResource(pmem.cuPBOresource));
1179                                 glDeleteBuffers(1, &pmem.cuPBO);
1180                                 glDeleteTextures(1, &pmem.cuTexId);
1181
1182                                 cuda_pop_context();
1183
1184                                 pixel_mem_map.erase(pixel_mem_map.find(mem.device_pointer));
1185                                 mem.device_pointer = 0;
1186
1187                                 stats.mem_free(mem.device_size);
1188                                 mem.device_size = 0;
1189
1190                                 return;
1191                         }
1192
1193                         Device::pixels_free(mem);
1194                 }
1195         }
1196
1197         void draw_pixels(device_memory& mem, int y, int w, int h, int dx, int dy, int width, int height, bool transparent,
1198                 const DeviceDrawParams &draw_params)
1199         {
1200                 if(!background) {
1201                         PixelMem pmem = pixel_mem_map[mem.device_pointer];
1202                         float *vpointer;
1203
1204                         cuda_push_context();
1205
1206                         /* for multiple devices, this assumes the inefficient method where we allocate
1207                          * all pixels on the device even though we only render to a subset */
1208                         size_t offset = 4*y*w;
1209
1210                         if(mem.data_type == TYPE_HALF)
1211                                 offset *= sizeof(GLhalf);
1212                         else
1213                                 offset *= sizeof(uint8_t);
1214
1215                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
1216                         glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
1217                         if(mem.data_type == TYPE_HALF)
1218                                 glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_HALF_FLOAT, (void*)offset);
1219                         else
1220                                 glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, (void*)offset);
1221                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1222
1223                         glEnable(GL_TEXTURE_2D);
1224
1225                         if(transparent) {
1226                                 glEnable(GL_BLEND);
1227                                 glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
1228                         }
1229
1230                         glColor3f(1.0f, 1.0f, 1.0f);
1231
1232                         if(draw_params.bind_display_space_shader_cb) {
1233                                 draw_params.bind_display_space_shader_cb();
1234                         }
1235
1236                         if(!vertex_buffer)
1237                                 glGenBuffers(1, &vertex_buffer);
1238
1239                         glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
1240                         /* invalidate old contents - avoids stalling if buffer is still waiting in queue to be rendered */
1241                         glBufferData(GL_ARRAY_BUFFER, 16 * sizeof(float), NULL, GL_STREAM_DRAW);
1242
1243                         vpointer = (float *)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
1244
1245                         if(vpointer) {
1246                                 /* texture coordinate - vertex pair */
1247                                 vpointer[0] = 0.0f;
1248                                 vpointer[1] = 0.0f;
1249                                 vpointer[2] = dx;
1250                                 vpointer[3] = dy;
1251
1252                                 vpointer[4] = (float)w/(float)pmem.w;
1253                                 vpointer[5] = 0.0f;
1254                                 vpointer[6] = (float)width + dx;
1255                                 vpointer[7] = dy;
1256
1257                                 vpointer[8] = (float)w/(float)pmem.w;
1258                                 vpointer[9] = (float)h/(float)pmem.h;
1259                                 vpointer[10] = (float)width + dx;
1260                                 vpointer[11] = (float)height + dy;
1261
1262                                 vpointer[12] = 0.0f;
1263                                 vpointer[13] = (float)h/(float)pmem.h;
1264                                 vpointer[14] = dx;
1265                                 vpointer[15] = (float)height + dy;
1266
1267                                 glUnmapBuffer(GL_ARRAY_BUFFER);
1268                         }
1269
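                        /* stride is 4 floats per vertex: texture coordinate at offset 0, position at offset 2 floats */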
1270                         glTexCoordPointer(2, GL_FLOAT, 4 * sizeof(float), 0);
1271                         glVertexPointer(2, GL_FLOAT, 4 * sizeof(float), (char *)NULL + 2 * sizeof(float));
1272
1273                         glEnableClientState(GL_VERTEX_ARRAY);
1274                         glEnableClientState(GL_TEXTURE_COORD_ARRAY);
1275
1276                         glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
1277
1278                         glDisableClientState(GL_TEXTURE_COORD_ARRAY);
1279                         glDisableClientState(GL_VERTEX_ARRAY);
1280
1281                         glBindBuffer(GL_ARRAY_BUFFER, 0);
1282
1283                         if(draw_params.unbind_display_space_shader_cb) {
1284                                 draw_params.unbind_display_space_shader_cb();
1285                         }
1286
1287                         if(transparent)
1288                                 glDisable(GL_BLEND);
1289
1290                         glBindTexture(GL_TEXTURE_2D, 0);
1291                         glDisable(GL_TEXTURE_2D);
1292
1293                         cuda_pop_context();
1294
1295                         return;
1296                 }
1297
1298                 Device::draw_pixels(mem, y, w, h, dx, dy, width, height, transparent, draw_params);
1299         }
1300
1301         void thread_run(DeviceTask *task)
1302         {
1303                 if(task->type == DeviceTask::PATH_TRACE) {
1304                         RenderTile tile;
1305
1306                         bool branched = task->integrator_branched;
1307
1308                         /* Upload Bindless Mapping */
1309                         load_bindless_mapping();
1310
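                        /* render either with the megakernel, one pass over the tile per sample, or with
                         * the split kernel, which drives the tile through a sequence of smaller kernels */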
1311                         if(!use_split_kernel()) {
1312                                 /* keep rendering tiles until done */
1313                                 while(task->acquire_tile(this, tile)) {
1314                                         int start_sample = tile.start_sample;
1315                                         int end_sample = tile.start_sample + tile.num_samples;
1316
1317                                         for(int sample = start_sample; sample < end_sample; sample++) {
1318                                                 if(task->get_cancel()) {
1319                                                         if(task->need_finish_queue == false)
1320                                                                 break;
1321                                                 }
1322
1323                                                 path_trace(tile, sample, branched);
1324
1325                                                 tile.sample = sample + 1;
1326
1327                                                 task->update_progress(&tile, tile.w*tile.h);
1328                                         }
1329
1330                                         task->release_tile(tile);
1331                                 }
1332                         }
1333                         else {
1334                                 DeviceRequestedFeatures requested_features;
1335                                 if(!use_adaptive_compilation()) {
1336                                         requested_features.max_closure = 64;
1337                                 }
1338
1339                                 CUDASplitKernel split_kernel(this);
1340                                 split_kernel.load_kernels(requested_features);
1341
1342                                 while(task->acquire_tile(this, tile)) {
1343                                         device_memory void_buffer;
1344                                         split_kernel.path_trace(task, tile, void_buffer, void_buffer);
1345
1346                                         task->release_tile(tile);
1347
1348                                         if(task->get_cancel()) {
1349                                                 if(task->need_finish_queue == false)
1350                                                         break;
1351                                         }
1352                                 }
1353                         }
1354                 }
1355                 else if(task->type == DeviceTask::SHADER) {
1356                         /* Upload Bindless Mapping */
1357                         load_bindless_mapping();
1358
1359                         shader(*task);
1360
1361                         cuda_push_context();
1362                         cuda_assert(cuCtxSynchronize());
1363                         cuda_pop_context();
1364                 }
1365         }
1366
1367         class CUDADeviceTask : public DeviceTask {
1368         public:
1369                 CUDADeviceTask(CUDADevice *device, DeviceTask& task)
1370                 : DeviceTask(task)
1371                 {
1372                         run = function_bind(&CUDADevice::thread_run, device, this);
1373                 }
1374         };
1375
1376         int get_split_task_count(DeviceTask& /*task*/)
1377         {
1378                 return 1;
1379         }
1380
1381         void task_add(DeviceTask& task)
1382         {
1383                 if(task.type == DeviceTask::FILM_CONVERT) {
1384                         /* must be done in main thread due to opengl access */
1385                         film_convert(task, task.buffer, task.rgba_byte, task.rgba_half);
1386
1387                         cuda_push_context();
1388                         cuda_assert(cuCtxSynchronize());
1389                         cuda_pop_context();
1390                 }
1391                 else {
1392                         task_pool.push(new CUDADeviceTask(this, task));
1393                 }
1394         }
1395
1396         void task_wait()
1397         {
1398                 task_pool.wait();
1399         }
1400
1401         void task_cancel()
1402         {
1403                 task_pool.cancel();
1404         }
1405
1406         friend class CUDASplitKernelFunction;
1407         friend class CUDASplitKernel;
1408 };
1409
1410 /* redefine the cuda_assert macro so it can be used outside of the CUDADevice class
1411  * now that the definition of that class is complete
1412  */
1413 #undef cuda_assert
1414 #define cuda_assert(stmt) \
1415         { \
1416                 CUresult result = stmt; \
1417                 \
1418                 if(result != CUDA_SUCCESS) { \
1419                         string message = string_printf("CUDA error: %s in %s", cuewErrorString(result), #stmt); \
1420                         if(device->error_msg == "") \
1421                                 device->error_msg = message; \
1422                         fprintf(stderr, "%s\n", message.c_str()); \
1423                         /*cuda_abort();*/ \
1424                         device->cuda_error_documentation(); \
1425                 } \
1426         } (void)0
1427
1428 /* split kernel */
1429
1430 class CUDASplitKernelFunction : public SplitKernelFunction {
1431         CUDADevice* device;
1432         CUfunction func;
1433 public:
1434         CUDASplitKernelFunction(CUDADevice *device, CUfunction func) : device(device), func(func) {}
1435
1436         /* enqueue the kernel, returns false if there is an error */
1437         bool enqueue(const KernelDimensions &dim, device_memory &/*kg*/, device_memory &/*data*/)
1438         {
1439                 return enqueue(dim, NULL);
1440         }
1441
1442         /* enqueue the kernel, returns false if there is an error */
1443         bool enqueue(const KernelDimensions &dim, void *args[])
1444         {
1445                 device->cuda_push_context();
1446
1447                 if(device->have_error())
1448                         return false;
1449
1450                 /* we ignore dim.local_size for now, as this is faster */
1451                 int threads_per_block;
1452                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, func));
1453
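                /* launch a square 2D block; xthreads * ythreads stays within the kernel's maximum
                 * threads per block, and the grid is rounded up to cover the requested global size */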
1454                 int xthreads = (int)sqrt(threads_per_block);
1455                 int ythreads = (int)sqrt(threads_per_block);
1456
1457                 int xblocks = (dim.global_size[0] + xthreads - 1)/xthreads;
1458                 int yblocks = (dim.global_size[1] + ythreads - 1)/ythreads;
1459
1460                 cuda_assert(cuFuncSetCacheConfig(func, CU_FUNC_CACHE_PREFER_L1));
1461
1462                 cuda_assert(cuLaunchKernel(func,
1463                                    xblocks, yblocks, 1, /* blocks */
1464                                            xthreads, ythreads, 1, /* threads */
1465                                            0, 0, args, 0));
1466
1467                 device->cuda_pop_context();
1468
1469                 return !device->have_error();
1470         }
1471 };
1472
1473 CUDASplitKernel::CUDASplitKernel(CUDADevice *device) : DeviceSplitKernel(device), device(device)
1474 {
1475 }
1476
1477 uint64_t CUDASplitKernel::state_buffer_size(device_memory& /*kg*/, device_memory& /*data*/, size_t num_threads)
1478 {
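        /* the required state buffer size depends on the compiled kernel, so it is queried by launching
         * a single-thread helper kernel that writes the size for num_threads into a one element buffer,
         * which is then copied back to the host */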
1479         device_vector<uint64_t> size_buffer;
1480         size_buffer.resize(1);
1481         device->mem_alloc(NULL, size_buffer, MEM_READ_WRITE);
1482
1483         device->cuda_push_context();
1484
1485         uint threads = num_threads;
1486         CUdeviceptr d_size = device->cuda_device_ptr(size_buffer.device_pointer);
1487
1488         struct args_t {
1489                 uint* num_threads;
1490                 CUdeviceptr* size;
1491         };
1492
1493         args_t args = {
1494                 &threads,
1495                 &d_size
1496         };
1497
1498         CUfunction state_buffer_size;
1499         cuda_assert(cuModuleGetFunction(&state_buffer_size, device->cuModule, "kernel_cuda_state_buffer_size"));
1500
1501         cuda_assert(cuLaunchKernel(state_buffer_size,
1502                                    1, 1, 1,
1503                                    1, 1, 1,
1504                                    0, 0, (void**)&args, 0));
1505
1506         device->cuda_pop_context();
1507
1508         device->mem_copy_from(size_buffer, 0, 1, 1, sizeof(uint64_t));
1509         device->mem_free(size_buffer);
1510
1511         return *size_buffer.get_data();
1512 }
1513
1514 bool CUDASplitKernel::enqueue_split_kernel_data_init(const KernelDimensions& dim,
1515                                     RenderTile& rtile,
1516                                     int num_global_elements,
1517                                     device_memory& /*kernel_globals*/,
1518                                     device_memory& /*kernel_data*/,
1519                                     device_memory& split_data,
1520                                     device_memory& ray_state,
1521                                     device_memory& queue_index,
1522                                     device_memory& use_queues_flag,
1523                                     device_memory& work_pool_wgs)
1524 {
1525         device->cuda_push_context();
1526
1527         CUdeviceptr d_split_data = device->cuda_device_ptr(split_data.device_pointer);
1528         CUdeviceptr d_ray_state = device->cuda_device_ptr(ray_state.device_pointer);
1529         CUdeviceptr d_queue_index = device->cuda_device_ptr(queue_index.device_pointer);
1530         CUdeviceptr d_use_queues_flag = device->cuda_device_ptr(use_queues_flag.device_pointer);
1531         CUdeviceptr d_work_pool_wgs = device->cuda_device_ptr(work_pool_wgs.device_pointer);
1532
1533         CUdeviceptr d_rng_state = device->cuda_device_ptr(rtile.rng_state);
1534         CUdeviceptr d_buffer = device->cuda_device_ptr(rtile.buffer);
1535
1536         int end_sample = rtile.start_sample + rtile.num_samples;
1537         int queue_size = dim.global_size[0] * dim.global_size[1];
1538
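        /* passed to cuLaunchKernel as an array of argument pointers, so the members below must
         * match the parameter order of kernel_cuda_path_trace_data_init exactly */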
1539         struct args_t {
1540                 CUdeviceptr* split_data_buffer;
1541                 int* num_elements;
1542                 CUdeviceptr* ray_state;
1543                 CUdeviceptr* rng_state;
1544                 int* start_sample;
1545                 int* end_sample;
1546                 int* sx;
1547                 int* sy;
1548                 int* sw;
1549                 int* sh;
1550                 int* offset;
1551                 int* stride;
1552                 CUdeviceptr* queue_index;
1553                 int* queuesize;
1554                 CUdeviceptr* use_queues_flag;
1555                 CUdeviceptr* work_pool_wgs;
1556                 int* num_samples;
1557                 CUdeviceptr* buffer;
1558         };
1559
1560         args_t args = {
1561                 &d_split_data,
1562                 &num_global_elements,
1563                 &d_ray_state,
1564                 &d_rng_state,
1565                 &rtile.start_sample,
1566                 &end_sample,
1567                 &rtile.x,
1568                 &rtile.y,
1569                 &rtile.w,
1570                 &rtile.h,
1571                 &rtile.offset,
1572                 &rtile.stride,
1573                 &d_queue_index,
1574                 &queue_size,
1575                 &d_use_queues_flag,
1576                 &d_work_pool_wgs,
1577                 &rtile.num_samples,
1578                 &d_buffer
1579         };
1580
1581         CUfunction data_init;
1582         cuda_assert(cuModuleGetFunction(&data_init, device->cuModule, "kernel_cuda_path_trace_data_init"));
1583         if(device->have_error()) {
1584                 return false;
1585         }
1586
1587         CUDASplitKernelFunction(device, data_init).enqueue(dim, (void**)&args);
1588
1589         device->cuda_pop_context();
1590
1591         return !device->have_error();
1592 }
1593
1594 SplitKernelFunction* CUDASplitKernel::get_split_kernel_function(string kernel_name, const DeviceRequestedFeatures&)
1595 {
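        /* split kernel entry points are exported from the CUDA module with a kernel_cuda_ prefix */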
1596         CUfunction func;
1597
1598         device->cuda_push_context();
1599
1600         cuda_assert(cuModuleGetFunction(&func, device->cuModule, (string("kernel_cuda_") + kernel_name).data()));
1601         if(device->have_error()) {
1602                 device->cuda_error_message(string_printf("kernel \"kernel_cuda_%s\" not found in module", kernel_name.data()));
1603                 return NULL;
1604         }
1605
1606         device->cuda_pop_context();
1607
1608         return new CUDASplitKernelFunction(device, func);
1609 }
1610
1611 int2 CUDASplitKernel::split_kernel_local_size()
1612 {
1613         return make_int2(32, 1);
1614 }
1615
1616 int2 CUDASplitKernel::split_kernel_global_size(device_memory& /*kg*/, device_memory& /*data*/, DeviceTask * /*task*/)
1617 {
1618         /* TODO(mai): implement something here to detect ideal work size */
1619         return make_int2(256, 256);
1620 }
1621
1622 bool device_cuda_init(void)
1623 {
1624 #ifdef WITH_CUDA_DYNLOAD
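        /* initialize the CUDA driver API through cuew only once and cache the result in
         * static variables, so repeated calls are cheap */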
1625         static bool initialized = false;
1626         static bool result = false;
1627
1628         if(initialized)
1629                 return result;
1630
1631         initialized = true;
1632         int cuew_result = cuewInit();
1633         if(cuew_result == CUEW_SUCCESS) {
1634                 VLOG(1) << "CUEW initialization succeeded";
1635                 if(CUDADevice::have_precompiled_kernels()) {
1636                         VLOG(1) << "Found precompiled kernels";
1637                         result = true;
1638                 }
1639 #ifndef _WIN32
1640                 else if(cuewCompilerPath() != NULL) {
1641                         VLOG(1) << "Found CUDA compiler " << cuewCompilerPath();
1642                         result = true;
1643                 }
1644                 else {
1645                                 VLOG(1) << "Neither precompiled kernels nor CUDA compiler was found,"
1646                                 << " unable to use CUDA";
1647                 }
1648 #endif
1649         }
1650         else {
1651                 VLOG(1) << "CUEW initialization failed: "
1652                         << ((cuew_result == CUEW_ERROR_ATEXIT_FAILED)
1653                             ? "Error setting up atexit() handler"
1654                             : "Error opening the library");
1655         }
1656
1657         return result;
1658 #else  /* WITH_CUDA_DYNLOAD */
1659         return true;
1660 #endif /* WITH_CUDA_DYNLOAD */
1661 }
1662
1663 Device *device_cuda_create(DeviceInfo& info, Stats &stats, bool background)
1664 {
1665         return new CUDADevice(info, stats, background);
1666 }
1667
1668 void device_cuda_info(vector<DeviceInfo>& devices)
1669 {
1670         CUresult result;
1671         int count = 0;
1672
1673         result = cuInit(0);
1674         if(result != CUDA_SUCCESS) {
1675                 if(result != CUDA_ERROR_NO_DEVICE)
1676                         fprintf(stderr, "CUDA cuInit: %s\n", cuewErrorString(result));
1677                 return;
1678         }
1679
1680         result = cuDeviceGetCount(&count);
1681         if(result != CUDA_SUCCESS) {
1682                 fprintf(stderr, "CUDA cuDeviceGetCount: %s\n", cuewErrorString(result));
1683                 return;
1684         }
1685
1686         vector<DeviceInfo> display_devices;
1687
1688         for(int num = 0; num < count; num++) {
1689                 char name[256];
1690                 int attr;
1691
1692                 if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS)
1693                         continue;
1694
1695                 int major;
1696                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, num);
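                /* devices below compute capability 2.0 are not supported, skip them */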
1697                 if(major < 2) {
1698                         continue;
1699                 }
1700
1701                 DeviceInfo info;
1702
1703                 info.type = DEVICE_CUDA;
1704                 info.description = string(name);
1705                 info.num = num;
1706
1707                 info.advanced_shading = (major >= 2);
1708                 info.has_bindless_textures = (major >= 3);
1709                 info.pack_images = false;
1710
1711                 int pci_location[3] = {0, 0, 0};
1712                 cuDeviceGetAttribute(&pci_location[0], CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID, num);
1713                 cuDeviceGetAttribute(&pci_location[1], CU_DEVICE_ATTRIBUTE_PCI_BUS_ID, num);
1714                 cuDeviceGetAttribute(&pci_location[2], CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID, num);
1715                 info.id = string_printf("CUDA_%s_%04x:%02x:%02x",
1716                                         name,
1717                                         (unsigned int)pci_location[0],
1718                                         (unsigned int)pci_location[1],
1719                                         (unsigned int)pci_location[2]);
1720
1721                 /* if device has a kernel timeout, assume it is used for display */
1722                 if(cuDeviceGetAttribute(&attr, CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT, num) == CUDA_SUCCESS && attr == 1) {
1723                         info.description += " (Display)";
1724                         info.display_device = true;
1725                         display_devices.push_back(info);
1726                 }
1727                 else
1728                         devices.push_back(info);
1729         }
1730
1731         if(!display_devices.empty())
1732                 devices.insert(devices.end(), display_devices.begin(), display_devices.end());
1733 }
1734
1735 string device_cuda_capabilities(void)
1736 {
1737         CUresult result = cuInit(0);
1738         if(result != CUDA_SUCCESS) {
1739                 if(result != CUDA_ERROR_NO_DEVICE) {
1740                         return string("Error initializing CUDA: ") + cuewErrorString(result);
1741                 }
1742                 return "No CUDA device found\n";
1743         }
1744
1745         int count;
1746         result = cuDeviceGetCount(&count);
1747         if(result != CUDA_SUCCESS) {
1748                 return string("Error getting devices: ") + cuewErrorString(result);
1749         }
1750
1751         string capabilities = "";
1752         for(int num = 0; num < count; num++) {
1753                 char name[256];
1754                 if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS) {
1755                         continue;
1756                 }
1757                 capabilities += string("\t") + name + "\n";
1758                 int value;
1759 #define GET_ATTR(attr) \
1760                 { \
1761                         if(cuDeviceGetAttribute(&value, \
1762                                                 CU_DEVICE_ATTRIBUTE_##attr, \
1763                                                 num) == CUDA_SUCCESS) \
1764                         { \
1765                                 capabilities += string_printf("\t\tCU_DEVICE_ATTRIBUTE_" #attr "\t\t\t%d\n", \
1766                                                               value); \
1767                         } \
1768                 } (void)0
1769                 /* TODO(sergey): Strip all attributes which are not useful for us
1770                  * or do not depend on the driver.
1771                  */
1772                 GET_ATTR(MAX_THREADS_PER_BLOCK);
1773                 GET_ATTR(MAX_BLOCK_DIM_X);
1774                 GET_ATTR(MAX_BLOCK_DIM_Y);
1775                 GET_ATTR(MAX_BLOCK_DIM_Z);
1776                 GET_ATTR(MAX_GRID_DIM_X);
1777                 GET_ATTR(MAX_GRID_DIM_Y);
1778                 GET_ATTR(MAX_GRID_DIM_Z);
1779                 GET_ATTR(MAX_SHARED_MEMORY_PER_BLOCK);
1780                 GET_ATTR(SHARED_MEMORY_PER_BLOCK);
1781                 GET_ATTR(TOTAL_CONSTANT_MEMORY);
1782                 GET_ATTR(WARP_SIZE);
1783                 GET_ATTR(MAX_PITCH);
1784                 GET_ATTR(MAX_REGISTERS_PER_BLOCK);
1785                 GET_ATTR(REGISTERS_PER_BLOCK);
1786                 GET_ATTR(CLOCK_RATE);
1787                 GET_ATTR(TEXTURE_ALIGNMENT);
1788                 GET_ATTR(GPU_OVERLAP);
1789                 GET_ATTR(MULTIPROCESSOR_COUNT);
1790                 GET_ATTR(KERNEL_EXEC_TIMEOUT);
1791                 GET_ATTR(INTEGRATED);
1792                 GET_ATTR(CAN_MAP_HOST_MEMORY);
1793                 GET_ATTR(COMPUTE_MODE);
1794                 GET_ATTR(MAXIMUM_TEXTURE1D_WIDTH);
1795                 GET_ATTR(MAXIMUM_TEXTURE2D_WIDTH);
1796                 GET_ATTR(MAXIMUM_TEXTURE2D_HEIGHT);
1797                 GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH);
1798                 GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT);
1799                 GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH);
1800                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_WIDTH);
1801                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_HEIGHT);
1802                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_LAYERS);
1803                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_WIDTH);
1804                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_HEIGHT);
1805                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES);
1806                 GET_ATTR(SURFACE_ALIGNMENT);
1807                 GET_ATTR(CONCURRENT_KERNELS);
1808                 GET_ATTR(ECC_ENABLED);
1809                 GET_ATTR(TCC_DRIVER);
1810                 GET_ATTR(MEMORY_CLOCK_RATE);
1811                 GET_ATTR(GLOBAL_MEMORY_BUS_WIDTH);
1812                 GET_ATTR(L2_CACHE_SIZE);
1813                 GET_ATTR(MAX_THREADS_PER_MULTIPROCESSOR);
1814                 GET_ATTR(ASYNC_ENGINE_COUNT);
1815                 GET_ATTR(UNIFIED_ADDRESSING);
1816                 GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_WIDTH);
1817                 GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_LAYERS);
1818                 GET_ATTR(CAN_TEX2D_GATHER);
1819                 GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_WIDTH);
1820                 GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_HEIGHT);
1821                 GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE);
1822                 GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE);
1823                 GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE);
1824                 GET_ATTR(TEXTURE_PITCH_ALIGNMENT);
1825                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_WIDTH);
1826                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH);
1827                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS);
1828                 GET_ATTR(MAXIMUM_SURFACE1D_WIDTH);
1829                 GET_ATTR(MAXIMUM_SURFACE2D_WIDTH);
1830                 GET_ATTR(MAXIMUM_SURFACE2D_HEIGHT);
1831                 GET_ATTR(MAXIMUM_SURFACE3D_WIDTH);
1832                 GET_ATTR(MAXIMUM_SURFACE3D_HEIGHT);
1833                 GET_ATTR(MAXIMUM_SURFACE3D_DEPTH);
1834                 GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_WIDTH);
1835                 GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_LAYERS);
1836                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_WIDTH);
1837                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_HEIGHT);
1838                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_LAYERS);
1839                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_WIDTH);
1840                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH);
1841                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS);
1842                 GET_ATTR(MAXIMUM_TEXTURE1D_LINEAR_WIDTH);
1843                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_WIDTH);
1844                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_HEIGHT);
1845                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_PITCH);
1846                 GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH);
1847                 GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT);
1848                 GET_ATTR(COMPUTE_CAPABILITY_MAJOR);
1849                 GET_ATTR(COMPUTE_CAPABILITY_MINOR);
1850                 GET_ATTR(MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH);
1851                 GET_ATTR(STREAM_PRIORITIES_SUPPORTED);
1852                 GET_ATTR(GLOBAL_L1_CACHE_SUPPORTED);
1853                 GET_ATTR(LOCAL_L1_CACHE_SUPPORTED);
1854                 GET_ATTR(MAX_SHARED_MEMORY_PER_MULTIPROCESSOR);
1855                 GET_ATTR(MAX_REGISTERS_PER_MULTIPROCESSOR);
1856                 GET_ATTR(MANAGED_MEMORY);
1857                 GET_ATTR(MULTI_GPU_BOARD);
1858                 GET_ATTR(MULTI_GPU_BOARD_GROUP_ID);
1859 #undef GET_ATTR
1860                 capabilities += "\n";
1861         }
1862
1863         return capabilities;
1864 }
1865
1866 CCL_NAMESPACE_END