intern/cycles/device/device_cuda.cpp
1 /*
2  * Copyright 2011-2013 Blender Foundation
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16
17 #include <climits>
18 #include <limits.h>
19 #include <stdio.h>
20 #include <stdlib.h>
21 #include <string.h>
22
23 #include "device/device.h"
24 #include "device/device_intern.h"
25 #include "device/device_split_kernel.h"
26
27 #include "render/buffers.h"
28
29 #ifdef WITH_CUDA_DYNLOAD
30 #  include "cuew.h"
31 #else
32 #  include "util/util_opengl.h"
33 #  include <cuda.h>
34 #  include <cudaGL.h>
35 #endif
36 #include "util/util_debug.h"
37 #include "util/util_logging.h"
38 #include "util/util_map.h"
39 #include "util/util_md5.h"
40 #include "util/util_opengl.h"
41 #include "util/util_path.h"
42 #include "util/util_string.h"
43 #include "util/util_system.h"
44 #include "util/util_types.h"
45 #include "util/util_time.h"
46
47 #include "kernel/split/kernel_split_data_types.h"
48
49 CCL_NAMESPACE_BEGIN
50
51 #ifndef WITH_CUDA_DYNLOAD
52
53 /* Transparently implement some functions, so that the majority of the file does not
54  * need to worry about the difference between dynamically loaded and linked CUDA at all.
55  */
56
57 namespace {
58
59 const char *cuewErrorString(CUresult result)
60 {
61         /* We can only give the error code here without major code duplication, which
62          * should be enough since dynamic loading is only disabled by people who know
63          * what they're doing anyway.
64          *
65          * NOTE: Avoid calling this from several threads.
66          */
67         static string error;
68         error = string_printf("%d", result);
69         return error.c_str();
70 }
71
72 const char *cuewCompilerPath(void)
73 {
74         return CYCLES_CUDA_NVCC_EXECUTABLE;
75 }
76
77 int cuewCompilerVersion(void)
78 {
79         return (CUDA_VERSION / 100) + (CUDA_VERSION % 100 / 10);
80 }
81
82 }  /* namespace */
83 #endif  /* WITH_CUDA_DYNLOAD */
84
85 class CUDADevice;
86
87 class CUDASplitKernel : public DeviceSplitKernel {
88         CUDADevice *device;
89 public:
90         explicit CUDASplitKernel(CUDADevice *device);
91
92         virtual uint64_t state_buffer_size(device_memory& kg, device_memory& data, size_t num_threads);
93
94         virtual bool enqueue_split_kernel_data_init(const KernelDimensions& dim,
95                                                     RenderTile& rtile,
96                                                     int num_global_elements,
97                                                     device_memory& kernel_globals,
98                                                     device_memory& kernel_data_,
99                                                     device_memory& split_data,
100                                                     device_memory& ray_state,
101                                                     device_memory& queue_index,
102                                                     device_memory& use_queues_flag,
103                                                     device_memory& work_pool_wgs);
104
105         virtual SplitKernelFunction* get_split_kernel_function(string kernel_name, const DeviceRequestedFeatures&);
106         virtual int2 split_kernel_local_size();
107         virtual int2 split_kernel_global_size(device_memory& kg, device_memory& data, DeviceTask *task);
108 };
109
110 class CUDADevice : public Device
111 {
112 public:
113         DedicatedTaskPool task_pool;
114         CUdevice cuDevice;
115         CUcontext cuContext;
116         CUmodule cuModule;
117         map<device_ptr, bool> tex_interp_map;
118         map<device_ptr, uint> tex_bindless_map;
119         int cuDevId;
120         int cuDevArchitecture;
121         bool first_error;
122         CUDASplitKernel *split_kernel;
123
124         struct PixelMem {
125                 GLuint cuPBO;
126                 CUgraphicsResource cuPBOresource;
127                 GLuint cuTexId;
128                 int w, h;
129         };
130
131         map<device_ptr, PixelMem> pixel_mem_map;
132
133         /* Bindless Textures */
134         device_vector<uint> bindless_mapping;
135         bool need_bindless_mapping;
136
137         CUdeviceptr cuda_device_ptr(device_ptr mem)
138         {
139                 return (CUdeviceptr)mem;
140         }
141
142         static bool have_precompiled_kernels()
143         {
144                 string cubins_path = path_get("lib");
145                 return path_exists(cubins_path);
146         }
147
148         virtual bool show_samples() const
149         {
150                 /* The CUDADevice only processes one tile at a time, so showing samples is fine. */
151                 return true;
152         }
153
154 /*#ifdef NDEBUG
155 #define cuda_abort()
156 #else
157 #define cuda_abort() abort()
158 #endif*/
159         void cuda_error_documentation()
160         {
161                 if(first_error) {
162                         fprintf(stderr, "\nRefer to the Cycles GPU rendering documentation for possible solutions:\n");
163                         fprintf(stderr, "https://docs.blender.org/manual/en/dev/render/cycles/gpu_rendering.html\n\n");
164                         first_error = false;
165                 }
166         }
167
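/* Check the result of a CUDA API call: on failure, keep the first error message
 * in error_msg and print it to stderr, but continue instead of aborting. */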
168 #define cuda_assert(stmt) \
169         { \
170                 CUresult result = stmt; \
171                 \
172                 if(result != CUDA_SUCCESS) { \
173                         string message = string_printf("CUDA error: %s in %s", cuewErrorString(result), #stmt); \
174                         if(error_msg == "") \
175                                 error_msg = message; \
176                         fprintf(stderr, "%s\n", message.c_str()); \
177                         /*cuda_abort();*/ \
178                         cuda_error_documentation(); \
179                 } \
180         } (void)0
181
182         bool cuda_error_(CUresult result, const string& stmt)
183         {
184                 if(result == CUDA_SUCCESS)
185                         return false;
186
187                 string message = string_printf("CUDA error at %s: %s", stmt.c_str(), cuewErrorString(result));
188                 if(error_msg == "")
189                         error_msg = message;
190                 fprintf(stderr, "%s\n", message.c_str());
191                 cuda_error_documentation();
192                 return true;
193         }
194
195 #define cuda_error(stmt) cuda_error_(stmt, #stmt)
196
197         void cuda_error_message(const string& message)
198         {
199                 if(error_msg == "")
200                         error_msg = message;
201                 fprintf(stderr, "%s\n", message.c_str());
202                 cuda_error_documentation();
203         }
204
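        /* Make this device's CUDA context current on the calling thread;
         * cuda_pop_context() clears it again. */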
205         void cuda_push_context()
206         {
207                 cuda_assert(cuCtxSetCurrent(cuContext));
208         }
209
210         void cuda_pop_context()
211         {
212                 cuda_assert(cuCtxSetCurrent(NULL));
213         }
214
215         CUDADevice(DeviceInfo& info, Stats &stats, bool background_)
216         : Device(info, stats, background_)
217         {
218                 first_error = true;
219                 background = background_;
220
221                 cuDevId = info.num;
222                 cuDevice = 0;
223                 cuContext = 0;
224
225                 split_kernel = NULL;
226
227                 need_bindless_mapping = false;
228
229                 /* initialize */
230                 if(cuda_error(cuInit(0)))
231                         return;
232
233                 /* setup device and context */
234                 if(cuda_error(cuDeviceGet(&cuDevice, cuDevId)))
235                         return;
236
237                 CUresult result;
238
239                 if(background) {
240                         result = cuCtxCreate(&cuContext, 0, cuDevice);
241                 }
242                 else {
243                         result = cuGLCtxCreate(&cuContext, 0, cuDevice);
244
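                        /* GL interop context creation failed (for example when this CUDA device
                         * is not the one driving the display); fall back to a plain context and
                         * render as if in the background. */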
245                         if(result != CUDA_SUCCESS) {
246                                 result = cuCtxCreate(&cuContext, 0, cuDevice);
247                                 background = true;
248                         }
249                 }
250
251                 if(cuda_error_(result, "cuCtxCreate"))
252                         return;
253
254                 int major, minor;
255                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
256                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
257                 cuDevArchitecture = major*100 + minor*10;
258
259                 cuda_pop_context();
260         }
261
262         ~CUDADevice()
263         {
264                 task_pool.stop();
265
266                 delete split_kernel;
267
268                 if(info.has_bindless_textures) {
269                         tex_free(bindless_mapping);
270                 }
271
272                 cuda_assert(cuCtxDestroy(cuContext));
273         }
274
275         bool support_device(const DeviceRequestedFeatures& /*requested_features*/)
276         {
277                 int major, minor;
278                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
279                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
280
281                 /* We only support sm_20 and above */
282                 if(major < 2) {
283                         cuda_error_message(string_printf("CUDA device supported only with compute capability 2.0 or up, found %d.%d.", major, minor));
284                         return false;
285                 }
286
287                 return true;
288         }
289
290         bool use_adaptive_compilation()
291         {
292                 return DebugFlags().cuda.adaptive_compile;
293         }
294
295         bool use_split_kernel()
296         {
297                 return DebugFlags().cuda.split_kernel;
298         }
299
300         /* Common NVCC flags which stay the same regardless of shading model or
301          * kernel source md5, and which only depend on the compiler or compilation settings.
302          */
303         string compile_kernel_get_common_cflags(
304                 const DeviceRequestedFeatures& requested_features, bool split=false)
305         {
306                 const int cuda_version = cuewCompilerVersion();
307                 const int machine = system_cpu_bits();
308                 const string source_path = path_get("source");
309                 const string include_path = source_path;
310                 string cflags = string_printf("-m%d "
311                                               "--ptxas-options=\"-v\" "
312                                               "--use_fast_math "
313                                               "-DNVCC "
314                                               "-D__KERNEL_CUDA_VERSION__=%d "
315                                                "-I\"%s\"",
316                                               machine,
317                                               cuda_version,
318                                               include_path.c_str());
319                 if(use_adaptive_compilation()) {
320                         cflags += " " + requested_features.get_build_options();
321                 }
322                 const char *extra_cflags = getenv("CYCLES_CUDA_EXTRA_CFLAGS");
323                 if(extra_cflags) {
324                         cflags += string(" ") + string(extra_cflags);
325                 }
326 #ifdef WITH_CYCLES_DEBUG
327                 cflags += " -D__KERNEL_DEBUG__";
328 #endif
329
330                 if(split) {
331                         cflags += " -D__SPLIT__";
332                 }
333
334                 return cflags;
335         }
336
337         bool compile_check_compiler() {
338                 const char *nvcc = cuewCompilerPath();
339                 if(nvcc == NULL) {
340                         cuda_error_message("CUDA nvcc compiler not found. "
341                                            "Install CUDA toolkit in default location.");
342                         return false;
343                 }
344                 const int cuda_version = cuewCompilerVersion();
345                 VLOG(1) << "Found nvcc " << nvcc
346                         << ", CUDA version " << cuda_version
347                         << ".";
348                 const int major = cuda_version / 10, minor = cuda_version % 10;
349                 if(cuda_version == 0) {
350                         cuda_error_message("CUDA nvcc compiler version could not be parsed.");
351                         return false;
352                 }
353                 if(cuda_version < 80) {
354                         printf("Unsupported CUDA version %d.%d detected, "
355                                "you need CUDA 8.0 or newer.\n",
356                                major, minor);
357                         return false;
358                 }
359                 else if(cuda_version != 80) {
360                         printf("CUDA version %d.%d detected, build may succeed but only "
361                                "CUDA 8.0 is officially supported.\n",
362                                major, minor);
363                 }
364                 return true;
365         }
366
367         string compile_kernel(const DeviceRequestedFeatures& requested_features, bool split=false)
368         {
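                /* Kernel lookup order: a cubin shipped with Blender, then a previously
                 * compiled cubin from the cache directory, and finally compiling one
                 * with nvcc. */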
369                 /* Compute cubin name. */
370                 int major, minor;
371                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
372                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
373
374                 /* Attempt to use kernel provided with Blender. */
375                 if(!use_adaptive_compilation()) {
376                         const string cubin = path_get(string_printf(split ? "lib/kernel_split_sm_%d%d.cubin"
377                                                                           : "lib/kernel_sm_%d%d.cubin",
378                                                                     major, minor));
379                         VLOG(1) << "Testing for pre-compiled kernel " << cubin << ".";
380                         if(path_exists(cubin)) {
381                                 VLOG(1) << "Using precompiled kernel.";
382                                 return cubin;
383                         }
384                 }
385
386                 const string common_cflags =
387                         compile_kernel_get_common_cflags(requested_features, split);
388
389                 /* Try to use locally compiled kernel. */
390                 const string source_path = path_get("source");
391                 const string kernel_md5 = path_files_md5_hash(source_path);
392
393                 /* We include the cflags in the md5 so that changing the CUDA toolkit or other
394                  * compiler command line arguments makes sure the cubin gets re-built.
395                  */
396                 const string cubin_md5 = util_md5_string(kernel_md5 + common_cflags);
397
398                 const string cubin_file = string_printf(split ? "cycles_kernel_split_sm%d%d_%s.cubin"
399                                                               : "cycles_kernel_sm%d%d_%s.cubin",
400                                                         major, minor,
401                                                         cubin_md5.c_str());
402                 const string cubin = path_cache_get(path_join("kernels", cubin_file));
403                 VLOG(1) << "Testing for locally compiled kernel " << cubin << ".";
404                 if(path_exists(cubin)) {
405                         VLOG(1) << "Using locally compiled kernel.";
406                         return cubin;
407                 }
408
409 #ifdef _WIN32
410                 if(have_precompiled_kernels()) {
411                         if(major < 2) {
412                                 cuda_error_message(string_printf(
413                                         "CUDA device requires compute capability 2.0 or up, "
414                                         "found %d.%d. Your GPU is not supported.",
415                                         major, minor));
416                         }
417                         else {
418                                 cuda_error_message(string_printf(
419                                         "CUDA binary kernel for this graphics card compute "
420                                         "capability (%d.%d) not found.",
421                                         major, minor));
422                         }
423                         return "";
424                 }
425 #endif
426
427                 /* Compile. */
428                 if(!compile_check_compiler()) {
429                         return "";
430                 }
431                 const char *nvcc = cuewCompilerPath();
432                 const string kernel = path_join(
433                         path_join(source_path, "kernel"),
434                         path_join("kernels",
435                                   path_join("cuda", split ? "kernel_split.cu" : "kernel.cu")));
436                 double starttime = time_dt();
437                 printf("Compiling CUDA kernel ...\n");
438
439                 path_create_directories(cubin);
440
441                 string command = string_printf("\"%s\" "
442                                                "-arch=sm_%d%d "
443                                                "--cubin \"%s\" "
444                                                "-o \"%s\" "
445                                                "%s ",
446                                                nvcc,
447                                                major, minor,
448                                                kernel.c_str(),
449                                                cubin.c_str(),
450                                                common_cflags.c_str());
451
452                 printf("%s\n", command.c_str());
453
454                 if(system(command.c_str()) == -1) {
455                         cuda_error_message("Failed to execute compilation command, "
456                                            "see console for details.");
457                         return "";
458                 }
459
460                 /* Verify that compilation succeeded */
461                 if(!path_exists(cubin)) {
462                         cuda_error_message("CUDA kernel compilation failed, "
463                                            "see console for details.");
464                         return "";
465                 }
466
467                 printf("Kernel compilation finished in %.2lfs.\n", time_dt() - starttime);
468
469                 return cubin;
470         }
471
472         bool load_kernels(const DeviceRequestedFeatures& requested_features)
473         {
474                 /* check if cuda init succeeded */
475                 if(cuContext == 0)
476                         return false;
477
478                 /* check if GPU is supported */
479                 if(!support_device(requested_features))
480                         return false;
481
482                 /* get kernel */
483                 string cubin = compile_kernel(requested_features, use_split_kernel());
484
485                 if(cubin == "")
486                         return false;
487
488                 /* open module */
489                 cuda_push_context();
490
491                 string cubin_data;
492                 CUresult result;
493
494                 if(path_read_text(cubin, cubin_data))
495                         result = cuModuleLoadData(&cuModule, cubin_data.c_str());
496                 else
497                         result = CUDA_ERROR_FILE_NOT_FOUND;
498
499                 if(cuda_error_(result, "cuModuleLoad"))
500                         cuda_error_message(string_printf("Failed loading CUDA kernel %s.", cubin.c_str()));
501
502                 cuda_pop_context();
503
504                 return (result == CUDA_SUCCESS);
505         }
506
507         void load_bindless_mapping()
508         {
509                 if(info.has_bindless_textures && need_bindless_mapping) {
510                         tex_free(bindless_mapping);
511                         tex_alloc("__bindless_mapping", bindless_mapping, INTERPOLATION_NONE, EXTENSION_REPEAT);
512                         need_bindless_mapping = false;
513                 }
514         }
515
516         void mem_alloc(const char *name, device_memory& mem, MemoryType /*type*/)
517         {
518                 if(name) {
519                         VLOG(1) << "Buffer allocate: " << name << ", "
520                                 << string_human_readable_number(mem.memory_size()) << " bytes. ("
521                                 << string_human_readable_size(mem.memory_size()) << ")";
522                 }
523
524                 cuda_push_context();
525                 CUdeviceptr device_pointer;
526                 size_t size = mem.memory_size();
527                 cuda_assert(cuMemAlloc(&device_pointer, size));
528                 mem.device_pointer = (device_ptr)device_pointer;
529                 mem.device_size = size;
530                 stats.mem_alloc(size);
531                 cuda_pop_context();
532         }
533
534         void mem_copy_to(device_memory& mem)
535         {
536                 cuda_push_context();
537                 if(mem.device_pointer)
538                         cuda_assert(cuMemcpyHtoD(cuda_device_ptr(mem.device_pointer), (void*)mem.data_pointer, mem.memory_size()));
539                 cuda_pop_context();
540         }
541
542         void mem_copy_from(device_memory& mem, int y, int w, int h, int elem)
543         {
544                 size_t offset = elem*y*w;
545                 size_t size = elem*w*h;
546
547                 cuda_push_context();
548                 if(mem.device_pointer) {
549                         cuda_assert(cuMemcpyDtoH((uchar*)mem.data_pointer + offset,
550                                                  (CUdeviceptr)(mem.device_pointer + offset), size));
551                 }
552                 else {
553                         memset((char*)mem.data_pointer + offset, 0, size);
554                 }
555                 cuda_pop_context();
556         }
557
558         void mem_zero(device_memory& mem)
559         {
560                 if(mem.data_pointer) {
561                         memset((void*)mem.data_pointer, 0, mem.memory_size());
562                 }
563
564                 cuda_push_context();
565                 if(mem.device_pointer)
566                         cuda_assert(cuMemsetD8(cuda_device_ptr(mem.device_pointer), 0, mem.memory_size()));
567                 cuda_pop_context();
568         }
569
570         void mem_free(device_memory& mem)
571         {
572                 if(mem.device_pointer) {
573                         cuda_push_context();
574                         cuda_assert(cuMemFree(cuda_device_ptr(mem.device_pointer)));
575                         cuda_pop_context();
576
577                         mem.device_pointer = 0;
578
579                         stats.mem_free(mem.device_size);
580                         mem.device_size = 0;
581                 }
582         }
583
584         void const_copy_to(const char *name, void *host, size_t size)
585         {
586                 CUdeviceptr mem;
587                 size_t bytes;
588
589                 cuda_push_context();
590                 cuda_assert(cuModuleGetGlobal(&mem, &bytes, cuModule, name));
591                 //assert(bytes == size);
592                 cuda_assert(cuMemcpyHtoD(mem, host, size));
593                 cuda_pop_context();
594         }
595
596         void tex_alloc(const char *name,
597                        device_memory& mem,
598                        InterpolationType interpolation,
599                        ExtensionType extension)
600         {
601                 VLOG(1) << "Texture allocate: " << name << ", "
602                         << string_human_readable_number(mem.memory_size()) << " bytes. ("
603                         << string_human_readable_size(mem.memory_size()) << ")";
604
605                 /* Check if we are on sm_30 or above; there we use arrays and
606                  * bindless textures for storage. */
607                 bool has_bindless_textures = info.has_bindless_textures;
608
609                 /* General variables for both architectures */
610                 string bind_name = name;
611                 size_t dsize = datatype_size(mem.data_type);
612                 size_t size = mem.memory_size();
613
614                 CUaddress_mode address_mode = CU_TR_ADDRESS_MODE_WRAP;
615                 switch(extension) {
616                         case EXTENSION_REPEAT:
617                                 address_mode = CU_TR_ADDRESS_MODE_WRAP;
618                                 break;
619                         case EXTENSION_EXTEND:
620                                 address_mode = CU_TR_ADDRESS_MODE_CLAMP;
621                                 break;
622                         case EXTENSION_CLIP:
623                                 address_mode = CU_TR_ADDRESS_MODE_BORDER;
624                                 break;
625                         default:
626                                 assert(0);
627                                 break;
628                 }
629
630                 CUfilter_mode filter_mode;
631                 if(interpolation == INTERPOLATION_CLOSEST) {
632                         filter_mode = CU_TR_FILTER_MODE_POINT;
633                 }
634                 else {
635                         filter_mode = CU_TR_FILTER_MODE_LINEAR;
636                 }
637
638                 CUarray_format_enum format;
639                 switch(mem.data_type) {
640                         case TYPE_UCHAR: format = CU_AD_FORMAT_UNSIGNED_INT8; break;
641                         case TYPE_UINT: format = CU_AD_FORMAT_UNSIGNED_INT32; break;
642                         case TYPE_INT: format = CU_AD_FORMAT_SIGNED_INT32; break;
643                         case TYPE_FLOAT: format = CU_AD_FORMAT_FLOAT; break;
644                         case TYPE_HALF: format = CU_AD_FORMAT_HALF; break;
645                         default: assert(0); return;
646                 }
647
648                 /* General variables for Fermi */
649                 CUtexref texref = NULL;
650
651                 if(!has_bindless_textures) {
652                         if(mem.data_depth > 1) {
653                                 /* The kernel uses different bind names for 2d and 3d float textures,
654                                  * so we have to adjust a couple of things here.
655                                  */
656                                 vector<string> tokens;
657                                 string_split(tokens, name, "_");
658                                 bind_name = string_printf("__tex_image_%s_3d_%s",
659                                                           tokens[2].c_str(),
660                                                           tokens[3].c_str());
661                         }
662
663                         cuda_push_context();
664                         cuda_assert(cuModuleGetTexRef(&texref, cuModule, bind_name.c_str()));
665                         cuda_pop_context();
666
667                         if(!texref) {
668                                 return;
669                         }
670                 }
671
672                 /* Data Storage */
673                 if(interpolation == INTERPOLATION_NONE) {
674                         if(has_bindless_textures) {
675                                 mem_alloc(NULL, mem, MEM_READ_ONLY);
676                                 mem_copy_to(mem);
677
678                                 cuda_push_context();
679
680                                 CUdeviceptr cumem;
681                                 size_t cubytes;
682
683                                 cuda_assert(cuModuleGetGlobal(&cumem, &cubytes, cuModule, bind_name.c_str()));
684
685                                 if(cubytes == 8) {
686                                         /* 64 bit device pointer */
687                                         uint64_t ptr = mem.device_pointer;
688                                         cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
689                                 }
690                                 else {
691                                         /* 32 bit device pointer */
692                                         uint32_t ptr = (uint32_t)mem.device_pointer;
693                                         cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
694                                 }
695
696                                 cuda_pop_context();
697                         }
698                         else {
699                                 mem_alloc(NULL, mem, MEM_READ_ONLY);
700                                 mem_copy_to(mem);
701
702                                 cuda_push_context();
703
704                                 cuda_assert(cuTexRefSetAddress(NULL, texref, cuda_device_ptr(mem.device_pointer), size));
705                                 cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_POINT));
706                                 cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_READ_AS_INTEGER));
707
708                                 cuda_pop_context();
709                         }
710                 }
711                 /* Texture Storage */
712                 else {
713                         CUarray handle = NULL;
714
715                         cuda_push_context();
716
717                         if(mem.data_depth > 1) {
718                                 CUDA_ARRAY3D_DESCRIPTOR desc;
719
720                                 desc.Width = mem.data_width;
721                                 desc.Height = mem.data_height;
722                                 desc.Depth = mem.data_depth;
723                                 desc.Format = format;
724                                 desc.NumChannels = mem.data_elements;
725                                 desc.Flags = 0;
726
727                                 cuda_assert(cuArray3DCreate(&handle, &desc));
728                         }
729                         else {
730                                 CUDA_ARRAY_DESCRIPTOR desc;
731
732                                 desc.Width = mem.data_width;
733                                 desc.Height = mem.data_height;
734                                 desc.Format = format;
735                                 desc.NumChannels = mem.data_elements;
736
737                                 cuda_assert(cuArrayCreate(&handle, &desc));
738                         }
739
740                         if(!handle) {
741                                 cuda_pop_context();
742                                 return;
743                         }
744
745                         /* Allocate 3D, 2D or 1D memory */
746                         if(mem.data_depth > 1) {
747                                 CUDA_MEMCPY3D param;
748                                 memset(&param, 0, sizeof(param));
749                                 param.dstMemoryType = CU_MEMORYTYPE_ARRAY;
750                                 param.dstArray = handle;
751                                 param.srcMemoryType = CU_MEMORYTYPE_HOST;
752                                 param.srcHost = (void*)mem.data_pointer;
753                                 param.srcPitch = mem.data_width*dsize*mem.data_elements;
754                                 param.WidthInBytes = param.srcPitch;
755                                 param.Height = mem.data_height;
756                                 param.Depth = mem.data_depth;
757
758                                 cuda_assert(cuMemcpy3D(&param));
759                         }
760                         else if(mem.data_height > 1) {
761                                 CUDA_MEMCPY2D param;
762                                 memset(&param, 0, sizeof(param));
763                                 param.dstMemoryType = CU_MEMORYTYPE_ARRAY;
764                                 param.dstArray = handle;
765                                 param.srcMemoryType = CU_MEMORYTYPE_HOST;
766                                 param.srcHost = (void*)mem.data_pointer;
767                                 param.srcPitch = mem.data_width*dsize*mem.data_elements;
768                                 param.WidthInBytes = param.srcPitch;
769                                 param.Height = mem.data_height;
770
771                                 cuda_assert(cuMemcpy2D(&param));
772                         }
773                         else
774                                 cuda_assert(cuMemcpyHtoA(handle, 0, (void*)mem.data_pointer, size));
775
776                         /* Fermi and Kepler */
777                         mem.device_pointer = (device_ptr)handle;
778                         mem.device_size = size;
779
780                         stats.mem_alloc(size);
781
782                         /* Bindless Textures - Kepler */
783                         if(has_bindless_textures) {
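                                /* The texture name encodes its flat slot index as a numeric suffix
                                 * after the last underscore; parse it to find which slot of the
                                 * bindless mapping this texture object belongs in. */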
784                                 int flat_slot = 0;
785                                 if(string_startswith(name, "__tex_image")) {
786                                         int pos =  string(name).rfind("_");
787                                         flat_slot = atoi(name + pos + 1);
788                                 }
789                                 else {
790                                         assert(0);
791                                 }
792
793                                 CUDA_RESOURCE_DESC resDesc;
794                                 memset(&resDesc, 0, sizeof(resDesc));
795                                 resDesc.resType = CU_RESOURCE_TYPE_ARRAY;
796                                 resDesc.res.array.hArray = handle;
797                                 resDesc.flags = 0;
798
799                                 CUDA_TEXTURE_DESC texDesc;
800                                 memset(&texDesc, 0, sizeof(texDesc));
801                                 texDesc.addressMode[0] = address_mode;
802                                 texDesc.addressMode[1] = address_mode;
803                                 texDesc.addressMode[2] = address_mode;
804                                 texDesc.filterMode = filter_mode;
805                                 texDesc.flags = CU_TRSF_NORMALIZED_COORDINATES;
806
807                                 CUtexObject tex = 0;
808                                 cuda_assert(cuTexObjectCreate(&tex, &resDesc, &texDesc, NULL));
809
810                                 /* Safety check: the texture object must fit in a uint bindless mapping slot. */
811                                 if(tex > UINT_MAX) {
812                                         assert(0);
813                                 }
814
815                                 /* Resize once */
816                                 if(flat_slot >= bindless_mapping.size()) {
817                                         /* Allocate some slots in advance, to reduce the number
818                                          * of re-allocations.
819                                          */
820                                         bindless_mapping.resize(flat_slot + 128);
821                                 }
822
823                                 /* Set Mapping and tag that we need to (re-)upload to device */
824                                 bindless_mapping.get_data()[flat_slot] = (uint)tex;
825                                 tex_bindless_map[mem.device_pointer] = (uint)tex;
826                                 need_bindless_mapping = true;
827                         }
828                         /* Regular Textures - Fermi */
829                         else {
830                                 cuda_assert(cuTexRefSetArray(texref, handle, CU_TRSA_OVERRIDE_FORMAT));
831                                 cuda_assert(cuTexRefSetFilterMode(texref, filter_mode));
832                                 cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_NORMALIZED_COORDINATES));
833                         }
834
835                         cuda_pop_context();
836                 }
837
838                 /* Fermi, Data and Image Textures */
839                 if(!has_bindless_textures) {
840                         cuda_push_context();
841
842                         cuda_assert(cuTexRefSetAddressMode(texref, 0, address_mode));
843                         cuda_assert(cuTexRefSetAddressMode(texref, 1, address_mode));
844                         if(mem.data_depth > 1) {
845                                 cuda_assert(cuTexRefSetAddressMode(texref, 2, address_mode));
846                         }
847
848                         cuda_assert(cuTexRefSetFormat(texref, format, mem.data_elements));
849
850                         cuda_pop_context();
851                 }
852
853                 /* Fermi and Kepler */
854                 tex_interp_map[mem.device_pointer] = (interpolation != INTERPOLATION_NONE);
855         }
856
857         void tex_free(device_memory& mem)
858         {
859                 if(mem.device_pointer) {
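                        /* Interpolated textures were stored in a CUDA array (and possibly a
                         * texture object); everything else is linear memory freed via mem_free(). */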
860                         if(tex_interp_map[mem.device_pointer]) {
861                                 cuda_push_context();
862                                 cuArrayDestroy((CUarray)mem.device_pointer);
863                                 cuda_pop_context();
864
865                                 /* Free CUtexObject (Bindless Textures) */
866                                 if(info.has_bindless_textures && tex_bindless_map[mem.device_pointer]) {
867                                         uint flat_slot = tex_bindless_map[mem.device_pointer];
868                                         cuTexObjectDestroy(flat_slot);
869                                 }
870
871                                 tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
872                                 mem.device_pointer = 0;
873
874                                 stats.mem_free(mem.device_size);
875                                 mem.device_size = 0;
876                         }
877                         else {
878                                 tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
879                                 mem_free(mem);
880                         }
881                 }
882         }
883
884         void path_trace(RenderTile& rtile, int sample, bool branched)
885         {
886                 if(have_error())
887                         return;
888
889                 cuda_push_context();
890
891                 CUfunction cuPathTrace;
892                 CUdeviceptr d_buffer = cuda_device_ptr(rtile.buffer);
893                 CUdeviceptr d_rng_state = cuda_device_ptr(rtile.rng_state);
894
895                 /* get kernel function */
896                 if(branched) {
897                         cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_branched_path_trace"));
898                 }
899                 else {
900                         cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_path_trace"));
901                 }
902
903                 if(have_error())
904                         return;
905
906                 /* pass in parameters */
907                 void *args[] = {&d_buffer,
908                                 &d_rng_state,
909                                 &sample,
910                                 &rtile.x,
911                                 &rtile.y,
912                                 &rtile.w,
913                                 &rtile.h,
914                                 &rtile.offset,
915                                 &rtile.stride};
916
917                 /* launch kernel */
918                 int threads_per_block;
919                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuPathTrace));
920
921                 /*int num_registers;
922                 cuda_assert(cuFuncGetAttribute(&num_registers, CU_FUNC_ATTRIBUTE_NUM_REGS, cuPathTrace));
923
924                 printf("threads_per_block %d\n", threads_per_block);
925                 printf("num_registers %d\n", num_registers);*/
926
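                /* Use a square thread block whose side comes from the kernel's maximum
                 * threads per block, and enough blocks to cover the whole tile. */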
927                 int xthreads = (int)sqrt(threads_per_block);
928                 int ythreads = (int)sqrt(threads_per_block);
929                 int xblocks = (rtile.w + xthreads - 1)/xthreads;
930                 int yblocks = (rtile.h + ythreads - 1)/ythreads;
931
932                 cuda_assert(cuFuncSetCacheConfig(cuPathTrace, CU_FUNC_CACHE_PREFER_L1));
933
934                 cuda_assert(cuLaunchKernel(cuPathTrace,
935                                            xblocks , yblocks, 1, /* blocks */
936                                            xthreads, ythreads, 1, /* threads */
937                                            0, 0, args, 0));
938
939                 cuda_assert(cuCtxSynchronize());
940
941                 cuda_pop_context();
942         }
943
944         void film_convert(DeviceTask& task, device_ptr buffer, device_ptr rgba_byte, device_ptr rgba_half)
945         {
946                 if(have_error())
947                         return;
948
949                 cuda_push_context();
950
951                 CUfunction cuFilmConvert;
952                 CUdeviceptr d_rgba = map_pixels((rgba_byte)? rgba_byte: rgba_half);
953                 CUdeviceptr d_buffer = cuda_device_ptr(buffer);
954
955                 /* get kernel function */
956                 if(rgba_half) {
957                         cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_half_float"));
958                 }
959                 else {
960                         cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_byte"));
961                 }
962
963
964                 float sample_scale = 1.0f/(task.sample + 1);
965
966                 /* pass in parameters */
967                 void *args[] = {&d_rgba,
968                                 &d_buffer,
969                                 &sample_scale,
970                                 &task.x,
971                                 &task.y,
972                                 &task.w,
973                                 &task.h,
974                                 &task.offset,
975                                 &task.stride};
976
977                 /* launch kernel */
978                 int threads_per_block;
979                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuFilmConvert));
980
981                 int xthreads = (int)sqrt(threads_per_block);
982                 int ythreads = (int)sqrt(threads_per_block);
983                 int xblocks = (task.w + xthreads - 1)/xthreads;
984                 int yblocks = (task.h + ythreads - 1)/ythreads;
985
986                 cuda_assert(cuFuncSetCacheConfig(cuFilmConvert, CU_FUNC_CACHE_PREFER_L1));
987
988                 cuda_assert(cuLaunchKernel(cuFilmConvert,
989                                            xblocks , yblocks, 1, /* blocks */
990                                            xthreads, ythreads, 1, /* threads */
991                                            0, 0, args, 0));
992
993                 unmap_pixels((rgba_byte)? rgba_byte: rgba_half);
994
995                 cuda_pop_context();
996         }
997
998         void shader(DeviceTask& task)
999         {
1000                 if(have_error())
1001                         return;
1002
1003                 cuda_push_context();
1004
1005                 CUfunction cuShader;
1006                 CUdeviceptr d_input = cuda_device_ptr(task.shader_input);
1007                 CUdeviceptr d_output = cuda_device_ptr(task.shader_output);
1008                 CUdeviceptr d_output_luma = cuda_device_ptr(task.shader_output_luma);
1009
1010                 /* get kernel function */
1011                 if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
1012                         cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_bake"));
1013                 }
1014                 else {
1015                         cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_shader"));
1016                 }
1017
1018                 /* do the task in smaller chunks, so we can cancel it */
1019                 const int shader_chunk_size = 65536;
1020                 const int start = task.shader_x;
1021                 const int end = task.shader_x + task.shader_w;
1022                 int offset = task.offset;
1023
1024                 bool canceled = false;
1025                 for(int sample = 0; sample < task.num_samples && !canceled; sample++) {
1026                         for(int shader_x = start; shader_x < end; shader_x += shader_chunk_size) {
1027                                 int shader_w = min(shader_chunk_size, end - shader_x);
1028
1029                                 /* pass in parameters */
1030                                 void *args[8];
1031                                 int arg = 0;
1032                                 args[arg++] = &d_input;
1033                                 args[arg++] = &d_output;
1034                                 if(task.shader_eval_type < SHADER_EVAL_BAKE) {
1035                                         args[arg++] = &d_output_luma;
1036                                 }
1037                                 args[arg++] = &task.shader_eval_type;
1038                                 if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
1039                                         args[arg++] = &task.shader_filter;
1040                                 }
1041                                 args[arg++] = &shader_x;
1042                                 args[arg++] = &shader_w;
1043                                 args[arg++] = &offset;
1044                                 args[arg++] = &sample;
1045
1046                                 /* launch kernel */
1047                                 int threads_per_block;
1048                                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuShader));
1049
1050                                 int xblocks = (shader_w + threads_per_block - 1)/threads_per_block;
1051
1052                                 cuda_assert(cuFuncSetCacheConfig(cuShader, CU_FUNC_CACHE_PREFER_L1));
1053                                 cuda_assert(cuLaunchKernel(cuShader,
1054                                                            xblocks , 1, 1, /* blocks */
1055                                                            threads_per_block, 1, 1, /* threads */
1056                                                            0, 0, args, 0));
1057
1058                                 cuda_assert(cuCtxSynchronize());
1059
1060                                 if(task.get_cancel()) {
1061                                         canceled = true;
1062                                         break;
1063                                 }
1064                         }
1065
1066                         task.update_progress(NULL);
1067                 }
1068
1069                 cuda_pop_context();
1070         }
1071
1072         CUdeviceptr map_pixels(device_ptr mem)
1073         {
1074                 if(!background) {
1075                         PixelMem pmem = pixel_mem_map[mem];
1076                         CUdeviceptr buffer;
1077
1078                         size_t bytes;
1079                         cuda_assert(cuGraphicsMapResources(1, &pmem.cuPBOresource, 0));
1080                         cuda_assert(cuGraphicsResourceGetMappedPointer(&buffer, &bytes, pmem.cuPBOresource));
1081
1082                         return buffer;
1083                 }
1084
1085                 return cuda_device_ptr(mem);
1086         }
1087
1088         void unmap_pixels(device_ptr mem)
1089         {
1090                 if(!background) {
1091                         PixelMem pmem = pixel_mem_map[mem];
1092
1093                         cuda_assert(cuGraphicsUnmapResources(1, &pmem.cuPBOresource, 0));
1094                 }
1095         }
1096
1097         void pixels_alloc(device_memory& mem)
1098         {
1099                 if(!background) {
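                        /* GL interop path: allocate a pixel buffer object registered with CUDA
                         * and a texture to draw it with; on failure fall back to the non-interop
                         * path below. */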
1100                         PixelMem pmem;
1101
1102                         pmem.w = mem.data_width;
1103                         pmem.h = mem.data_height;
1104
1105                         cuda_push_context();
1106
1107                         glGenBuffers(1, &pmem.cuPBO);
1108                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
1109                         if(mem.data_type == TYPE_HALF)
1110                                 glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(GLhalf)*4, NULL, GL_DYNAMIC_DRAW);
1111                         else
1112                                 glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(uint8_t)*4, NULL, GL_DYNAMIC_DRAW);
1113
1114                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1115
1116                         glGenTextures(1, &pmem.cuTexId);
1117                         glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
1118                         if(mem.data_type == TYPE_HALF)
1119                                 glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F_ARB, pmem.w, pmem.h, 0, GL_RGBA, GL_HALF_FLOAT, NULL);
1120                         else
1121                                 glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, pmem.w, pmem.h, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
1122                         glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
1123                         glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
1124                         glBindTexture(GL_TEXTURE_2D, 0);
1125
1126                         CUresult result = cuGraphicsGLRegisterBuffer(&pmem.cuPBOresource, pmem.cuPBO, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);
1127
1128                         if(result == CUDA_SUCCESS) {
1129                                 cuda_pop_context();
1130
1131                                 mem.device_pointer = pmem.cuTexId;
1132                                 pixel_mem_map[mem.device_pointer] = pmem;
1133
1134                                 mem.device_size = mem.memory_size();
1135                                 stats.mem_alloc(mem.device_size);
1136
1137                                 return;
1138                         }
1139                         else {
1140                                 /* failed to register buffer, fallback to no interop */
1141                                 glDeleteBuffers(1, &pmem.cuPBO);
1142                                 glDeleteTextures(1, &pmem.cuTexId);
1143
1144                                 cuda_pop_context();
1145
1146                                 background = true;
1147                         }
1148                 }
1149
1150                 Device::pixels_alloc(mem);
1151         }
1152
1153         void pixels_copy_from(device_memory& mem, int y, int w, int h)
1154         {
1155                 if(!background) {
1156                         PixelMem pmem = pixel_mem_map[mem.device_pointer];
1157
1158                         cuda_push_context();
1159
1160                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
1161                         uchar *pixels = (uchar*)glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_READ_ONLY);
1162                         size_t offset = sizeof(uchar)*4*y*w;
1163                         memcpy((uchar*)mem.data_pointer + offset, pixels + offset, sizeof(uchar)*4*w*h);
1164                         glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
1165                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1166
1167                         cuda_pop_context();
1168
1169                         return;
1170                 }
1171
1172                 Device::pixels_copy_from(mem, y, w, h);
1173         }
1174
1175         void pixels_free(device_memory& mem)
1176         {
1177                 if(mem.device_pointer) {
1178                         if(!background) {
1179                                 PixelMem pmem = pixel_mem_map[mem.device_pointer];
1180
1181                                 cuda_push_context();
1182
1183                                 cuda_assert(cuGraphicsUnregisterResource(pmem.cuPBOresource));
1184                                 glDeleteBuffers(1, &pmem.cuPBO);
1185                                 glDeleteTextures(1, &pmem.cuTexId);
1186
1187                                 cuda_pop_context();
1188
1189                                 pixel_mem_map.erase(pixel_mem_map.find(mem.device_pointer));
1190                                 mem.device_pointer = 0;
1191
1192                                 stats.mem_free(mem.device_size);
1193                                 mem.device_size = 0;
1194
1195                                 return;
1196                         }
1197
1198                         Device::pixels_free(mem);
1199                 }
1200         }
1201
1202         void draw_pixels(
1203             device_memory& mem, int y,
1204             int w, int h, int width, int height,
1205             int dx, int dy, int dw, int dh, bool transparent,
1206                 const DeviceDrawParams &draw_params)
1207         {
1208                 if(!background) {
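                        /* Upload the PBO contents into the GL texture and draw a textured quad,
                         * using either the caller-provided display space shader or the fallback
                         * shader. */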
1209                         const bool use_fallback_shader = (draw_params.bind_display_space_shader_cb == NULL);
1210                         PixelMem pmem = pixel_mem_map[mem.device_pointer];
1211                         float *vpointer;
1212
1213                         cuda_push_context();
1214
1215                         /* For multi devices, this assumes the inefficient method of allocating
1216                          * all pixels on the device even though we only render to a subset. */
1217                         size_t offset = 4*y*w;
1218
1219                         if(mem.data_type == TYPE_HALF)
1220                                 offset *= sizeof(GLhalf);
1221                         else
1222                                 offset *= sizeof(uint8_t);
1223
1224                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
1225                         glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
1226                         if(mem.data_type == TYPE_HALF) {
1227                                 glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_HALF_FLOAT, (void*)offset);
1228                         }
1229                         else {
1230                                 glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, (void*)offset);
1231                         }
1232                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1233
1234                         glEnable(GL_TEXTURE_2D);
1235
1236                         if(transparent) {
1237                                 glEnable(GL_BLEND);
1238                                 glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
1239                         }
1240
1241                         GLint shader_program;
1242                         if(use_fallback_shader) {
1243                                 if(!bind_fallback_display_space_shader(dw, dh)) {
1244                                         return;
1245                                 }
1246                                 shader_program = fallback_shader_program;
1247                         }
1248                         else {
1249                                 draw_params.bind_display_space_shader_cb();
1250                                 glGetIntegerv(GL_CURRENT_PROGRAM, &shader_program);
1251                         }
1252
1253                         if(!vertex_buffer) {
1254                                 glGenBuffers(1, &vertex_buffer);
1255                         }
1256
1257                         glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
1258                         /* invalidate old contents - avoids stalling if buffer is still waiting in queue to be rendered */
1259                         glBufferData(GL_ARRAY_BUFFER, 16 * sizeof(float), NULL, GL_STREAM_DRAW);
1260
1261                         vpointer = (float *)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
1262
1263                         if(vpointer) {
1264                                 /* texture coordinate - vertex pair */
1265                                 vpointer[0] = 0.0f;
1266                                 vpointer[1] = 0.0f;
1267                                 vpointer[2] = dx;
1268                                 vpointer[3] = dy;
1269
1270                                 vpointer[4] = (float)w/(float)pmem.w;
1271                                 vpointer[5] = 0.0f;
1272                                 vpointer[6] = (float)width + dx;
1273                                 vpointer[7] = dy;
1274
1275                                 vpointer[8] = (float)w/(float)pmem.w;
1276                                 vpointer[9] = (float)h/(float)pmem.h;
1277                                 vpointer[10] = (float)width + dx;
1278                                 vpointer[11] = (float)height + dy;
1279
1280                                 vpointer[12] = 0.0f;
1281                                 vpointer[13] = (float)h/(float)pmem.h;
1282                                 vpointer[14] = dx;
1283                                 vpointer[15] = (float)height + dy;
1284
1285                                 glUnmapBuffer(GL_ARRAY_BUFFER);
1286                         }
1287
1288                         GLuint vertex_array_object;
1289                         GLuint position_attribute, texcoord_attribute;
1290
1291                         glGenVertexArrays(1, &vertex_array_object);
1292                         glBindVertexArray(vertex_array_object);
1293
1294                         texcoord_attribute = glGetAttribLocation(shader_program, "texCoord");
1295                         position_attribute = glGetAttribLocation(shader_program, "pos");
1296
1297                         glEnableVertexAttribArray(texcoord_attribute);
1298                         glEnableVertexAttribArray(position_attribute);
1299
1300                         glVertexAttribPointer(texcoord_attribute, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(float), (const GLvoid *)0);
1301                         glVertexAttribPointer(position_attribute, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(float), (const GLvoid *)(sizeof(float) * 2));
1302
1303                         glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
1304
1305                         if(use_fallback_shader) {
1306                                 glUseProgram(0);
1307                         }
1308                         else {
1309                                 draw_params.unbind_display_space_shader_cb();
1310                         }
1311
1312                         if(transparent) {
1313                                 glDisable(GL_BLEND);
1314                         }
1315
1316                         glBindTexture(GL_TEXTURE_2D, 0);
1317                         glDisable(GL_TEXTURE_2D);
1318
1319                         cuda_pop_context();
1320
1321                         return;
1322                 }
1323
1324                 Device::draw_pixels(mem, y, w, h, width, height, dx, dy, dw, dh, transparent, draw_params);
1325         }
1326
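        /* Per-task worker entry point, executed from the task pool. PATH_TRACE
         * tasks keep acquiring tiles and either run the megakernel one sample at
         * a time or hand the tile to the CUDA split kernel; SHADER tasks run the
         * shader evaluation kernel and synchronize the context afterwards. */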
1327         void thread_run(DeviceTask *task)
1328         {
1329                 if(task->type == DeviceTask::PATH_TRACE) {
1330                         RenderTile tile;
1331
1332                         bool branched = task->integrator_branched;
1333
1334                         /* Upload Bindless Mapping */
1335                         load_bindless_mapping();
1336
1337                         if(!use_split_kernel()) {
1338                                 /* keep rendering tiles until done */
1339                                 while(task->acquire_tile(this, tile)) {
1340                                         int start_sample = tile.start_sample;
1341                                         int end_sample = tile.start_sample + tile.num_samples;
1342
1343                                         for(int sample = start_sample; sample < end_sample; sample++) {
1344                                                 if(task->get_cancel()) {
1345                                                         if(task->need_finish_queue == false)
1346                                                                 break;
1347                                                 }
1348
1349                                                 path_trace(tile, sample, branched);
1350
1351                                                 tile.sample = sample + 1;
1352
1353                                                 task->update_progress(&tile, tile.w*tile.h);
1354                                         }
1355
1356                                         task->release_tile(tile);
1357                                 }
1358                         }
1359                         else {
1360                                 DeviceRequestedFeatures requested_features;
1361                                 if(!use_adaptive_compilation()) {
1362                                         requested_features.max_closure = 64;
1363                                 }
1364
1365                                 if(split_kernel == NULL) {
1366                                         split_kernel = new CUDASplitKernel(this);
1367                                         split_kernel->load_kernels(requested_features);
1368                                 }
1369
1370                                 while(task->acquire_tile(this, tile)) {
1371                                         device_memory void_buffer;
1372                                         split_kernel->path_trace(task, tile, void_buffer, void_buffer);
1373
1374                                         task->release_tile(tile);
1375
1376                                         if(task->get_cancel()) {
1377                                                 if(task->need_finish_queue == false)
1378                                                         break;
1379                                         }
1380                                 }
1381                         }
1382                 }
1383                 else if(task->type == DeviceTask::SHADER) {
1384                         /* Upload Bindless Mapping */
1385                         load_bindless_mapping();
1386
1387                         shader(*task);
1388
1389                         cuda_push_context();
1390                         cuda_assert(cuCtxSynchronize());
1391                         cuda_pop_context();
1392                 }
1393         }
1394
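        /* Small DeviceTask wrapper that binds the task to this device's
         * thread_run(), so it can simply be pushed onto the task pool. */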
1395         class CUDADeviceTask : public DeviceTask {
1396         public:
1397                 CUDADeviceTask(CUDADevice *device, DeviceTask& task)
1398                 : DeviceTask(task)
1399                 {
1400                         run = function_bind(&CUDADevice::thread_run, device, this);
1401                 }
1402         };
1403
1404         int get_split_task_count(DeviceTask& /*task*/)
1405         {
1406                 return 1;
1407         }
1408
1409         void task_add(DeviceTask& task)
1410         {
1411                 if(task.type == DeviceTask::FILM_CONVERT) {
1412                         /* Must be done in main thread due to OpenGL access. */
1413                         film_convert(task, task.buffer, task.rgba_byte, task.rgba_half);
1414
1415                         cuda_push_context();
1416                         cuda_assert(cuCtxSynchronize());
1417                         cuda_pop_context();
1418                 }
1419                 else {
1420                         task_pool.push(new CUDADeviceTask(this, task));
1421                 }
1422         }
1423
1424         void task_wait()
1425         {
1426                 task_pool.wait();
1427         }
1428
1429         void task_cancel()
1430         {
1431                 task_pool.cancel();
1432         }
1433
1434         friend class CUDASplitKernelFunction;
1435         friend class CUDASplitKernel;
1436 };
1437
1438 /* redefine the cuda_assert macro so it can be used outside of the CUDADevice class
1439  * now that the definition of that class is complete
1440  */
1441 #undef cuda_assert
1442 #define cuda_assert(stmt) \
1443         { \
1444                 CUresult result = stmt; \
1445                 \
1446                 if(result != CUDA_SUCCESS) { \
1447                         string message = string_printf("CUDA error: %s in %s", cuewErrorString(result), #stmt); \
1448                         if(device->error_msg == "") \
1449                                 device->error_msg = message; \
1450                         fprintf(stderr, "%s\n", message.c_str()); \
1451                         /*cuda_abort();*/ \
1452                         device->cuda_error_documentation(); \
1453                 } \
1454         } (void)0
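
/* NOTE: unlike the in-class definition earlier in this file, this macro relies
 * on a CUDADevice pointer named `device` being in scope at the call site, as
 * is the case in the friend classes below. Illustrative sketch only
 * (hypothetical helper; assumes the same friend access to CUDADevice that
 * CUDASplitKernelFunction has):
 *
 *   bool enqueue_single_block(CUDADevice *device, CUfunction func, void *args[])
 *   {
 *           device->cuda_push_context();
 *           cuda_assert(cuLaunchKernel(func, 1, 1, 1, 1, 1, 1, 0, 0, args, 0));
 *           device->cuda_pop_context();
 *           return !device->have_error();
 *   }
 */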
1455
1456 /* split kernel */
1457
1458 class CUDASplitKernelFunction : public SplitKernelFunction {
1459         CUDADevice* device;
1460         CUfunction func;
1461 public:
1462         CUDASplitKernelFunction(CUDADevice *device, CUfunction func) : device(device), func(func) {}
1463
1464         /* enqueue the kernel, returns false if there is an error */
1465         bool enqueue(const KernelDimensions &dim, device_memory &/*kg*/, device_memory &/*data*/)
1466         {
1467                 return enqueue(dim, NULL);
1468         }
1469
1470         /* enqueue the kernel, returns false if there is an error */
1471         bool enqueue(const KernelDimensions &dim, void *args[])
1472         {
1473                 device->cuda_push_context();
1474
1475                 if(device->have_error())
1476                         return false;
1477
1478                 /* we ignore dim.local_size for now, as this is faster */
1479                 int threads_per_block;
1480                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, func));
1481
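                /* Use a square thread block derived from the kernel's limit, e.g. a
                 * 1024-thread maximum yields a 32x32 block. */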
1482                 int xthreads = (int)sqrt(threads_per_block);
1483                 int ythreads = (int)sqrt(threads_per_block);
1484
1485                 int xblocks = (dim.global_size[0] + xthreads - 1)/xthreads;
1486                 int yblocks = (dim.global_size[1] + ythreads - 1)/ythreads;
1487
1488                 cuda_assert(cuFuncSetCacheConfig(func, CU_FUNC_CACHE_PREFER_L1));
1489
1490                 cuda_assert(cuLaunchKernel(func,
1491                                            xblocks, yblocks, 1, /* blocks */
1492                                            xthreads, ythreads, 1, /* threads */
1493                                            0, 0, args, 0));
1494
1495                 device->cuda_pop_context();
1496
1497                 return !device->have_error();
1498         }
1499 };
1500
1501 CUDASplitKernel::CUDASplitKernel(CUDADevice *device) : DeviceSplitKernel(device), device(device)
1502 {
1503 }
1504
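/* Query how much state memory the split kernel needs for the given number of
 * threads: allocate a single uint64_t on the device, launch the
 * kernel_cuda_state_buffer_size kernel with one thread to fill it in, and
 * copy the result back to the host. */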
1505 uint64_t CUDASplitKernel::state_buffer_size(device_memory& /*kg*/, device_memory& /*data*/, size_t num_threads)
1506 {
1507         device_vector<uint64_t> size_buffer;
1508         size_buffer.resize(1);
1509         device->mem_alloc(NULL, size_buffer, MEM_READ_WRITE);
1510
1511         device->cuda_push_context();
1512
1513         uint threads = num_threads;
1514         CUdeviceptr d_size = device->cuda_device_ptr(size_buffer.device_pointer);
1515
1516         struct args_t {
1517                 uint* num_threads;
1518                 CUdeviceptr* size;
1519         };
1520
1521         args_t args = {
1522                 &threads,
1523                 &d_size
1524         };
1525
1526         CUfunction state_buffer_size;
1527         cuda_assert(cuModuleGetFunction(&state_buffer_size, device->cuModule, "kernel_cuda_state_buffer_size"));
1528
1529         cuda_assert(cuLaunchKernel(state_buffer_size,
1530                                    1, 1, 1,
1531                                    1, 1, 1,
1532                                    0, 0, (void**)&args, 0));
1533
1534         device->cuda_pop_context();
1535
1536         device->mem_copy_from(size_buffer, 0, 1, 1, sizeof(uint64_t));
1537         device->mem_free(size_buffer);
1538
1539         return *size_buffer.get_data();
1540 }
1541
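/* Launch the split kernel's data initialization step for a tile: resolve the
 * device pointers of the split buffers and render tile, pack them into an
 * args struct whose members match the parameter order of
 * kernel_cuda_path_trace_data_init, and enqueue that kernel over the full
 * global size. */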
1542 bool CUDASplitKernel::enqueue_split_kernel_data_init(const KernelDimensions& dim,
1543                                     RenderTile& rtile,
1544                                     int num_global_elements,
1545                                     device_memory& /*kernel_globals*/,
1546                                     device_memory& /*kernel_data*/,
1547                                     device_memory& split_data,
1548                                     device_memory& ray_state,
1549                                     device_memory& queue_index,
1550                                     device_memory& use_queues_flag,
1551                                     device_memory& work_pool_wgs)
1552 {
1553         device->cuda_push_context();
1554
1555         CUdeviceptr d_split_data = device->cuda_device_ptr(split_data.device_pointer);
1556         CUdeviceptr d_ray_state = device->cuda_device_ptr(ray_state.device_pointer);
1557         CUdeviceptr d_queue_index = device->cuda_device_ptr(queue_index.device_pointer);
1558         CUdeviceptr d_use_queues_flag = device->cuda_device_ptr(use_queues_flag.device_pointer);
1559         CUdeviceptr d_work_pool_wgs = device->cuda_device_ptr(work_pool_wgs.device_pointer);
1560
1561         CUdeviceptr d_rng_state = device->cuda_device_ptr(rtile.rng_state);
1562         CUdeviceptr d_buffer = device->cuda_device_ptr(rtile.buffer);
1563
1564         int end_sample = rtile.start_sample + rtile.num_samples;
1565         int queue_size = dim.global_size[0] * dim.global_size[1];
1566
1567         struct args_t {
1568                 CUdeviceptr* split_data_buffer;
1569                 int* num_elements;
1570                 CUdeviceptr* ray_state;
1571                 CUdeviceptr* rng_state;
1572                 int* start_sample;
1573                 int* end_sample;
1574                 int* sx;
1575                 int* sy;
1576                 int* sw;
1577                 int* sh;
1578                 int* offset;
1579                 int* stride;
1580                 CUdeviceptr* queue_index;
1581                 int* queuesize;
1582                 CUdeviceptr* use_queues_flag;
1583                 CUdeviceptr* work_pool_wgs;
1584                 int* num_samples;
1585                 CUdeviceptr* buffer;
1586         };
1587
1588         args_t args = {
1589                 &d_split_data,
1590                 &num_global_elements,
1591                 &d_ray_state,
1592                 &d_rng_state,
1593                 &rtile.start_sample,
1594                 &end_sample,
1595                 &rtile.x,
1596                 &rtile.y,
1597                 &rtile.w,
1598                 &rtile.h,
1599                 &rtile.offset,
1600                 &rtile.stride,
1601                 &d_queue_index,
1602                 &queue_size,
1603                 &d_use_queues_flag,
1604                 &d_work_pool_wgs,
1605                 &rtile.num_samples,
1606                 &d_buffer
1607         };
1608
1609         CUfunction data_init;
1610         cuda_assert(cuModuleGetFunction(&data_init, device->cuModule, "kernel_cuda_path_trace_data_init"));
1611         if(device->have_error()) {
1612                 return false;
1613         }
1614
1615         CUDASplitKernelFunction(device, data_init).enqueue(dim, (void**)&args);
1616
1617         device->cuda_pop_context();
1618
1619         return !device->have_error();
1620 }
1621
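/* Look up the "kernel_cuda_<kernel_name>" entry point in the loaded CUDA
 * module and wrap it in a CUDASplitKernelFunction; returns NULL and reports
 * an error if the symbol is missing. */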
1622 SplitKernelFunction* CUDASplitKernel::get_split_kernel_function(string kernel_name, const DeviceRequestedFeatures&)
1623 {
1624         CUfunction func;
1625
1626         device->cuda_push_context();
1627
1628         cuda_assert(cuModuleGetFunction(&func, device->cuModule, (string("kernel_cuda_") + kernel_name).data()));
1629         if(device->have_error()) {
1630                 device->cuda_error_message(string_printf("kernel \"kernel_cuda_%s\" not found in module", kernel_name.data()));
1631                 return NULL;
1632         }
1633
1634         device->cuda_pop_context();
1635
1636         return new CUDASplitKernelFunction(device, func);
1637 }
1638
1639 int2 CUDASplitKernel::split_kernel_local_size()
1640 {
1641         return make_int2(32, 1);
1642 }
1643
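/* Pick the global work size for the split kernel from the amount of free
 * device memory: half of it is budgeted for split state, converted into a
 * maximum element count, and shaped into a roughly square grid rounded down
 * to multiples of 32 x 16. Illustrative only (assumed numbers): with 2 GiB
 * free and roughly 1 KiB of state per thread this allows about 1M elements,
 * giving a 1024 x 1024 global size. */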
1644 int2 CUDASplitKernel::split_kernel_global_size(device_memory& kg, device_memory& data, DeviceTask * /*task*/)
1645 {
1646         size_t free;
1647         size_t total;
1648
1649         device->cuda_push_context();
1650         cuda_assert(cuMemGetInfo(&free, &total));
1651         device->cuda_pop_context();
1652
1653         VLOG(1) << "Maximum device allocation size: "
1654                 << string_human_readable_number(free) << " bytes. ("
1655                 << string_human_readable_size(free) << ").";
1656
1657         size_t num_elements = max_elements_for_max_buffer_size(kg, data, free / 2);
1658         size_t side = round_down((int)sqrt(num_elements), 32);
1659         int2 global_size = make_int2(side, round_down(num_elements / side, 16));
1660         VLOG(1) << "Global size: " << global_size << ".";
1661         return global_size;
1662 }
1663
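/* One-time CUDA availability check. With WITH_CUDA_DYNLOAD this initializes
 * CUEW once and caches whether CUDA is usable: either precompiled kernels
 * are present, or (on non-Windows platforms) an nvcc compiler was found for
 * runtime compilation. When CUDA is linked directly it always succeeds. */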
1664 bool device_cuda_init(void)
1665 {
1666 #ifdef WITH_CUDA_DYNLOAD
1667         static bool initialized = false;
1668         static bool result = false;
1669
1670         if(initialized)
1671                 return result;
1672
1673         initialized = true;
1674         int cuew_result = cuewInit();
1675         if(cuew_result == CUEW_SUCCESS) {
1676                 VLOG(1) << "CUEW initialization succeeded";
1677                 if(CUDADevice::have_precompiled_kernels()) {
1678                         VLOG(1) << "Found precompiled kernels";
1679                         result = true;
1680                 }
1681 #ifndef _WIN32
1682                 else if(cuewCompilerPath() != NULL) {
1683                         VLOG(1) << "Found CUDA compiler " << cuewCompilerPath();
1684                         result = true;
1685                 }
1686                 else {
1687                         VLOG(1) << "Neither precompiled kernels nor CUDA compiler was found,"
1688                                 << " unable to use CUDA";
1689                 }
1690 #endif
1691         }
1692         else {
1693                 VLOG(1) << "CUEW initialization failed: "
1694                         << ((cuew_result == CUEW_ERROR_ATEXIT_FAILED)
1695                             ? "Error setting up atexit() handler"
1696                             : "Error opening the library");
1697         }
1698
1699         return result;
1700 #else  /* WITH_CUDA_DYNLOAD */
1701         return true;
1702 #endif /* WITH_CUDA_DYNLOAD */
1703 }
1704
1705 Device *device_cuda_create(DeviceInfo& info, Stats &stats, bool background)
1706 {
1707         return new CUDADevice(info, stats, background);
1708 }
1709
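/* Enumerate usable CUDA devices. Devices below compute capability 2.0 are
 * skipped, the id is derived from the device name and PCI location, and
 * display devices (those with a kernel execution timeout) are listed after
 * the compute-only ones. */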
1710 void device_cuda_info(vector<DeviceInfo>& devices)
1711 {
1712         CUresult result;
1713         int count = 0;
1714
1715         result = cuInit(0);
1716         if(result != CUDA_SUCCESS) {
1717                 if(result != CUDA_ERROR_NO_DEVICE)
1718                         fprintf(stderr, "CUDA cuInit: %s\n", cuewErrorString(result));
1719                 return;
1720         }
1721
1722         result = cuDeviceGetCount(&count);
1723         if(result != CUDA_SUCCESS) {
1724                 fprintf(stderr, "CUDA cuDeviceGetCount: %s\n", cuewErrorString(result));
1725                 return;
1726         }
1727
1728         vector<DeviceInfo> display_devices;
1729
1730         for(int num = 0; num < count; num++) {
1731                 char name[256];
1732                 int attr;
1733
1734                 if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS)
1735                         continue;
1736
1737                 int major;
1738                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, num);
1739                 if(major < 2) {
1740                         continue;
1741                 }
1742
1743                 DeviceInfo info;
1744
1745                 info.type = DEVICE_CUDA;
1746                 info.description = string(name);
1747                 info.num = num;
1748
1749                 info.advanced_shading = (major >= 2);
1750                 info.has_bindless_textures = (major >= 3);
1751                 info.pack_images = false;
1752
1753                 int pci_location[3] = {0, 0, 0};
1754                 cuDeviceGetAttribute(&pci_location[0], CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID, num);
1755                 cuDeviceGetAttribute(&pci_location[1], CU_DEVICE_ATTRIBUTE_PCI_BUS_ID, num);
1756                 cuDeviceGetAttribute(&pci_location[2], CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID, num);
1757                 info.id = string_printf("CUDA_%s_%04x:%02x:%02x",
1758                                         name,
1759                                         (unsigned int)pci_location[0],
1760                                         (unsigned int)pci_location[1],
1761                                         (unsigned int)pci_location[2]);
1762
1763                 /* if device has a kernel timeout, assume it is used for display */
1764                 if(cuDeviceGetAttribute(&attr, CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT, num) == CUDA_SUCCESS && attr == 1) {
1765                         info.description += " (Display)";
1766                         info.display_device = true;
1767                         display_devices.push_back(info);
1768                 }
1769                 else
1770                         devices.push_back(info);
1771         }
1772
1773         if(!display_devices.empty())
1774                 devices.insert(devices.end(), display_devices.begin(), display_devices.end());
1775 }
1776
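/* Build a human readable report of per-device CUDA attributes, e.g. for
 * debug output and system information reports; returns an error description
 * if CUDA cannot be initialized. */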
1777 string device_cuda_capabilities(void)
1778 {
1779         CUresult result = cuInit(0);
1780         if(result != CUDA_SUCCESS) {
1781                 if(result != CUDA_ERROR_NO_DEVICE) {
1782                         return string("Error initializing CUDA: ") + cuewErrorString(result);
1783                 }
1784                 return "No CUDA device found\n";
1785         }
1786
1787         int count;
1788         result = cuDeviceGetCount(&count);
1789         if(result != CUDA_SUCCESS) {
1790                 return string("Error getting devices: ") + cuewErrorString(result);
1791         }
1792
1793         string capabilities = "";
1794         for(int num = 0; num < count; num++) {
1795                 char name[256];
1796                 if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS) {
1797                         continue;
1798                 }
1799                 capabilities += string("\t") + name + "\n";
1800                 int value;
1801 #define GET_ATTR(attr) \
1802                 { \
1803                         if(cuDeviceGetAttribute(&value, \
1804                                                 CU_DEVICE_ATTRIBUTE_##attr, \
1805                                                 num) == CUDA_SUCCESS) \
1806                         { \
1807                                 capabilities += string_printf("\t\tCU_DEVICE_ATTRIBUTE_" #attr "\t\t\t%d\n", \
1808                                                               value); \
1809                         } \
1810                 } (void)0
1811                 /* TODO(sergey): Strip all attributes which are not useful for us
1812                  * or do not depend on the driver.
1813                  */
1814                 GET_ATTR(MAX_THREADS_PER_BLOCK);
1815                 GET_ATTR(MAX_BLOCK_DIM_X);
1816                 GET_ATTR(MAX_BLOCK_DIM_Y);
1817                 GET_ATTR(MAX_BLOCK_DIM_Z);
1818                 GET_ATTR(MAX_GRID_DIM_X);
1819                 GET_ATTR(MAX_GRID_DIM_Y);
1820                 GET_ATTR(MAX_GRID_DIM_Z);
1821                 GET_ATTR(MAX_SHARED_MEMORY_PER_BLOCK);
1822                 GET_ATTR(SHARED_MEMORY_PER_BLOCK);
1823                 GET_ATTR(TOTAL_CONSTANT_MEMORY);
1824                 GET_ATTR(WARP_SIZE);
1825                 GET_ATTR(MAX_PITCH);
1826                 GET_ATTR(MAX_REGISTERS_PER_BLOCK);
1827                 GET_ATTR(REGISTERS_PER_BLOCK);
1828                 GET_ATTR(CLOCK_RATE);
1829                 GET_ATTR(TEXTURE_ALIGNMENT);
1830                 GET_ATTR(GPU_OVERLAP);
1831                 GET_ATTR(MULTIPROCESSOR_COUNT);
1832                 GET_ATTR(KERNEL_EXEC_TIMEOUT);
1833                 GET_ATTR(INTEGRATED);
1834                 GET_ATTR(CAN_MAP_HOST_MEMORY);
1835                 GET_ATTR(COMPUTE_MODE);
1836                 GET_ATTR(MAXIMUM_TEXTURE1D_WIDTH);
1837                 GET_ATTR(MAXIMUM_TEXTURE2D_WIDTH);
1838                 GET_ATTR(MAXIMUM_TEXTURE2D_HEIGHT);
1839                 GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH);
1840                 GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT);
1841                 GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH);
1842                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_WIDTH);
1843                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_HEIGHT);
1844                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_LAYERS);
1845                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_WIDTH);
1846                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_HEIGHT);
1847                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES);
1848                 GET_ATTR(SURFACE_ALIGNMENT);
1849                 GET_ATTR(CONCURRENT_KERNELS);
1850                 GET_ATTR(ECC_ENABLED);
1851                 GET_ATTR(TCC_DRIVER);
1852                 GET_ATTR(MEMORY_CLOCK_RATE);
1853                 GET_ATTR(GLOBAL_MEMORY_BUS_WIDTH);
1854                 GET_ATTR(L2_CACHE_SIZE);
1855                 GET_ATTR(MAX_THREADS_PER_MULTIPROCESSOR);
1856                 GET_ATTR(ASYNC_ENGINE_COUNT);
1857                 GET_ATTR(UNIFIED_ADDRESSING);
1858                 GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_WIDTH);
1859                 GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_LAYERS);
1860                 GET_ATTR(CAN_TEX2D_GATHER);
1861                 GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_WIDTH);
1862                 GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_HEIGHT);
1863                 GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE);
1864                 GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE);
1865                 GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE);
1866                 GET_ATTR(TEXTURE_PITCH_ALIGNMENT);
1867                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_WIDTH);
1868                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH);
1869                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS);
1870                 GET_ATTR(MAXIMUM_SURFACE1D_WIDTH);
1871                 GET_ATTR(MAXIMUM_SURFACE2D_WIDTH);
1872                 GET_ATTR(MAXIMUM_SURFACE2D_HEIGHT);
1873                 GET_ATTR(MAXIMUM_SURFACE3D_WIDTH);
1874                 GET_ATTR(MAXIMUM_SURFACE3D_HEIGHT);
1875                 GET_ATTR(MAXIMUM_SURFACE3D_DEPTH);
1876                 GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_WIDTH);
1877                 GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_LAYERS);
1878                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_WIDTH);
1879                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_HEIGHT);
1880                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_LAYERS);
1881                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_WIDTH);
1882                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH);
1883                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS);
1884                 GET_ATTR(MAXIMUM_TEXTURE1D_LINEAR_WIDTH);
1885                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_WIDTH);
1886                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_HEIGHT);
1887                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_PITCH);
1888                 GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH);
1889                 GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT);
1890                 GET_ATTR(COMPUTE_CAPABILITY_MAJOR);
1891                 GET_ATTR(COMPUTE_CAPABILITY_MINOR);
1892                 GET_ATTR(MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH);
1893                 GET_ATTR(STREAM_PRIORITIES_SUPPORTED);
1894                 GET_ATTR(GLOBAL_L1_CACHE_SUPPORTED);
1895                 GET_ATTR(LOCAL_L1_CACHE_SUPPORTED);
1896                 GET_ATTR(MAX_SHARED_MEMORY_PER_MULTIPROCESSOR);
1897                 GET_ATTR(MAX_REGISTERS_PER_MULTIPROCESSOR);
1898                 GET_ATTR(MANAGED_MEMORY);
1899                 GET_ATTR(MULTI_GPU_BOARD);
1900                 GET_ATTR(MULTI_GPU_BOARD_GROUP_ID);
1901 #undef GET_ATTR
1902                 capabilities += "\n";
1903         }
1904
1905         return capabilities;
1906 }
1907
1908 CCL_NAMESPACE_END