intern/cycles/device/device_cuda.cpp
/*
 * Copyright 2011-2013 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "device.h"
#include "device_intern.h"

#include "buffers.h"

#ifdef WITH_CUDA_DYNLOAD
#  include "cuew.h"
#else
#  include "util_opengl.h"
#  include <cuda.h>
#  include <cudaGL.h>
#endif
#include "util_debug.h"
#include "util_logging.h"
#include "util_map.h"
#include "util_md5.h"
#include "util_opengl.h"
#include "util_path.h"
#include "util_string.h"
#include "util_system.h"
#include "util_types.h"
#include "util_time.h"

/* Use feature-adaptive kernel compilation.
 * Requires the CUDA toolkit to be installed and currently only works on Linux.
 */
/* #define KERNEL_USE_ADAPTIVE */
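/* When defined, compile_kernel() below mixes a hash of the requested feature
 * set into the cubin name and appends the corresponding build options to the
 * nvcc command line, so every feature combination gets its own cached kernel
 * binary.
 */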

CCL_NAMESPACE_BEGIN

#ifndef WITH_CUDA_DYNLOAD

/* Transparently implement some functions, so the majority of the file does not
 * need to worry about the difference between dynamically loaded and linked
 * CUDA at all.
 */

namespace {

const char *cuewErrorString(CUresult result)
{
        /* We can only give the error code here without major code duplication;
         * that should be enough, since dynamic loading is only disabled by
         * folks who know what they're doing anyway.
         *
         * NOTE: Avoid calling from several threads.
         */
        static string error;
        error = string_printf("%d", result);
        return error.c_str();
}

const char *cuewCompilerPath(void)
{
        return CYCLES_CUDA_NVCC_EXECUTABLE;
}

int cuewCompilerVersion(void)
{
        return (CUDA_VERSION / 100) + (CUDA_VERSION % 100 / 10);
}

}  /* namespace */
#endif  /* WITH_CUDA_DYNLOAD */

class CUDADevice : public Device
{
public:
        DedicatedTaskPool task_pool;
        CUdevice cuDevice;
        CUcontext cuContext;
        CUmodule cuModule;
        map<device_ptr, bool> tex_interp_map;
        int cuDevId;
        int cuDevArchitecture;
        bool first_error;
        bool use_texture_storage;

        struct PixelMem {
                GLuint cuPBO;
                CUgraphicsResource cuPBOresource;
                GLuint cuTexId;
                int w, h;
        };

        map<device_ptr, PixelMem> pixel_mem_map;

        CUdeviceptr cuda_device_ptr(device_ptr mem)
        {
                return (CUdeviceptr)mem;
        }

        static bool have_precompiled_kernels()
        {
                string cubins_path = path_get("lib");
                return path_exists(cubins_path);
        }
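
        /* The "lib" folder checked above holds the precompiled
         * kernel_sm_XX.cubin files shipped with Blender; compile_kernel()
         * below looks there first before falling back to a locally compiled
         * kernel in the user's cache directory. */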

/*#ifdef NDEBUG
#define cuda_abort()
#else
#define cuda_abort() abort()
#endif*/
        void cuda_error_documentation()
        {
                if(first_error) {
                        fprintf(stderr, "\nRefer to the Cycles GPU rendering documentation for possible solutions:\n");
                        fprintf(stderr, "http://www.blender.org/manual/render/cycles/gpu_rendering.html\n\n");
                        first_error = false;
                }
        }

#define cuda_assert(stmt) \
        { \
                CUresult result = stmt; \
                \
                if(result != CUDA_SUCCESS) { \
                        string message = string_printf("CUDA error: %s in %s", cuewErrorString(result), #stmt); \
                        if(error_msg == "") \
                                error_msg = message; \
                        fprintf(stderr, "%s\n", message.c_str()); \
                        /*cuda_abort();*/ \
                        cuda_error_documentation(); \
                } \
        } (void)0
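
        /* A minimal usage sketch: wrap any CUresult-returning driver call,
         * for example:
         *
         *   CUdeviceptr ptr;
         *   cuda_assert(cuMemAlloc(&ptr, size));
         *
         * On failure the message is recorded in error_msg rather than
         * aborting, so the render session can report it and stop cleanly.
         */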

        bool cuda_error_(CUresult result, const string& stmt)
        {
                if(result == CUDA_SUCCESS)
                        return false;

                string message = string_printf("CUDA error at %s: %s", stmt.c_str(), cuewErrorString(result));
                if(error_msg == "")
                        error_msg = message;
                fprintf(stderr, "%s\n", message.c_str());
                cuda_error_documentation();
                return true;
        }

#define cuda_error(stmt) cuda_error_(stmt, #stmt)

        void cuda_error_message(const string& message)
        {
                if(error_msg == "")
                        error_msg = message;
                fprintf(stderr, "%s\n", message.c_str());
                cuda_error_documentation();
        }

        void cuda_push_context()
        {
                cuda_assert(cuCtxSetCurrent(cuContext));
        }

        void cuda_pop_context()
        {
                cuda_assert(cuCtxSetCurrent(NULL));
        }
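
        /* Convention used throughout this file: each entry point makes the
         * context current with cuda_push_context() before any CUDA call and
         * detaches it again with cuda_pop_context() (cuCtxSetCurrent(NULL)),
         * so the context is never left bound to a particular thread. */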

        CUDADevice(DeviceInfo& info, Stats &stats, bool background_)
        : Device(info, stats, background_)
        {
                first_error = true;
                background = background_;
                use_texture_storage = true;

                cuDevId = info.num;
                cuDevice = 0;
                cuContext = 0;

                /* initialize */
                if(cuda_error(cuInit(0)))
                        return;

                /* setup device and context */
                if(cuda_error(cuDeviceGet(&cuDevice, cuDevId)))
                        return;

                CUresult result;

                if(background) {
                        result = cuCtxCreate(&cuContext, 0, cuDevice);
                }
                else {
                        result = cuGLCtxCreate(&cuContext, 0, cuDevice);

                        if(result != CUDA_SUCCESS) {
                                result = cuCtxCreate(&cuContext, 0, cuDevice);
                                background = true;
                        }
                }

                if(cuda_error_(result, "cuCtxCreate"))
                        return;

                int major, minor;
                cuDeviceComputeCapability(&major, &minor, cuDevId);
                cuDevArchitecture = major*100 + minor*10;

                /* In order to use full 6GB of memory on Titan cards, use arrays instead
                 * of textures. On earlier cards this seems slower, but on Titan it is
                 * actually slightly faster in tests. */
                use_texture_storage = (cuDevArchitecture < 300);

                cuda_pop_context();
        }

        ~CUDADevice()
        {
                task_pool.stop();

                cuda_assert(cuCtxDestroy(cuContext));
        }

        bool support_device(const DeviceRequestedFeatures& /*requested_features*/)
        {
                int major, minor;
                cuDeviceComputeCapability(&major, &minor, cuDevId);

                /* We only support sm_20 and above */
                if(major < 2) {
                        cuda_error_message(string_printf("CUDA device supported only with compute capability 2.0 or up, found %d.%d.", major, minor));
                        return false;
                }

                return true;
        }

        string compile_kernel(const DeviceRequestedFeatures& requested_features)
        {
                /* compute cubin name */
                int major, minor;
                cuDeviceComputeCapability(&major, &minor, cuDevId);
                string cubin;

                /* attempt to use kernel provided with Blender */
                cubin = path_get(string_printf("lib/kernel_sm_%d%d.cubin", major, minor));
                VLOG(1) << "Testing for pre-compiled kernel " << cubin;
                if(path_exists(cubin)) {
                        VLOG(1) << "Using precompiled kernel";
                        return cubin;
                }

                /* not found, try to use locally compiled kernel */
                string kernel_path = path_get("kernel");
                string md5 = path_files_md5_hash(kernel_path);

#ifdef KERNEL_USE_ADAPTIVE
                string feature_build_options = requested_features.get_build_options();
                string device_md5 = util_md5_string(feature_build_options);
                cubin = string_printf("cycles_kernel_%s_sm%d%d_%s.cubin",
                                      device_md5.c_str(),
                                      major, minor,
                                      md5.c_str());
#else
                (void)requested_features;
                cubin = string_printf("cycles_kernel_sm%d%d_%s.cubin", major, minor, md5.c_str());
#endif

                cubin = path_user_get(path_join("cache", cubin));
                VLOG(1) << "Testing for locally compiled kernel " << cubin;
                /* if exists already, use it */
                if(path_exists(cubin)) {
                        VLOG(1) << "Using locally compiled kernel";
                        return cubin;
                }

#ifdef _WIN32
                if(have_precompiled_kernels()) {
                        if(major < 2)
                                cuda_error_message(string_printf("CUDA device requires compute capability 2.0 or up, found %d.%d. Your GPU is not supported.", major, minor));
                        else
                                cuda_error_message(string_printf("CUDA binary kernel for this graphics card compute capability (%d.%d) not found.", major, minor));
                        return "";
                }
#endif

                /* if not, find CUDA compiler */
                const char *nvcc = cuewCompilerPath();

                if(nvcc == NULL) {
                        cuda_error_message("CUDA nvcc compiler not found. Install CUDA toolkit in default location.");
                        return "";
                }

                int cuda_version = cuewCompilerVersion();
                VLOG(1) << "Found nvcc " << nvcc << ", CUDA version " << cuda_version;

                if(cuda_version == 0) {
                        cuda_error_message("CUDA nvcc compiler version could not be parsed.");
                        return "";
                }
                if(cuda_version < 60) {
                        printf("Unsupported CUDA version %d.%d detected, you need CUDA 6.5.\n", cuda_version/10, cuda_version%10);
                        return "";
                }
                else if(cuda_version != 65)
                        printf("CUDA version %d.%d detected, build may succeed but only CUDA 6.5 is officially supported.\n", cuda_version/10, cuda_version%10);

                /* compile */
                string kernel = path_join(kernel_path, path_join("kernels", path_join("cuda", "kernel.cu")));
                string include = kernel_path;
                const int machine = system_cpu_bits();

                double starttime = time_dt();
                printf("Compiling CUDA kernel ...\n");

                path_create_directories(cubin);

                string command = string_printf("\"%s\" -arch=sm_%d%d -m%d --cubin \"%s\" "
                        "-o \"%s\" --ptxas-options=\"-v\" --use_fast_math -I\"%s\" "
                        "-DNVCC -D__KERNEL_CUDA_VERSION__=%d",
                        nvcc, major, minor, machine, kernel.c_str(), cubin.c_str(), include.c_str(), cuda_version);
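
                /* As an illustration (paths and versions here are placeholders,
                 * assuming a 64-bit build targeting sm_52 with CUDA 6.5), the
                 * assembled command resembles:
                 *
                 *   nvcc -arch=sm_52 -m64 --cubin "<kernel_path>/kernels/cuda/kernel.cu"
                 *        -o "<user_cache>/cycles_kernel_sm52_<md5>.cubin"
                 *        --ptxas-options="-v" --use_fast_math -I"<kernel_path>"
                 *        -DNVCC -D__KERNEL_CUDA_VERSION__=65
                 */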

#ifdef KERNEL_USE_ADAPTIVE
                command += " " + feature_build_options;
#endif

                const char* extra_cflags = getenv("CYCLES_CUDA_EXTRA_CFLAGS");
                if(extra_cflags) {
                        command += string(" ") + string(extra_cflags);
                }

#ifdef WITH_CYCLES_DEBUG
                command += " -D__KERNEL_DEBUG__";
#endif

                printf("%s\n", command.c_str());

                if(system(command.c_str()) == -1) {
                        cuda_error_message("Failed to execute compilation command, see console for details.");
                        return "";
                }

                /* verify if compilation succeeded */
                if(!path_exists(cubin)) {
                        cuda_error_message("CUDA kernel compilation failed, see console for details.");
                        return "";
                }

                printf("Kernel compilation finished in %.2lfs.\n", time_dt() - starttime);

                return cubin;
        }

        bool load_kernels(const DeviceRequestedFeatures& requested_features)
        {
                /* check if cuda init succeeded */
                if(cuContext == 0)
                        return false;

                /* check if GPU is supported */
                if(!support_device(requested_features))
                        return false;

                /* get kernel */
                string cubin = compile_kernel(requested_features);

                if(cubin == "")
                        return false;

                /* open module */
                cuda_push_context();

                string cubin_data;
                CUresult result;

                if(path_read_text(cubin, cubin_data))
                        result = cuModuleLoadData(&cuModule, cubin_data.c_str());
                else
                        result = CUDA_ERROR_FILE_NOT_FOUND;

                if(cuda_error_(result, "cuModuleLoad"))
                        cuda_error_message(string_printf("Failed loading CUDA kernel %s.", cubin.c_str()));

                cuda_pop_context();

                return (result == CUDA_SUCCESS);
        }

        void mem_alloc(device_memory& mem, MemoryType /*type*/)
        {
                cuda_push_context();
                CUdeviceptr device_pointer;
                size_t size = mem.memory_size();
                cuda_assert(cuMemAlloc(&device_pointer, size));
                mem.device_pointer = (device_ptr)device_pointer;
                mem.device_size = size;
                stats.mem_alloc(size);
                cuda_pop_context();
        }

        void mem_copy_to(device_memory& mem)
        {
                cuda_push_context();
                if(mem.device_pointer)
                        cuda_assert(cuMemcpyHtoD(cuda_device_ptr(mem.device_pointer), (void*)mem.data_pointer, mem.memory_size()));
                cuda_pop_context();
        }

        void mem_copy_from(device_memory& mem, int y, int w, int h, int elem)
        {
                size_t offset = elem*y*w;
                size_t size = elem*w*h;

                cuda_push_context();
                if(mem.device_pointer) {
                        cuda_assert(cuMemcpyDtoH((uchar*)mem.data_pointer + offset,
                                                 (CUdeviceptr)(mem.device_pointer + offset), size));
                }
                else {
                        memset((char*)mem.data_pointer + offset, 0, size);
                }
                cuda_pop_context();
        }

        void mem_zero(device_memory& mem)
        {
                memset((void*)mem.data_pointer, 0, mem.memory_size());

                cuda_push_context();
                if(mem.device_pointer)
                        cuda_assert(cuMemsetD8(cuda_device_ptr(mem.device_pointer), 0, mem.memory_size()));
                cuda_pop_context();
        }

        void mem_free(device_memory& mem)
        {
                if(mem.device_pointer) {
                        cuda_push_context();
                        cuda_assert(cuMemFree(cuda_device_ptr(mem.device_pointer)));
                        cuda_pop_context();

                        mem.device_pointer = 0;

                        stats.mem_free(mem.device_size);
                        mem.device_size = 0;
                }
        }

        void const_copy_to(const char *name, void *host, size_t size)
        {
                CUdeviceptr mem;
                size_t bytes;

                cuda_push_context();
                cuda_assert(cuModuleGetGlobal(&mem, &bytes, cuModule, name));
                //assert(bytes == size);
                cuda_assert(cuMemcpyHtoD(mem, host, size));
                cuda_pop_context();
        }

        void tex_alloc(const char *name,
                       device_memory& mem,
                       InterpolationType interpolation,
                       ExtensionType extension)
        {
                VLOG(1) << "Texture allocate: " << name << ", " << mem.memory_size() << " bytes.";

                string bind_name = name;
                if(mem.data_depth > 1) {
                        /* Kernel uses different bind names for 2d and 3d float textures,
                         * so we have to adjust a couple of things here.
                         */
                        vector<string> tokens;
                        string_split(tokens, name, "_");
                        bind_name = string_printf("__tex_image_%s3d_%s",
                                                  tokens[2].c_str(),
                                                  tokens[3].c_str());
                }
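
                /* With texture names of the form "__tex_image_<type>_<id>",
                 * e.g. "__tex_image_float_000" becomes "__tex_image_float3d_000"
                 * here (this assumes string_split() drops the empty tokens
                 * produced by the leading underscores). */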

                /* determine format */
                CUarray_format_enum format;
                size_t dsize = datatype_size(mem.data_type);
                size_t size = mem.memory_size();
                bool use_texture = (interpolation != INTERPOLATION_NONE) || use_texture_storage;

                if(use_texture) {

                        switch(mem.data_type) {
                                case TYPE_UCHAR: format = CU_AD_FORMAT_UNSIGNED_INT8; break;
                                case TYPE_UINT: format = CU_AD_FORMAT_UNSIGNED_INT32; break;
                                case TYPE_INT: format = CU_AD_FORMAT_SIGNED_INT32; break;
                                case TYPE_FLOAT: format = CU_AD_FORMAT_FLOAT; break;
                                default: assert(0); return;
                        }

                        CUtexref texref = NULL;

                        cuda_push_context();
                        cuda_assert(cuModuleGetTexRef(&texref, cuModule, bind_name.c_str()));

                        if(!texref) {
                                cuda_pop_context();
                                return;
                        }

                        if(interpolation != INTERPOLATION_NONE) {
                                CUarray handle = NULL;

                                if(mem.data_depth > 1) {
                                        CUDA_ARRAY3D_DESCRIPTOR desc;

                                        desc.Width = mem.data_width;
                                        desc.Height = mem.data_height;
                                        desc.Depth = mem.data_depth;
                                        desc.Format = format;
                                        desc.NumChannels = mem.data_elements;
                                        desc.Flags = 0;

                                        cuda_assert(cuArray3DCreate(&handle, &desc));
                                }
                                else {
                                        CUDA_ARRAY_DESCRIPTOR desc;

                                        desc.Width = mem.data_width;
                                        desc.Height = mem.data_height;
                                        desc.Format = format;
                                        desc.NumChannels = mem.data_elements;

                                        cuda_assert(cuArrayCreate(&handle, &desc));
                                }

                                if(!handle) {
                                        cuda_pop_context();
                                        return;
                                }

                                if(mem.data_depth > 1) {
                                        CUDA_MEMCPY3D param;
                                        memset(&param, 0, sizeof(param));
                                        param.dstMemoryType = CU_MEMORYTYPE_ARRAY;
                                        param.dstArray = handle;
                                        param.srcMemoryType = CU_MEMORYTYPE_HOST;
                                        param.srcHost = (void*)mem.data_pointer;
                                        param.srcPitch = mem.data_width*dsize*mem.data_elements;
                                        param.WidthInBytes = param.srcPitch;
                                        param.Height = mem.data_height;
                                        param.Depth = mem.data_depth;

                                        cuda_assert(cuMemcpy3D(&param));
                                }
                                else if(mem.data_height > 1) {
                                        CUDA_MEMCPY2D param;
                                        memset(&param, 0, sizeof(param));
                                        param.dstMemoryType = CU_MEMORYTYPE_ARRAY;
                                        param.dstArray = handle;
                                        param.srcMemoryType = CU_MEMORYTYPE_HOST;
                                        param.srcHost = (void*)mem.data_pointer;
                                        param.srcPitch = mem.data_width*dsize*mem.data_elements;
                                        param.WidthInBytes = param.srcPitch;
                                        param.Height = mem.data_height;

                                        cuda_assert(cuMemcpy2D(&param));
                                }
                                else
                                        cuda_assert(cuMemcpyHtoA(handle, 0, (void*)mem.data_pointer, size));

                                cuda_assert(cuTexRefSetArray(texref, handle, CU_TRSA_OVERRIDE_FORMAT));

                                if(interpolation == INTERPOLATION_CLOSEST) {
                                        cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_POINT));
                                }
                                else if(interpolation == INTERPOLATION_LINEAR) {
                                        cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_LINEAR));
                                }
                                else { /* CUBIC and SMART are unsupported for CUDA */
                                        cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_LINEAR));
                                }
                                cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_NORMALIZED_COORDINATES));

                                mem.device_pointer = (device_ptr)handle;
                                mem.device_size = size;

                                stats.mem_alloc(size);
                        }
                        else {
                                cuda_pop_context();

                                mem_alloc(mem, MEM_READ_ONLY);
                                mem_copy_to(mem);

                                cuda_push_context();

                                cuda_assert(cuTexRefSetAddress(NULL, texref, cuda_device_ptr(mem.device_pointer), size));
                                cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_POINT));
                                cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_READ_AS_INTEGER));
                        }

                        switch(extension) {
                                case EXTENSION_REPEAT:
                                        cuda_assert(cuTexRefSetAddressMode(texref, 0, CU_TR_ADDRESS_MODE_WRAP));
                                        cuda_assert(cuTexRefSetAddressMode(texref, 1, CU_TR_ADDRESS_MODE_WRAP));
                                        break;
                                case EXTENSION_EXTEND:
                                        cuda_assert(cuTexRefSetAddressMode(texref, 0, CU_TR_ADDRESS_MODE_CLAMP));
                                        cuda_assert(cuTexRefSetAddressMode(texref, 1, CU_TR_ADDRESS_MODE_CLAMP));
                                        break;
                                case EXTENSION_CLIP:
                                        cuda_assert(cuTexRefSetAddressMode(texref, 0, CU_TR_ADDRESS_MODE_BORDER));
                                        cuda_assert(cuTexRefSetAddressMode(texref, 1, CU_TR_ADDRESS_MODE_BORDER));
                                        break;
                                default:
                                        assert(0);
                        }
                        cuda_assert(cuTexRefSetFormat(texref, format, mem.data_elements));

                        cuda_pop_context();
                }
                else {
                        mem_alloc(mem, MEM_READ_ONLY);
                        mem_copy_to(mem);

                        cuda_push_context();

                        CUdeviceptr cumem;
                        size_t cubytes;

                        cuda_assert(cuModuleGetGlobal(&cumem, &cubytes, cuModule, bind_name.c_str()));

                        if(cubytes == 8) {
                                /* 64 bit device pointer */
                                uint64_t ptr = mem.device_pointer;
                                cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
                        }
                        else {
                                /* 32 bit device pointer */
                                uint32_t ptr = (uint32_t)mem.device_pointer;
                                cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
                        }

                        cuda_pop_context();
                }

                tex_interp_map[mem.device_pointer] = (interpolation != INTERPOLATION_NONE);
        }

        void tex_free(device_memory& mem)
        {
                if(mem.device_pointer) {
                        if(tex_interp_map[mem.device_pointer]) {
                                cuda_push_context();
                                cuArrayDestroy((CUarray)mem.device_pointer);
                                cuda_pop_context();

                                tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
                                mem.device_pointer = 0;

                                stats.mem_free(mem.device_size);
                                mem.device_size = 0;
                        }
                        else {
                                tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
                                mem_free(mem);
                        }
                }
        }

        void path_trace(RenderTile& rtile, int sample, bool branched)
        {
                if(have_error())
                        return;

                cuda_push_context();

                CUfunction cuPathTrace;
                CUdeviceptr d_buffer = cuda_device_ptr(rtile.buffer);
                CUdeviceptr d_rng_state = cuda_device_ptr(rtile.rng_state);

                /* get kernel function */
                if(branched) {
                        cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_branched_path_trace"));
                }
                else {
                        cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_path_trace"));
                }

                if(have_error())
                        return;

                /* pass in parameters */
                void *args[] = {&d_buffer,
                                &d_rng_state,
                                &sample,
                                &rtile.x,
                                &rtile.y,
                                &rtile.w,
                                &rtile.h,
                                &rtile.offset,
                                &rtile.stride};

                /* launch kernel */
                int threads_per_block;
                cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuPathTrace));

                /*int num_registers;
                cuda_assert(cuFuncGetAttribute(&num_registers, CU_FUNC_ATTRIBUTE_NUM_REGS, cuPathTrace));

                printf("threads_per_block %d\n", threads_per_block);
                printf("num_registers %d\n", num_registers);*/

                int xthreads = (int)sqrt((float)threads_per_block);
                int ythreads = (int)sqrt((float)threads_per_block);
                int xblocks = (rtile.w + xthreads - 1)/xthreads;
                int yblocks = (rtile.h + ythreads - 1)/ythreads;
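
                /* E.g. if threads_per_block is 1024 this gives 32x32 thread
                 * blocks, so a 256x256 tile is covered by an 8x8 block grid. */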

                cuda_assert(cuFuncSetCacheConfig(cuPathTrace, CU_FUNC_CACHE_PREFER_L1));

                cuda_assert(cuLaunchKernel(cuPathTrace,
                                           xblocks, yblocks, 1, /* blocks */
                                           xthreads, ythreads, 1, /* threads */
                                           0, 0, args, 0));

                cuda_assert(cuCtxSynchronize());

                cuda_pop_context();
        }

        void film_convert(DeviceTask& task, device_ptr buffer, device_ptr rgba_byte, device_ptr rgba_half)
        {
                if(have_error())
                        return;

                cuda_push_context();

                CUfunction cuFilmConvert;
                CUdeviceptr d_rgba = map_pixels((rgba_byte)? rgba_byte: rgba_half);
                CUdeviceptr d_buffer = cuda_device_ptr(buffer);

                /* get kernel function */
                if(rgba_half) {
                        cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_half_float"));
                }
                else {
                        cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_byte"));
                }

                float sample_scale = 1.0f/(task.sample + 1);

                /* pass in parameters */
                void *args[] = {&d_rgba,
                                &d_buffer,
                                &sample_scale,
                                &task.x,
                                &task.y,
                                &task.w,
                                &task.h,
                                &task.offset,
                                &task.stride};

                /* launch kernel */
                int threads_per_block;
                cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuFilmConvert));

                int xthreads = (int)sqrt((float)threads_per_block);
                int ythreads = (int)sqrt((float)threads_per_block);
                int xblocks = (task.w + xthreads - 1)/xthreads;
                int yblocks = (task.h + ythreads - 1)/ythreads;

                cuda_assert(cuFuncSetCacheConfig(cuFilmConvert, CU_FUNC_CACHE_PREFER_L1));

                cuda_assert(cuLaunchKernel(cuFilmConvert,
                                           xblocks, yblocks, 1, /* blocks */
                                           xthreads, ythreads, 1, /* threads */
                                           0, 0, args, 0));

                unmap_pixels((rgba_byte)? rgba_byte: rgba_half);

                cuda_pop_context();
        }

        void shader(DeviceTask& task)
        {
                if(have_error())
                        return;

                cuda_push_context();

                CUfunction cuShader;
                CUdeviceptr d_input = cuda_device_ptr(task.shader_input);
                CUdeviceptr d_output = cuda_device_ptr(task.shader_output);
                CUdeviceptr d_output_luma = cuda_device_ptr(task.shader_output_luma);

                /* get kernel function */
                if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
                        cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_bake"));
                }
                else {
                        cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_shader"));
                }

                /* do tasks in smaller chunks, so we can cancel it */
                const int shader_chunk_size = 65536;
                const int start = task.shader_x;
                const int end = task.shader_x + task.shader_w;
                int offset = task.offset;

                bool canceled = false;
                for(int sample = 0; sample < task.num_samples && !canceled; sample++) {
                        for(int shader_x = start; shader_x < end; shader_x += shader_chunk_size) {
                                int shader_w = min(shader_chunk_size, end - shader_x);

                                /* pass in parameters */
                                void *args[8];
                                int arg = 0;
                                args[arg++] = &d_input;
                                args[arg++] = &d_output;
                                if(task.shader_eval_type < SHADER_EVAL_BAKE) {
                                        args[arg++] = &d_output_luma;
                                }
                                args[arg++] = &task.shader_eval_type;
                                if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
                                        args[arg++] = &task.shader_filter;
                                }
                                args[arg++] = &shader_x;
                                args[arg++] = &shader_w;
                                args[arg++] = &offset;
                                args[arg++] = &sample;

                                /* launch kernel */
                                int threads_per_block;
                                cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuShader));

                                int xblocks = (shader_w + threads_per_block - 1)/threads_per_block;

                                cuda_assert(cuFuncSetCacheConfig(cuShader, CU_FUNC_CACHE_PREFER_L1));
                                cuda_assert(cuLaunchKernel(cuShader,
                                                           xblocks, 1, 1, /* blocks */
                                                           threads_per_block, 1, 1, /* threads */
                                                           0, 0, args, 0));

                                cuda_assert(cuCtxSynchronize());

                                if(task.get_cancel()) {
                                        canceled = true;
                                        break;
                                }
                        }

                        task.update_progress(NULL);
                }

                cuda_pop_context();
        }

        CUdeviceptr map_pixels(device_ptr mem)
        {
                if(!background) {
                        PixelMem pmem = pixel_mem_map[mem];
                        CUdeviceptr buffer;

                        size_t bytes;
                        cuda_assert(cuGraphicsMapResources(1, &pmem.cuPBOresource, 0));
                        cuda_assert(cuGraphicsResourceGetMappedPointer(&buffer, &bytes, pmem.cuPBOresource));

                        return buffer;
                }

                return cuda_device_ptr(mem);
        }

        void unmap_pixels(device_ptr mem)
        {
                if(!background) {
                        PixelMem pmem = pixel_mem_map[mem];

                        cuda_assert(cuGraphicsUnmapResources(1, &pmem.cuPBOresource, 0));
                }
        }

        void pixels_alloc(device_memory& mem)
        {
                if(!background) {
                        PixelMem pmem;

                        pmem.w = mem.data_width;
                        pmem.h = mem.data_height;

                        cuda_push_context();

                        glGenBuffers(1, &pmem.cuPBO);
                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
                        if(mem.data_type == TYPE_HALF)
                                glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(GLhalf)*4, NULL, GL_DYNAMIC_DRAW);
                        else
                                glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(uint8_t)*4, NULL, GL_DYNAMIC_DRAW);

                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);

                        glGenTextures(1, &pmem.cuTexId);
                        glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
                        if(mem.data_type == TYPE_HALF)
                                glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F_ARB, pmem.w, pmem.h, 0, GL_RGBA, GL_HALF_FLOAT, NULL);
                        else
                                glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, pmem.w, pmem.h, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
                        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
                        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
                        glBindTexture(GL_TEXTURE_2D, 0);

                        CUresult result = cuGraphicsGLRegisterBuffer(&pmem.cuPBOresource, pmem.cuPBO, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);

                        if(result == CUDA_SUCCESS) {
                                cuda_pop_context();

                                mem.device_pointer = pmem.cuTexId;
                                pixel_mem_map[mem.device_pointer] = pmem;

                                mem.device_size = mem.memory_size();
                                stats.mem_alloc(mem.device_size);

                                return;
                        }
                        else {
                                /* failed to register buffer, fallback to no interop */
                                glDeleteBuffers(1, &pmem.cuPBO);
                                glDeleteTextures(1, &pmem.cuTexId);

                                cuda_pop_context();

                                background = true;
                        }
                }

                Device::pixels_alloc(mem);
        }

        void pixels_copy_from(device_memory& mem, int y, int w, int h)
        {
                if(!background) {
                        PixelMem pmem = pixel_mem_map[mem.device_pointer];

                        cuda_push_context();

                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
                        uchar *pixels = (uchar*)glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_READ_ONLY);
                        size_t offset = sizeof(uchar)*4*y*w;
                        memcpy((uchar*)mem.data_pointer + offset, pixels + offset, sizeof(uchar)*4*w*h);
                        glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);

                        cuda_pop_context();

                        return;
                }

                Device::pixels_copy_from(mem, y, w, h);
        }

        void pixels_free(device_memory& mem)
        {
                if(mem.device_pointer) {
                        if(!background) {
                                PixelMem pmem = pixel_mem_map[mem.device_pointer];

                                cuda_push_context();

                                cuda_assert(cuGraphicsUnregisterResource(pmem.cuPBOresource));
                                glDeleteBuffers(1, &pmem.cuPBO);
                                glDeleteTextures(1, &pmem.cuTexId);

                                cuda_pop_context();

                                pixel_mem_map.erase(pixel_mem_map.find(mem.device_pointer));
                                mem.device_pointer = 0;

                                stats.mem_free(mem.device_size);
                                mem.device_size = 0;

                                return;
                        }

                        Device::pixels_free(mem);
                }
        }

        void draw_pixels(device_memory& mem, int y, int w, int h, int dx, int dy, int width, int height, bool transparent,
                const DeviceDrawParams &draw_params)
        {
                if(!background) {
                        PixelMem pmem = pixel_mem_map[mem.device_pointer];
                        float *vpointer;

                        cuda_push_context();

                        /* for multi devices, this assumes the inefficient method that we allocate
                         * all pixels on the device even though we only render to a subset */
                        size_t offset = 4*y*w;

                        if(mem.data_type == TYPE_HALF)
                                offset *= sizeof(GLhalf);
                        else
                                offset *= sizeof(uint8_t);

                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
                        glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
                        if(mem.data_type == TYPE_HALF)
                                glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_HALF_FLOAT, (void*)offset);
                        else
                                glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, (void*)offset);
                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);

                        glEnable(GL_TEXTURE_2D);

                        if(transparent) {
                                glEnable(GL_BLEND);
                                glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
                        }

                        glColor3f(1.0f, 1.0f, 1.0f);

                        if(draw_params.bind_display_space_shader_cb) {
                                draw_params.bind_display_space_shader_cb();
                        }

                        if(!vertex_buffer)
                                glGenBuffers(1, &vertex_buffer);

                        glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
                        /* invalidate old contents - avoids stalling if buffer is still waiting in queue to be rendered */
                        glBufferData(GL_ARRAY_BUFFER, 16 * sizeof(float), NULL, GL_STREAM_DRAW);

                        vpointer = (float *)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);

                        if(vpointer) {
                                /* texture coordinate - vertex pair */
                                vpointer[0] = 0.0f;
                                vpointer[1] = 0.0f;
                                vpointer[2] = dx;
                                vpointer[3] = dy;

                                vpointer[4] = (float)w/(float)pmem.w;
                                vpointer[5] = 0.0f;
                                vpointer[6] = (float)width + dx;
                                vpointer[7] = dy;

                                vpointer[8] = (float)w/(float)pmem.w;
                                vpointer[9] = (float)h/(float)pmem.h;
                                vpointer[10] = (float)width + dx;
                                vpointer[11] = (float)height + dy;

                                vpointer[12] = 0.0f;
                                vpointer[13] = (float)h/(float)pmem.h;
                                vpointer[14] = dx;
                                vpointer[15] = (float)height + dy;

                                glUnmapBuffer(GL_ARRAY_BUFFER);
                        }

                        glTexCoordPointer(2, GL_FLOAT, 4 * sizeof(float), 0);
                        glVertexPointer(2, GL_FLOAT, 4 * sizeof(float), (char *)NULL + 2 * sizeof(float));

                        glEnableClientState(GL_VERTEX_ARRAY);
                        glEnableClientState(GL_TEXTURE_COORD_ARRAY);

                        glDrawArrays(GL_TRIANGLE_FAN, 0, 4);

                        glDisableClientState(GL_TEXTURE_COORD_ARRAY);
                        glDisableClientState(GL_VERTEX_ARRAY);

                        glBindBuffer(GL_ARRAY_BUFFER, 0);

                        if(draw_params.unbind_display_space_shader_cb) {
                                draw_params.unbind_display_space_shader_cb();
                        }

                        if(transparent)
                                glDisable(GL_BLEND);

                        glBindTexture(GL_TEXTURE_2D, 0);
                        glDisable(GL_TEXTURE_2D);

                        cuda_pop_context();

                        return;
                }

                Device::draw_pixels(mem, y, w, h, dx, dy, width, height, transparent, draw_params);
        }

        void thread_run(DeviceTask *task)
        {
                if(task->type == DeviceTask::PATH_TRACE) {
                        RenderTile tile;

                        bool branched = task->integrator_branched;

                        /* keep rendering tiles until done */
                        while(task->acquire_tile(this, tile)) {
                                int start_sample = tile.start_sample;
                                int end_sample = tile.start_sample + tile.num_samples;

                                for(int sample = start_sample; sample < end_sample; sample++) {
                                        if(task->get_cancel()) {
                                                if(task->need_finish_queue == false)
                                                        break;
                                        }

                                        path_trace(tile, sample, branched);

                                        tile.sample = sample + 1;

                                        task->update_progress(&tile);
                                }

                                task->release_tile(tile);
                        }
                }
                else if(task->type == DeviceTask::SHADER) {
                        shader(*task);

                        cuda_push_context();
                        cuda_assert(cuCtxSynchronize());
                        cuda_pop_context();
                }
        }

        class CUDADeviceTask : public DeviceTask {
        public:
                CUDADeviceTask(CUDADevice *device, DeviceTask& task)
                : DeviceTask(task)
                {
                        run = function_bind(&CUDADevice::thread_run, device, this);
                }
        };
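
        /* Tasks wrapped this way are pushed onto the DedicatedTaskPool by
         * task_add() below, which runs thread_run() on the pool's worker
         * thread; only FILM_CONVERT tasks bypass the pool, since they need
         * OpenGL access from the main thread. */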

        int get_split_task_count(DeviceTask& /*task*/)
        {
                return 1;
        }

        void task_add(DeviceTask& task)
        {
                if(task.type == DeviceTask::FILM_CONVERT) {
                        /* must be done in main thread due to opengl access */
                        film_convert(task, task.buffer, task.rgba_byte, task.rgba_half);

                        cuda_push_context();
                        cuda_assert(cuCtxSynchronize());
                        cuda_pop_context();
                }
                else {
                        task_pool.push(new CUDADeviceTask(this, task));
                }
        }

        void task_wait()
        {
                task_pool.wait();
        }

        void task_cancel()
        {
                task_pool.cancel();
        }
};

bool device_cuda_init(void)
{
#ifdef WITH_CUDA_DYNLOAD
        static bool initialized = false;
        static bool result = false;

        if(initialized)
                return result;

        initialized = true;
        int cuew_result = cuewInit();
        if(cuew_result == CUEW_SUCCESS) {
                VLOG(1) << "CUEW initialization succeeded";
                if(CUDADevice::have_precompiled_kernels()) {
                        VLOG(1) << "Found precompiled kernels";
                        result = true;
                }
#ifndef _WIN32
                else if(cuewCompilerPath() != NULL) {
                        VLOG(1) << "Found CUDA compiler " << cuewCompilerPath();
                        result = true;
                }
                else {
                        VLOG(1) << "Neither precompiled kernels nor CUDA compiler was found,"
                                << " unable to use CUDA";
                }
#endif
        }
        else {
                VLOG(1) << "CUEW initialization failed: "
                        << ((cuew_result == CUEW_ERROR_ATEXIT_FAILED)
                            ? "Error setting up atexit() handler"
                            : "Error opening the library");
        }

        return result;
#else  /* WITH_CUDA_DYNLOAD */
        return true;
#endif /* WITH_CUDA_DYNLOAD */
}

Device *device_cuda_create(DeviceInfo& info, Stats &stats, bool background)
{
        return new CUDADevice(info, stats, background);
}

void device_cuda_info(vector<DeviceInfo>& devices)
{
        CUresult result;
        int count = 0;

        result = cuInit(0);
        if(result != CUDA_SUCCESS) {
                if(result != CUDA_ERROR_NO_DEVICE)
                        fprintf(stderr, "CUDA cuInit: %s\n", cuewErrorString(result));
                return;
        }

        result = cuDeviceGetCount(&count);
        if(result != CUDA_SUCCESS) {
                fprintf(stderr, "CUDA cuDeviceGetCount: %s\n", cuewErrorString(result));
                return;
        }

        vector<DeviceInfo> display_devices;

        for(int num = 0; num < count; num++) {
                char name[256];
                int attr;

                if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS)
                        continue;

                int major, minor;
                cuDeviceComputeCapability(&major, &minor, num);
                if(major < 2) {
                        continue;
                }

                DeviceInfo info;

                info.type = DEVICE_CUDA;
                info.description = string(name);
                info.id = string_printf("CUDA_%d", num);
                info.num = num;

                info.advanced_shading = (major >= 2);
                info.extended_images = (major >= 3);
                info.pack_images = false;

                /* if device has a kernel timeout, assume it is used for display */
                if(cuDeviceGetAttribute(&attr, CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT, num) == CUDA_SUCCESS && attr == 1) {
                        info.display_device = true;
                        display_devices.push_back(info);
                }
                else
                        devices.push_back(info);
        }

        if(!display_devices.empty())
                devices.insert(devices.end(), display_devices.begin(), display_devices.end());
}

string device_cuda_capabilities(void)
{
        CUresult result = cuInit(0);
        if(result != CUDA_SUCCESS) {
                if(result != CUDA_ERROR_NO_DEVICE) {
                        return string("Error initializing CUDA: ") + cuewErrorString(result);
                }
                return "No CUDA device found\n";
        }

        int count;
        result = cuDeviceGetCount(&count);
        if(result != CUDA_SUCCESS) {
                return string("Error getting devices: ") + cuewErrorString(result);
        }

        string capabilities = "";
        for(int num = 0; num < count; num++) {
                char name[256];
                if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS) {
                        continue;
                }
                capabilities += string("\t") + name + "\n";
                int value;
#define GET_ATTR(attr) \
                { \
                        if(cuDeviceGetAttribute(&value, \
                                                CU_DEVICE_ATTRIBUTE_##attr, \
                                                num) == CUDA_SUCCESS) \
                        { \
                                capabilities += string_printf("\t\tCU_DEVICE_ATTRIBUTE_" #attr "\t\t\t%d\n", \
                                                              value); \
                        } \
                } (void)0
                /* TODO(sergey): Strip all attributes which are not useful for us
                 * or do not depend on the driver.
                 */
                GET_ATTR(MAX_THREADS_PER_BLOCK);
                GET_ATTR(MAX_BLOCK_DIM_X);
                GET_ATTR(MAX_BLOCK_DIM_Y);
                GET_ATTR(MAX_BLOCK_DIM_Z);
                GET_ATTR(MAX_GRID_DIM_X);
                GET_ATTR(MAX_GRID_DIM_Y);
                GET_ATTR(MAX_GRID_DIM_Z);
                GET_ATTR(MAX_SHARED_MEMORY_PER_BLOCK);
                GET_ATTR(SHARED_MEMORY_PER_BLOCK);
                GET_ATTR(TOTAL_CONSTANT_MEMORY);
                GET_ATTR(WARP_SIZE);
                GET_ATTR(MAX_PITCH);
                GET_ATTR(MAX_REGISTERS_PER_BLOCK);
                GET_ATTR(REGISTERS_PER_BLOCK);
                GET_ATTR(CLOCK_RATE);
                GET_ATTR(TEXTURE_ALIGNMENT);
                GET_ATTR(GPU_OVERLAP);
                GET_ATTR(MULTIPROCESSOR_COUNT);
                GET_ATTR(KERNEL_EXEC_TIMEOUT);
                GET_ATTR(INTEGRATED);
                GET_ATTR(CAN_MAP_HOST_MEMORY);
                GET_ATTR(COMPUTE_MODE);
                GET_ATTR(MAXIMUM_TEXTURE1D_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE2D_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE2D_HEIGHT);
                GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT);
                GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH);
                GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_HEIGHT);
                GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_LAYERS);
                GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_HEIGHT);
                GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES);
                GET_ATTR(SURFACE_ALIGNMENT);
                GET_ATTR(CONCURRENT_KERNELS);
                GET_ATTR(ECC_ENABLED);
                GET_ATTR(TCC_DRIVER);
                GET_ATTR(MEMORY_CLOCK_RATE);
                GET_ATTR(GLOBAL_MEMORY_BUS_WIDTH);
                GET_ATTR(L2_CACHE_SIZE);
                GET_ATTR(MAX_THREADS_PER_MULTIPROCESSOR);
                GET_ATTR(ASYNC_ENGINE_COUNT);
                GET_ATTR(UNIFIED_ADDRESSING);
                GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_LAYERS);
                GET_ATTR(CAN_TEX2D_GATHER);
                GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_HEIGHT);
                GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE);
                GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE);
                GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE);
                GET_ATTR(TEXTURE_PITCH_ALIGNMENT);
                GET_ATTR(MAXIMUM_TEXTURECUBEMAP_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS);
                GET_ATTR(MAXIMUM_SURFACE1D_WIDTH);
                GET_ATTR(MAXIMUM_SURFACE2D_WIDTH);
                GET_ATTR(MAXIMUM_SURFACE2D_HEIGHT);
                GET_ATTR(MAXIMUM_SURFACE3D_WIDTH);
                GET_ATTR(MAXIMUM_SURFACE3D_HEIGHT);
                GET_ATTR(MAXIMUM_SURFACE3D_DEPTH);
                GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_WIDTH);
                GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_LAYERS);
                GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_WIDTH);
                GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_HEIGHT);
                GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_LAYERS);
                GET_ATTR(MAXIMUM_SURFACECUBEMAP_WIDTH);
                GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH);
                GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS);
                GET_ATTR(MAXIMUM_TEXTURE1D_LINEAR_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_HEIGHT);
                GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_PITCH);
                GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT);
                GET_ATTR(COMPUTE_CAPABILITY_MAJOR);
                GET_ATTR(COMPUTE_CAPABILITY_MINOR);
                GET_ATTR(MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH);
                GET_ATTR(STREAM_PRIORITIES_SUPPORTED);
                GET_ATTR(GLOBAL_L1_CACHE_SUPPORTED);
                GET_ATTR(LOCAL_L1_CACHE_SUPPORTED);
                GET_ATTR(MAX_SHARED_MEMORY_PER_MULTIPROCESSOR);
                GET_ATTR(MAX_REGISTERS_PER_MULTIPROCESSOR);
                GET_ATTR(MANAGED_MEMORY);
                GET_ATTR(MULTI_GPU_BOARD);
                GET_ATTR(MULTI_GPU_BOARD_GROUP_ID);
#undef GET_ATTR
                capabilities += "\n";
        }

        return capabilities;
}

CCL_NAMESPACE_END