Cycles: Code cleanup, spaces around keywords
intern/cycles/device/device_cuda.cpp
/*
 * Copyright 2011-2013 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "device.h"
#include "device_intern.h"

#include "buffers.h"

#include "cuew.h"
#include "util_debug.h"
#include "util_logging.h"
#include "util_map.h"
#include "util_opengl.h"
#include "util_path.h"
#include "util_string.h"
#include "util_system.h"
#include "util_types.h"
#include "util_time.h"

CCL_NAMESPACE_BEGIN

class CUDADevice : public Device
{
public:
        DedicatedTaskPool task_pool;
        CUdevice cuDevice;
        CUcontext cuContext;
        CUmodule cuModule;
        map<device_ptr, bool> tex_interp_map;
        int cuDevId;
        int cuDevArchitecture;
        bool first_error;
        bool use_texture_storage;

        struct PixelMem {
                GLuint cuPBO;
                CUgraphicsResource cuPBOresource;
                GLuint cuTexId;
                int w, h;
        };

        map<device_ptr, PixelMem> pixel_mem_map;

        CUdeviceptr cuda_device_ptr(device_ptr mem)
        {
                return (CUdeviceptr)mem;
        }

        static bool have_precompiled_kernels()
        {
                string cubins_path = path_get("lib");
                return path_exists(cubins_path);
        }

/*#ifdef NDEBUG
#define cuda_abort()
#else
#define cuda_abort() abort()
#endif*/
        void cuda_error_documentation()
        {
                if(first_error) {
                        fprintf(stderr, "\nRefer to the Cycles GPU rendering documentation for possible solutions:\n");
                        fprintf(stderr, "http://www.blender.org/manual/render/cycles/gpu_rendering.html\n\n");
                        first_error = false;
                }
        }

#define cuda_assert(stmt) \
        { \
                CUresult result = stmt; \
                \
                if(result != CUDA_SUCCESS) { \
                        string message = string_printf("CUDA error: %s in %s", cuewErrorString(result), #stmt); \
                        if(error_msg == "") \
                                error_msg = message; \
                        fprintf(stderr, "%s\n", message.c_str()); \
                        /*cuda_abort();*/ \
                        cuda_error_documentation(); \
                } \
        } (void)0
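
        /* Illustrative note (not part of the original source): cuda_assert()
         * wraps any CUDA driver API call returning a CUresult, recording the
         * first error for the UI and logging every failure, for example:
         *
         *   cuda_assert(cuMemAlloc(&device_pointer, size));
         *
         * The trailing (void)0 makes the macro usable as a single statement. */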

        bool cuda_error_(CUresult result, const string& stmt)
        {
                if(result == CUDA_SUCCESS)
                        return false;

                string message = string_printf("CUDA error at %s: %s", stmt.c_str(), cuewErrorString(result));
                if(error_msg == "")
                        error_msg = message;
                fprintf(stderr, "%s\n", message.c_str());
                cuda_error_documentation();
                return true;
        }

#define cuda_error(stmt) cuda_error_(stmt, #stmt)

        void cuda_error_message(const string& message)
        {
                if(error_msg == "")
                        error_msg = message;
                fprintf(stderr, "%s\n", message.c_str());
                cuda_error_documentation();
        }

        void cuda_push_context()
        {
                cuda_assert(cuCtxSetCurrent(cuContext));
        }

        void cuda_pop_context()
        {
                cuda_assert(cuCtxSetCurrent(NULL));
        }
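
        /* Illustrative note (not part of the original source): every CUDA call
         * in this class is bracketed by this pair so the device context is only
         * current for the duration of the work, for example:
         *
         *   cuda_push_context();
         *   cuda_assert(cuCtxSynchronize());
         *   cuda_pop_context();
         */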

        CUDADevice(DeviceInfo& info, Stats &stats, bool background_)
        : Device(info, stats, background_)
        {
                first_error = true;
                background = background_;
                use_texture_storage = true;

                cuDevId = info.num;
                cuDevice = 0;
                cuContext = 0;

                /* initialize */
                if(cuda_error(cuInit(0)))
                        return;

                /* setup device and context */
                if(cuda_error(cuDeviceGet(&cuDevice, cuDevId)))
                        return;

                CUresult result;

                if(background) {
                        result = cuCtxCreate(&cuContext, 0, cuDevice);
                }
                else {
                        result = cuGLCtxCreate(&cuContext, 0, cuDevice);

                        if(result != CUDA_SUCCESS) {
                                result = cuCtxCreate(&cuContext, 0, cuDevice);
                                background = true;
                        }
                }

                if(cuda_error_(result, "cuCtxCreate"))
                        return;

                int major, minor;
                cuDeviceComputeCapability(&major, &minor, cuDevId);
                cuDevArchitecture = major*100 + minor*10;
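                /* Illustrative note (not part of the original source): this
                 * encodes the compute capability as one integer, e.g. sm_21
                 * becomes 210 and sm_35 becomes 350. */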

                /* In order to use full 6GB of memory on Titan cards, use arrays instead
                 * of textures. On earlier cards this seems slower, but on Titan it is
                 * actually slightly faster in tests. */
                use_texture_storage = (cuDevArchitecture < 300);

                cuda_pop_context();
        }

        ~CUDADevice()
        {
                task_pool.stop();

                cuda_assert(cuCtxDestroy(cuContext));
        }

        bool support_device(bool /*experimental*/)
        {
                int major, minor;
                cuDeviceComputeCapability(&major, &minor, cuDevId);

                /* We only support sm_20 and above */
                if(major < 2) {
                        cuda_error_message(string_printf("CUDA device supported only with compute capability 2.0 or up, found %d.%d.", major, minor));
                        return false;
                }

                return true;
        }

        string compile_kernel(bool experimental)
        {
                /* compute cubin name */
                int major, minor;
                cuDeviceComputeCapability(&major, &minor, cuDevId);
                string cubin;

                /* attempt to use kernel provided with blender */
                if(experimental)
                        cubin = path_get(string_printf("lib/kernel_experimental_sm_%d%d.cubin", major, minor));
                else
                        cubin = path_get(string_printf("lib/kernel_sm_%d%d.cubin", major, minor));
                VLOG(1) << "Testing for pre-compiled kernel " << cubin;
                if(path_exists(cubin)) {
                        VLOG(1) << "Using precompiled kernel";
                        return cubin;
                }

                /* not found, try to use locally compiled kernel */
                string kernel_path = path_get("kernel");
                string md5 = path_files_md5_hash(kernel_path);

                if(experimental)
                        cubin = string_printf("cycles_kernel_experimental_sm%d%d_%s.cubin", major, minor, md5.c_str());
                else
                        cubin = string_printf("cycles_kernel_sm%d%d_%s.cubin", major, minor, md5.c_str());
                cubin = path_user_get(path_join("cache", cubin));
                VLOG(1) << "Testing for locally compiled kernel " << cubin;
                /* if exists already, use it */
                if(path_exists(cubin)) {
                        VLOG(1) << "Using locally compiled kernel";
                        return cubin;
                }

#ifdef _WIN32
                if(have_precompiled_kernels()) {
                        if(major < 2)
                                cuda_error_message(string_printf("CUDA device requires compute capability 2.0 or up, found %d.%d. Your GPU is not supported.", major, minor));
                        else
                                cuda_error_message(string_printf("CUDA binary kernel for this graphics card compute capability (%d.%d) not found.", major, minor));
                        return "";
                }
#endif

                /* if not, find CUDA compiler */
                const char *nvcc = cuewCompilerPath();

                if(nvcc == NULL) {
                        cuda_error_message("CUDA nvcc compiler not found. Install CUDA toolkit in default location.");
                        return "";
                }

                int cuda_version = cuewCompilerVersion();
                VLOG(1) << "Found nvcc " << nvcc << ", CUDA version " << cuda_version;

                if(cuda_version == 0) {
                        cuda_error_message("CUDA nvcc compiler version could not be parsed.");
                        return "";
                }
                if(cuda_version < 60) {
                        printf("Unsupported CUDA version %d.%d detected, you need CUDA 6.5.\n", cuda_version/10, cuda_version%10);
                        return "";
                }
                else if(cuda_version != 65)
                        printf("CUDA version %d.%d detected, build may succeed but only CUDA 6.5 is officially supported.\n", cuda_version/10, cuda_version%10);

                /* compile */
                string kernel = path_join(kernel_path, "kernel.cu");
                string include = kernel_path;
                const int machine = system_cpu_bits();

                double starttime = time_dt();
                printf("Compiling CUDA kernel ...\n");

                path_create_directories(cubin);

                string command = string_printf("\"%s\" -arch=sm_%d%d -m%d --cubin \"%s\" "
                        "-o \"%s\" --ptxas-options=\"-v\" --use_fast_math -I\"%s\" "
                        "-DNVCC -D__KERNEL_CUDA_VERSION__=%d",
                        nvcc, major, minor, machine, kernel.c_str(), cubin.c_str(), include.c_str(), cuda_version);

                if(experimental)
                        command += " -D__KERNEL_CUDA_EXPERIMENTAL__";

                if(getenv("CYCLES_CUDA_EXTRA_CFLAGS")) {
                        command += string(" ") + getenv("CYCLES_CUDA_EXTRA_CFLAGS");
                }

#ifdef WITH_CYCLES_DEBUG
                command += " -D__KERNEL_DEBUG__";
#endif

                printf("%s\n", command.c_str());

                if(system(command.c_str()) == -1) {
                        cuda_error_message("Failed to execute compilation command, see console for details.");
                        return "";
                }

                /* verify if compilation succeeded */
                if(!path_exists(cubin)) {
                        cuda_error_message("CUDA kernel compilation failed, see console for details.");
                        return "";
                }

                printf("Kernel compilation finished in %.2lfs.\n", time_dt() - starttime);

                return cubin;
        }
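
        /* Illustrative note (not part of the original source): for sm_35 on a
         * 64 bit machine with CUDA 6.5, the command built above would look
         * roughly like:
         *
         *   "nvcc" -arch=sm_35 -m64 --cubin ".../kernel/kernel.cu"
         *       -o ".../cache/cycles_kernel_sm35_<md5>.cubin"
         *       --ptxas-options="-v" --use_fast_math -I".../kernel"
         *       -DNVCC -D__KERNEL_CUDA_VERSION__=65
         */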

        bool load_kernels(bool experimental)
        {
                /* check if cuda init succeeded */
                if(cuContext == 0)
                        return false;

                /* check if GPU is supported */
                if(!support_device(experimental))
                        return false;

                /* get kernel */
                string cubin = compile_kernel(experimental);

                if(cubin == "")
                        return false;

                /* open module */
                cuda_push_context();

                string cubin_data;
                CUresult result;

                if(path_read_text(cubin, cubin_data))
                        result = cuModuleLoadData(&cuModule, cubin_data.c_str());
                else
                        result = CUDA_ERROR_FILE_NOT_FOUND;

                if(cuda_error_(result, "cuModuleLoad"))
                        cuda_error_message(string_printf("Failed loading CUDA kernel %s.", cubin.c_str()));

                cuda_pop_context();

                return (result == CUDA_SUCCESS);
        }

        void mem_alloc(device_memory& mem, MemoryType /*type*/)
        {
                cuda_push_context();
                CUdeviceptr device_pointer;
                size_t size = mem.memory_size();
                cuda_assert(cuMemAlloc(&device_pointer, size));
                mem.device_pointer = (device_ptr)device_pointer;
                mem.device_size = size;
                stats.mem_alloc(size);
                cuda_pop_context();
        }

        void mem_copy_to(device_memory& mem)
        {
                cuda_push_context();
                if(mem.device_pointer)
                        cuda_assert(cuMemcpyHtoD(cuda_device_ptr(mem.device_pointer), (void*)mem.data_pointer, mem.memory_size()));
                cuda_pop_context();
        }

        void mem_copy_from(device_memory& mem, int y, int w, int h, int elem)
        {
                size_t offset = elem*y*w;
                size_t size = elem*w*h;

                cuda_push_context();
                if(mem.device_pointer) {
                        cuda_assert(cuMemcpyDtoH((uchar*)mem.data_pointer + offset,
                                                 (CUdeviceptr)(mem.device_pointer + offset), size));
                }
                else {
                        memset((char*)mem.data_pointer + offset, 0, size);
                }
                cuda_pop_context();
        }

        void mem_zero(device_memory& mem)
        {
                memset((void*)mem.data_pointer, 0, mem.memory_size());

                cuda_push_context();
                if(mem.device_pointer)
                        cuda_assert(cuMemsetD8(cuda_device_ptr(mem.device_pointer), 0, mem.memory_size()));
                cuda_pop_context();
        }

        void mem_free(device_memory& mem)
        {
                if(mem.device_pointer) {
                        cuda_push_context();
                        cuda_assert(cuMemFree(cuda_device_ptr(mem.device_pointer)));
                        cuda_pop_context();

                        mem.device_pointer = 0;

                        stats.mem_free(mem.device_size);
                        mem.device_size = 0;
                }
        }

        void const_copy_to(const char *name, void *host, size_t size)
        {
                CUdeviceptr mem;
                size_t bytes;

                cuda_push_context();
                cuda_assert(cuModuleGetGlobal(&mem, &bytes, cuModule, name));
                //assert(bytes == size);
                cuda_assert(cuMemcpyHtoD(mem, host, size));
                cuda_pop_context();
        }

        void tex_alloc(const char *name, device_memory& mem, InterpolationType interpolation, bool periodic)
        {
                /* todo: support 3D textures, only CPU for now */

                /* determine format */
                CUarray_format_enum format;
                size_t dsize = datatype_size(mem.data_type);
                size_t size = mem.memory_size();
                bool use_texture = (interpolation != INTERPOLATION_NONE) || use_texture_storage;

                if(use_texture) {

                        switch(mem.data_type) {
                                case TYPE_UCHAR: format = CU_AD_FORMAT_UNSIGNED_INT8; break;
                                case TYPE_UINT: format = CU_AD_FORMAT_UNSIGNED_INT32; break;
                                case TYPE_INT: format = CU_AD_FORMAT_SIGNED_INT32; break;
                                case TYPE_FLOAT: format = CU_AD_FORMAT_FLOAT; break;
                                default: assert(0); return;
                        }

                        CUtexref texref = NULL;

                        cuda_push_context();
                        cuda_assert(cuModuleGetTexRef(&texref, cuModule, name));

                        if(!texref) {
                                cuda_pop_context();
                                return;
                        }

                        if(interpolation != INTERPOLATION_NONE) {
                                CUarray handle = NULL;
                                CUDA_ARRAY_DESCRIPTOR desc;

                                desc.Width = mem.data_width;
                                desc.Height = mem.data_height;
                                desc.Format = format;
                                desc.NumChannels = mem.data_elements;

                                cuda_assert(cuArrayCreate(&handle, &desc));

                                if(!handle) {
                                        cuda_pop_context();
                                        return;
                                }

                                if(mem.data_height > 1) {
                                        CUDA_MEMCPY2D param;
                                        memset(&param, 0, sizeof(param));
                                        param.dstMemoryType = CU_MEMORYTYPE_ARRAY;
                                        param.dstArray = handle;
                                        param.srcMemoryType = CU_MEMORYTYPE_HOST;
                                        param.srcHost = (void*)mem.data_pointer;
                                        param.srcPitch = mem.data_width*dsize*mem.data_elements;
                                        param.WidthInBytes = param.srcPitch;
                                        param.Height = mem.data_height;

                                        cuda_assert(cuMemcpy2D(&param));
                                }
                                else
                                        cuda_assert(cuMemcpyHtoA(handle, 0, (void*)mem.data_pointer, size));

                                cuda_assert(cuTexRefSetArray(texref, handle, CU_TRSA_OVERRIDE_FORMAT));

                                if(interpolation == INTERPOLATION_CLOSEST) {
                                        cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_POINT));
                                }
                                else if(interpolation == INTERPOLATION_LINEAR) {
                                        cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_LINEAR));
                                }
                                else { /* CUBIC and SMART are unsupported for CUDA */
                                        cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_LINEAR));
                                }
                                cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_NORMALIZED_COORDINATES));

                                mem.device_pointer = (device_ptr)handle;
                                mem.device_size = size;

                                stats.mem_alloc(size);
                        }
                        else {
                                cuda_pop_context();

                                mem_alloc(mem, MEM_READ_ONLY);
                                mem_copy_to(mem);

                                cuda_push_context();

                                cuda_assert(cuTexRefSetAddress(NULL, texref, cuda_device_ptr(mem.device_pointer), size));
                                cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_POINT));
                                cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_READ_AS_INTEGER));
                        }

                        if(periodic) {
                                cuda_assert(cuTexRefSetAddressMode(texref, 0, CU_TR_ADDRESS_MODE_WRAP));
                                cuda_assert(cuTexRefSetAddressMode(texref, 1, CU_TR_ADDRESS_MODE_WRAP));
                        }
                        else {
                                cuda_assert(cuTexRefSetAddressMode(texref, 0, CU_TR_ADDRESS_MODE_CLAMP));
                                cuda_assert(cuTexRefSetAddressMode(texref, 1, CU_TR_ADDRESS_MODE_CLAMP));
                        }
                        cuda_assert(cuTexRefSetFormat(texref, format, mem.data_elements));

                        cuda_pop_context();
                }
                else {
                        mem_alloc(mem, MEM_READ_ONLY);
                        mem_copy_to(mem);

                        cuda_push_context();

                        CUdeviceptr cumem;
                        size_t cubytes;

                        cuda_assert(cuModuleGetGlobal(&cumem, &cubytes, cuModule, name));

                        if(cubytes == 8) {
                                /* 64 bit device pointer */
                                uint64_t ptr = mem.device_pointer;
                                cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
                        }
                        else {
                                /* 32 bit device pointer */
                                uint32_t ptr = (uint32_t)mem.device_pointer;
                                cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
                        }

                        cuda_pop_context();
                }

                tex_interp_map[mem.device_pointer] = (interpolation != INTERPOLATION_NONE);
        }

        void tex_free(device_memory& mem)
        {
                if(mem.device_pointer) {
                        if(tex_interp_map[mem.device_pointer]) {
                                cuda_push_context();
                                cuArrayDestroy((CUarray)mem.device_pointer);
                                cuda_pop_context();

                                tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
                                mem.device_pointer = 0;

                                stats.mem_free(mem.device_size);
                                mem.device_size = 0;
                        }
                        else {
                                tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
                                mem_free(mem);
                        }
                }
        }

        void path_trace(RenderTile& rtile, int sample, bool branched)
        {
                if(have_error())
                        return;

                cuda_push_context();

                CUfunction cuPathTrace;
                CUdeviceptr d_buffer = cuda_device_ptr(rtile.buffer);
                CUdeviceptr d_rng_state = cuda_device_ptr(rtile.rng_state);

                /* get kernel function */
                if(branched) {
                        cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_branched_path_trace"));
                }
                else {
                        cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_path_trace"));
                }

                if(have_error())
                        return;

                /* pass in parameters */
                void *args[] = {&d_buffer,
                                &d_rng_state,
                                &sample,
                                &rtile.x,
                                &rtile.y,
                                &rtile.w,
                                &rtile.h,
                                &rtile.offset,
                                &rtile.stride};

                /* launch kernel */
                int threads_per_block;
                cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuPathTrace));

                /*int num_registers;
                cuda_assert(cuFuncGetAttribute(&num_registers, CU_FUNC_ATTRIBUTE_NUM_REGS, cuPathTrace));

                printf("threads_per_block %d\n", threads_per_block);
                printf("num_registers %d\n", num_registers);*/

                int xthreads = (int)sqrt((float)threads_per_block);
                int ythreads = (int)sqrt((float)threads_per_block);
                int xblocks = (rtile.w + xthreads - 1)/xthreads;
                int yblocks = (rtile.h + ythreads - 1)/ythreads;
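                /* Illustrative note (not part of the original source): with
                 * threads_per_block = 1024 this gives a 32x32 thread block, and
                 * "(w + xthreads - 1)/xthreads" is integer ceil division, so a
                 * 240x160 tile launches 8x5 blocks. */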

                cuda_assert(cuFuncSetCacheConfig(cuPathTrace, CU_FUNC_CACHE_PREFER_L1));

                cuda_assert(cuLaunchKernel(cuPathTrace,
                                           xblocks, yblocks, 1, /* blocks */
                                           xthreads, ythreads, 1, /* threads */
                                           0, 0, args, 0));

                cuda_assert(cuCtxSynchronize());

                cuda_pop_context();
        }

        void film_convert(DeviceTask& task, device_ptr buffer, device_ptr rgba_byte, device_ptr rgba_half)
        {
                if(have_error())
                        return;

                cuda_push_context();

                CUfunction cuFilmConvert;
                CUdeviceptr d_rgba = map_pixels((rgba_byte)? rgba_byte: rgba_half);
                CUdeviceptr d_buffer = cuda_device_ptr(buffer);

                /* get kernel function */
                if(rgba_half) {
                        cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_half_float"));
                }
                else {
                        cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_byte"));
                }

                float sample_scale = 1.0f/(task.sample + 1);

                /* pass in parameters */
                void *args[] = {&d_rgba,
                                &d_buffer,
                                &sample_scale,
                                &task.x,
                                &task.y,
                                &task.w,
                                &task.h,
                                &task.offset,
                                &task.stride};

                /* launch kernel */
                int threads_per_block;
                cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuFilmConvert));

                int xthreads = (int)sqrt((float)threads_per_block);
                int ythreads = (int)sqrt((float)threads_per_block);
                int xblocks = (task.w + xthreads - 1)/xthreads;
                int yblocks = (task.h + ythreads - 1)/ythreads;

                cuda_assert(cuFuncSetCacheConfig(cuFilmConvert, CU_FUNC_CACHE_PREFER_L1));

                cuda_assert(cuLaunchKernel(cuFilmConvert,
                                           xblocks, yblocks, 1, /* blocks */
                                           xthreads, ythreads, 1, /* threads */
                                           0, 0, args, 0));

                unmap_pixels((rgba_byte)? rgba_byte: rgba_half);

                cuda_pop_context();
        }

        void shader(DeviceTask& task)
        {
                if(have_error())
                        return;

                cuda_push_context();

                CUfunction cuShader;
                CUdeviceptr d_input = cuda_device_ptr(task.shader_input);
                CUdeviceptr d_output = cuda_device_ptr(task.shader_output);

                /* get kernel function */
                if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
                        cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_bake"));
                }
                else {
                        cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_shader"));
                }

                /* do tasks in smaller chunks, so we can cancel it */
                const int shader_chunk_size = 65536;
                const int start = task.shader_x;
                const int end = task.shader_x + task.shader_w;
                int offset = task.offset;

                bool canceled = false;
                for(int sample = 0; sample < task.num_samples && !canceled; sample++) {
                        for(int shader_x = start; shader_x < end; shader_x += shader_chunk_size) {
                                int shader_w = min(shader_chunk_size, end - shader_x);

                                /* pass in parameters */
                                void *args[] = {&d_input,
                                                &d_output,
                                                &task.shader_eval_type,
                                                &shader_x,
                                                &shader_w,
                                                &offset,
                                                &sample};

                                /* launch kernel */
                                int threads_per_block;
                                cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuShader));

                                int xblocks = (shader_w + threads_per_block - 1)/threads_per_block;
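                                /* Illustrative note (not part of the original
                                 * source): this 1D launch covers at most
                                 * shader_chunk_size (65536) shader points per
                                 * call, e.g. 65536 points with 1024 threads per
                                 * block gives 64 blocks. */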

                                cuda_assert(cuFuncSetCacheConfig(cuShader, CU_FUNC_CACHE_PREFER_L1));
                                cuda_assert(cuLaunchKernel(cuShader,
                                                           xblocks, 1, 1, /* blocks */
                                                           threads_per_block, 1, 1, /* threads */
                                                           0, 0, args, 0));

                                cuda_assert(cuCtxSynchronize());

                                if(task.get_cancel()) {
                                        canceled = true;
                                        break;
                                }
                        }

                        task.update_progress(NULL);
                }

                cuda_pop_context();
        }

        CUdeviceptr map_pixels(device_ptr mem)
        {
                if(!background) {
                        PixelMem pmem = pixel_mem_map[mem];
                        CUdeviceptr buffer;

                        size_t bytes;
                        cuda_assert(cuGraphicsMapResources(1, &pmem.cuPBOresource, 0));
                        cuda_assert(cuGraphicsResourceGetMappedPointer(&buffer, &bytes, pmem.cuPBOresource));

                        return buffer;
                }

                return cuda_device_ptr(mem);
        }

        void unmap_pixels(device_ptr mem)
        {
                if(!background) {
                        PixelMem pmem = pixel_mem_map[mem];

                        cuda_assert(cuGraphicsUnmapResources(1, &pmem.cuPBOresource, 0));
                }
        }

        void pixels_alloc(device_memory& mem)
        {
                if(!background) {
                        PixelMem pmem;

                        pmem.w = mem.data_width;
                        pmem.h = mem.data_height;

                        cuda_push_context();

                        glGenBuffers(1, &pmem.cuPBO);
                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
                        if(mem.data_type == TYPE_HALF)
                                glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(GLhalf)*4, NULL, GL_DYNAMIC_DRAW);
                        else
                                glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(uint8_t)*4, NULL, GL_DYNAMIC_DRAW);

                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);

                        glGenTextures(1, &pmem.cuTexId);
                        glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
                        if(mem.data_type == TYPE_HALF)
                                glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F_ARB, pmem.w, pmem.h, 0, GL_RGBA, GL_HALF_FLOAT, NULL);
                        else
                                glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, pmem.w, pmem.h, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
                        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
                        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
                        glBindTexture(GL_TEXTURE_2D, 0);

                        CUresult result = cuGraphicsGLRegisterBuffer(&pmem.cuPBOresource, pmem.cuPBO, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);

                        if(result == CUDA_SUCCESS) {
                                cuda_pop_context();

                                mem.device_pointer = pmem.cuTexId;
                                pixel_mem_map[mem.device_pointer] = pmem;

                                mem.device_size = mem.memory_size();
                                stats.mem_alloc(mem.device_size);

                                return;
                        }
                        else {
                                /* failed to register buffer, fallback to no interop */
                                glDeleteBuffers(1, &pmem.cuPBO);
                                glDeleteTextures(1, &pmem.cuTexId);

                                cuda_pop_context();

                                background = true;
                        }
                }

                Device::pixels_alloc(mem);
        }

        void pixels_copy_from(device_memory& mem, int y, int w, int h)
        {
                if(!background) {
                        PixelMem pmem = pixel_mem_map[mem.device_pointer];

                        cuda_push_context();

                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
                        uchar *pixels = (uchar*)glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_READ_ONLY);
                        size_t offset = sizeof(uchar)*4*y*w;
                        memcpy((uchar*)mem.data_pointer + offset, pixels + offset, sizeof(uchar)*4*w*h);
                        glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);

                        cuda_pop_context();

                        return;
                }

                Device::pixels_copy_from(mem, y, w, h);
        }

        void pixels_free(device_memory& mem)
        {
                if(mem.device_pointer) {
                        if(!background) {
                                PixelMem pmem = pixel_mem_map[mem.device_pointer];

                                cuda_push_context();

                                cuda_assert(cuGraphicsUnregisterResource(pmem.cuPBOresource));
                                glDeleteBuffers(1, &pmem.cuPBO);
                                glDeleteTextures(1, &pmem.cuTexId);

                                cuda_pop_context();

                                pixel_mem_map.erase(pixel_mem_map.find(mem.device_pointer));
                                mem.device_pointer = 0;

                                stats.mem_free(mem.device_size);
                                mem.device_size = 0;

                                return;
                        }

                        Device::pixels_free(mem);
                }
        }

        void draw_pixels(device_memory& mem, int y, int w, int h, int dy, int width, int height, bool transparent,
                const DeviceDrawParams &draw_params)
        {
                if(!background) {
                        PixelMem pmem = pixel_mem_map[mem.device_pointer];

                        cuda_push_context();

                        /* for multi devices, this assumes the inefficient method that we allocate
                         * all pixels on the device even though we only render to a subset */
                        size_t offset = 4*y*w;

                        if(mem.data_type == TYPE_HALF)
                                offset *= sizeof(GLhalf);
                        else
                                offset *= sizeof(uint8_t);

                        glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, pmem.cuPBO);
                        glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
                        if(mem.data_type == TYPE_HALF)
                                glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_HALF_FLOAT, (void*)offset);
                        else
                                glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, (void*)offset);
                        glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0);

                        glEnable(GL_TEXTURE_2D);

                        if(transparent) {
                                glEnable(GL_BLEND);
                                glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
                        }

                        glColor3f(1.0f, 1.0f, 1.0f);

                        if(draw_params.bind_display_space_shader_cb) {
                                draw_params.bind_display_space_shader_cb();
                        }

                        glPushMatrix();
                        glTranslatef(0.0f, (float)dy, 0.0f);

                        glBegin(GL_QUADS);

                        glTexCoord2f(0.0f, 0.0f);
                        glVertex2f(0.0f, 0.0f);
                        glTexCoord2f((float)w/(float)pmem.w, 0.0f);
                        glVertex2f((float)width, 0.0f);
                        glTexCoord2f((float)w/(float)pmem.w, (float)h/(float)pmem.h);
                        glVertex2f((float)width, (float)height);
                        glTexCoord2f(0.0f, (float)h/(float)pmem.h);
                        glVertex2f(0.0f, (float)height);

                        glEnd();

                        glPopMatrix();

                        if(draw_params.unbind_display_space_shader_cb) {
                                draw_params.unbind_display_space_shader_cb();
                        }

                        if(transparent)
                                glDisable(GL_BLEND);

                        glBindTexture(GL_TEXTURE_2D, 0);
                        glDisable(GL_TEXTURE_2D);

                        cuda_pop_context();

                        return;
                }

                Device::draw_pixels(mem, y, w, h, dy, width, height, transparent, draw_params);
        }

        void thread_run(DeviceTask *task)
        {
                if(task->type == DeviceTask::PATH_TRACE) {
                        RenderTile tile;

                        bool branched = task->integrator_branched;

                        /* keep rendering tiles until done */
                        while(task->acquire_tile(this, tile)) {
                                int start_sample = tile.start_sample;
                                int end_sample = tile.start_sample + tile.num_samples;

                                for(int sample = start_sample; sample < end_sample; sample++) {
                                        if(task->get_cancel()) {
                                                if(task->need_finish_queue == false)
                                                        break;
                                        }

                                        path_trace(tile, sample, branched);

                                        tile.sample = sample + 1;

                                        task->update_progress(&tile);
                                }

                                task->release_tile(tile);
                        }
                }
                else if(task->type == DeviceTask::SHADER) {
                        shader(*task);

                        cuda_push_context();
                        cuda_assert(cuCtxSynchronize());
                        cuda_pop_context();
                }
        }

        class CUDADeviceTask : public DeviceTask {
        public:
                CUDADeviceTask(CUDADevice *device, DeviceTask& task)
                : DeviceTask(task)
                {
                        run = function_bind(&CUDADevice::thread_run, device, this);
                }
        };

        int get_split_task_count(DeviceTask& /*task*/)
        {
                return 1;
        }

        void task_add(DeviceTask& task)
        {
                if(task.type == DeviceTask::FILM_CONVERT) {
                        /* must be done in main thread due to opengl access */
                        film_convert(task, task.buffer, task.rgba_byte, task.rgba_half);

                        cuda_push_context();
                        cuda_assert(cuCtxSynchronize());
                        cuda_pop_context();
                }
                else {
                        task_pool.push(new CUDADeviceTask(this, task));
                }
        }

        void task_wait()
        {
                task_pool.wait();
        }

        void task_cancel()
        {
                task_pool.cancel();
        }
};

bool device_cuda_init(void)
{
        static bool initialized = false;
        static bool result = false;

        if(initialized)
                return result;

        initialized = true;
        int cuew_result = cuewInit();
        if(cuew_result == CUEW_SUCCESS) {
                VLOG(1) << "CUEW initialization succeeded";
                if(CUDADevice::have_precompiled_kernels()) {
                        VLOG(1) << "Found precompiled kernels";
                        result = true;
                }
#ifndef _WIN32
                else if(cuewCompilerPath() != NULL) {
                        VLOG(1) << "Found CUDA compiler " << cuewCompilerPath();
                        result = true;
                }
                else {
                        VLOG(1) << "Neither precompiled kernels nor CUDA compiler was found,"
                                << " unable to use CUDA";
                }
#endif
        }
        else {
                VLOG(1) << "CUEW initialization failed: "
                        << ((cuew_result == CUEW_ERROR_ATEXIT_FAILED)
                            ? "Error setting up atexit() handler"
                            : "Error opening the library");
        }

        return result;
}

Device *device_cuda_create(DeviceInfo& info, Stats &stats, bool background)
{
        return new CUDADevice(info, stats, background);
}

void device_cuda_info(vector<DeviceInfo>& devices)
{
        CUresult result;
        int count = 0;

        result = cuInit(0);
        if(result != CUDA_SUCCESS) {
                if(result != CUDA_ERROR_NO_DEVICE)
                        fprintf(stderr, "CUDA cuInit: %s\n", cuewErrorString(result));
                return;
        }

        result = cuDeviceGetCount(&count);
        if(result != CUDA_SUCCESS) {
                fprintf(stderr, "CUDA cuDeviceGetCount: %s\n", cuewErrorString(result));
                return;
        }

        vector<DeviceInfo> display_devices;

        for(int num = 0; num < count; num++) {
                char name[256];
                int attr;

                if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS)
                        continue;

                DeviceInfo info;

                info.type = DEVICE_CUDA;
                info.description = string(name);
                info.id = string_printf("CUDA_%d", num);
                info.num = num;

                int major, minor;
                cuDeviceComputeCapability(&major, &minor, num);
                info.advanced_shading = (major >= 2);
                info.extended_images = (major >= 3);
                info.pack_images = false;

                /* if device has a kernel timeout, assume it is used for display */
                if(cuDeviceGetAttribute(&attr, CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT, num) == CUDA_SUCCESS && attr == 1) {
                        info.display_device = true;
                        display_devices.push_back(info);
                }
                else
                        devices.push_back(info);
        }

        if(!display_devices.empty())
                devices.insert(devices.end(), display_devices.begin(), display_devices.end());
}

string device_cuda_capabilities(void)
{
        CUresult result = cuInit(0);
        if(result != CUDA_SUCCESS) {
                if(result != CUDA_ERROR_NO_DEVICE) {
                        return string("Error initializing CUDA: ") + cuewErrorString(result);
                }
                return "No CUDA device found";
        }

        int count;
        result = cuDeviceGetCount(&count);
        if(result != CUDA_SUCCESS) {
                return string("Error getting devices: ") + cuewErrorString(result);
        }

        string capabilities = "";
        for(int num = 0; num < count; num++) {
                char name[256];
                if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS) {
                        continue;
                }
                capabilities += string("\t") + name + "\n";
                int value;
#define GET_ATTR(attr) \
                { \
                        if(cuDeviceGetAttribute(&value, \
                                                CU_DEVICE_ATTRIBUTE_##attr, \
                                                num) == CUDA_SUCCESS) \
                        { \
                                capabilities += string_printf("\t\tCU_DEVICE_ATTRIBUTE_" #attr "\t\t\t%d\n", \
                                                              value); \
                        } \
                } (void)0
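                /* Illustrative note (not part of the original source): each
                 * GET_ATTR(X) below expands to a cuDeviceGetAttribute() query
                 * for CU_DEVICE_ATTRIBUTE_X and, on success, appends a
                 * "CU_DEVICE_ATTRIBUTE_X <value>" line to the string. */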
                /* TODO(sergey): Strip all attributes which are not useful for us
                 * or do not depend on the driver.
                 */
                GET_ATTR(MAX_THREADS_PER_BLOCK);
                GET_ATTR(MAX_BLOCK_DIM_X);
                GET_ATTR(MAX_BLOCK_DIM_Y);
                GET_ATTR(MAX_BLOCK_DIM_Z);
                GET_ATTR(MAX_GRID_DIM_X);
                GET_ATTR(MAX_GRID_DIM_Y);
                GET_ATTR(MAX_GRID_DIM_Z);
                GET_ATTR(MAX_SHARED_MEMORY_PER_BLOCK);
                GET_ATTR(SHARED_MEMORY_PER_BLOCK);
                GET_ATTR(TOTAL_CONSTANT_MEMORY);
                GET_ATTR(WARP_SIZE);
                GET_ATTR(MAX_PITCH);
                GET_ATTR(MAX_REGISTERS_PER_BLOCK);
                GET_ATTR(REGISTERS_PER_BLOCK);
                GET_ATTR(CLOCK_RATE);
                GET_ATTR(TEXTURE_ALIGNMENT);
                GET_ATTR(GPU_OVERLAP);
                GET_ATTR(MULTIPROCESSOR_COUNT);
                GET_ATTR(KERNEL_EXEC_TIMEOUT);
                GET_ATTR(INTEGRATED);
                GET_ATTR(CAN_MAP_HOST_MEMORY);
                GET_ATTR(COMPUTE_MODE);
                GET_ATTR(MAXIMUM_TEXTURE1D_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE2D_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE2D_HEIGHT);
                GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT);
                GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH);
                GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_HEIGHT);
                GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_LAYERS);
                GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_HEIGHT);
                GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES);
                GET_ATTR(SURFACE_ALIGNMENT);
                GET_ATTR(CONCURRENT_KERNELS);
                GET_ATTR(ECC_ENABLED);
                GET_ATTR(TCC_DRIVER);
                GET_ATTR(MEMORY_CLOCK_RATE);
                GET_ATTR(GLOBAL_MEMORY_BUS_WIDTH);
                GET_ATTR(L2_CACHE_SIZE);
                GET_ATTR(MAX_THREADS_PER_MULTIPROCESSOR);
                GET_ATTR(ASYNC_ENGINE_COUNT);
                GET_ATTR(UNIFIED_ADDRESSING);
                GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_LAYERS);
                GET_ATTR(CAN_TEX2D_GATHER);
                GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_HEIGHT);
                GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE);
                GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE);
                GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE);
                GET_ATTR(TEXTURE_PITCH_ALIGNMENT);
                GET_ATTR(MAXIMUM_TEXTURECUBEMAP_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS);
                GET_ATTR(MAXIMUM_SURFACE1D_WIDTH);
                GET_ATTR(MAXIMUM_SURFACE2D_WIDTH);
                GET_ATTR(MAXIMUM_SURFACE2D_HEIGHT);
                GET_ATTR(MAXIMUM_SURFACE3D_WIDTH);
                GET_ATTR(MAXIMUM_SURFACE3D_HEIGHT);
                GET_ATTR(MAXIMUM_SURFACE3D_DEPTH);
                GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_WIDTH);
                GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_LAYERS);
                GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_WIDTH);
                GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_HEIGHT);
                GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_LAYERS);
                GET_ATTR(MAXIMUM_SURFACECUBEMAP_WIDTH);
                GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH);
                GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS);
                GET_ATTR(MAXIMUM_TEXTURE1D_LINEAR_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_HEIGHT);
                GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_PITCH);
                GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT);
                GET_ATTR(COMPUTE_CAPABILITY_MAJOR);
                GET_ATTR(COMPUTE_CAPABILITY_MINOR);
                GET_ATTR(MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH);
                GET_ATTR(STREAM_PRIORITIES_SUPPORTED);
                GET_ATTR(GLOBAL_L1_CACHE_SUPPORTED);
                GET_ATTR(LOCAL_L1_CACHE_SUPPORTED);
                GET_ATTR(MAX_SHARED_MEMORY_PER_MULTIPROCESSOR);
                GET_ATTR(MAX_REGISTERS_PER_MULTIPROCESSOR);
                GET_ATTR(MANAGED_MEMORY);
                GET_ATTR(MULTI_GPU_BOARD);
                GET_ATTR(MULTI_GPU_BOARD_GROUP_ID);
#undef GET_ATTR
                capabilities += "\n";
        }

        return capabilities;
}

CCL_NAMESPACE_END