intern/cycles/device/device_cuda.cpp
/*
 * Copyright 2011-2013 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "device.h"
#include "device_intern.h"

#include "buffers.h"

#include "cuew.h"
#include "util_debug.h"
#include "util_logging.h"
#include "util_map.h"
#include "util_md5.h"
#include "util_opengl.h"
#include "util_path.h"
#include "util_string.h"
#include "util_system.h"
#include "util_types.h"
#include "util_time.h"

/* Use feature-adaptive kernel compilation.
 * Requires the CUDA toolkit to be installed and currently only works on Linux.
 */
/* #define KERNEL_USE_ADAPTIVE */

CCL_NAMESPACE_BEGIN

class CUDADevice : public Device
{
public:
        DedicatedTaskPool task_pool;
        CUdevice cuDevice;
        CUcontext cuContext;
        CUmodule cuModule;
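        /* Tracks which textures were allocated as CUDA arrays (interpolated)
         * versus plain device memory, so tex_free() can release them correctly. */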
        map<device_ptr, bool> tex_interp_map;
        int cuDevId;
        int cuDevArchitecture;
        bool first_error;
        bool use_texture_storage;

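        /* GL interop state for a display buffer, keyed in pixel_mem_map by the
         * GL texture id that is stored in device_pointer. */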
        struct PixelMem {
                GLuint cuPBO;
                CUgraphicsResource cuPBOresource;
                GLuint cuTexId;
                int w, h;
        };

        map<device_ptr, PixelMem> pixel_mem_map;

        CUdeviceptr cuda_device_ptr(device_ptr mem)
        {
                return (CUdeviceptr)mem;
        }

        static bool have_precompiled_kernels()
        {
                string cubins_path = path_get("lib");
                return path_exists(cubins_path);
        }

/*#ifdef NDEBUG
#define cuda_abort()
#else
#define cuda_abort() abort()
#endif*/
        void cuda_error_documentation()
        {
                if(first_error) {
                        fprintf(stderr, "\nRefer to the Cycles GPU rendering documentation for possible solutions:\n");
                        fprintf(stderr, "http://www.blender.org/manual/render/cycles/gpu_rendering.html\n\n");
                        first_error = false;
                }
        }

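/* Evaluate a CUDA call; on failure record the first error message for the UI
 * and print it to stderr, but keep going rather than aborting. */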
#define cuda_assert(stmt) \
        { \
                CUresult result = stmt; \
                \
                if(result != CUDA_SUCCESS) { \
                        string message = string_printf("CUDA error: %s in %s", cuewErrorString(result), #stmt); \
                        if(error_msg == "") \
                                error_msg = message; \
                        fprintf(stderr, "%s\n", message.c_str()); \
                        /*cuda_abort();*/ \
                        cuda_error_documentation(); \
                } \
        } (void)0

        bool cuda_error_(CUresult result, const string& stmt)
        {
                if(result == CUDA_SUCCESS)
                        return false;

                string message = string_printf("CUDA error at %s: %s", stmt.c_str(), cuewErrorString(result));
                if(error_msg == "")
                        error_msg = message;
                fprintf(stderr, "%s\n", message.c_str());
                cuda_error_documentation();
                return true;
        }

#define cuda_error(stmt) cuda_error_(stmt, #stmt)

        void cuda_error_message(const string& message)
        {
                if(error_msg == "")
                        error_msg = message;
                fprintf(stderr, "%s\n", message.c_str());
                cuda_error_documentation();
        }

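        /* Cycles calls into CUDA from multiple threads; bind the context to the
         * current thread around each group of driver API calls. */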
        void cuda_push_context()
        {
                cuda_assert(cuCtxSetCurrent(cuContext));
        }

        void cuda_pop_context()
        {
                cuda_assert(cuCtxSetCurrent(NULL));
        }

        CUDADevice(DeviceInfo& info, Stats &stats, bool background_)
        : Device(info, stats, background_)
        {
                first_error = true;
                background = background_;
                use_texture_storage = true;

                cuDevId = info.num;
                cuDevice = 0;
                cuContext = 0;

                /* initialize */
                if(cuda_error(cuInit(0)))
                        return;

                /* setup device and context */
                if(cuda_error(cuDeviceGet(&cuDevice, cuDevId)))
                        return;

                CUresult result;

                if(background) {
                        result = cuCtxCreate(&cuContext, 0, cuDevice);
                }
                else {
                        result = cuGLCtxCreate(&cuContext, 0, cuDevice);

                        if(result != CUDA_SUCCESS) {
                                result = cuCtxCreate(&cuContext, 0, cuDevice);
                                background = true;
                        }
                }

                if(cuda_error_(result, "cuCtxCreate"))
                        return;

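                /* Encode compute capability as e.g. 350 for sm_35. */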
                int major, minor;
                cuDeviceComputeCapability(&major, &minor, cuDevId);
                cuDevArchitecture = major*100 + minor*10;

                /* In order to use full 6GB of memory on Titan cards, use arrays instead
                 * of textures. On earlier cards this seems slower, but on Titan it is
                 * actually slightly faster in tests. */
                use_texture_storage = (cuDevArchitecture < 300);

                cuda_pop_context();
        }

        ~CUDADevice()
        {
                task_pool.stop();

                cuda_assert(cuCtxDestroy(cuContext));
        }

        bool support_device(const DeviceRequestedFeatures& /*requested_features*/)
        {
                int major, minor;
                cuDeviceComputeCapability(&major, &minor, cuDevId);

                /* We only support sm_20 and above */
                if(major < 2) {
                        cuda_error_message(string_printf("CUDA device supported only with compute capability 2.0 or higher; found %d.%d.", major, minor));
                        return false;
                }

                return true;
        }

        string compile_kernel(const DeviceRequestedFeatures& requested_features)
        {
                /* compute cubin name */
                int major, minor;
                cuDeviceComputeCapability(&major, &minor, cuDevId);
                string cubin;

                /* attempt to use kernel provided with blender */
                if(requested_features.experimental)
                        cubin = path_get(string_printf("lib/kernel_experimental_sm_%d%d.cubin", major, minor));
                else
                        cubin = path_get(string_printf("lib/kernel_sm_%d%d.cubin", major, minor));
                VLOG(1) << "Testing for pre-compiled kernel " << cubin;
                if(path_exists(cubin)) {
                        VLOG(1) << "Using precompiled kernel";
                        return cubin;
                }

                /* not found, try to use locally compiled kernel */
                string kernel_path = path_get("kernel");
                string md5 = path_files_md5_hash(kernel_path);

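                /* The cache file name embeds the kernel source md5 (and, for adaptive
                 * builds, a hash of the feature options), so any source or option
                 * change invalidates the cached cubin automatically. */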
#ifdef KERNEL_USE_ADAPTIVE
                string feature_build_options = requested_features.get_build_options();
                string device_md5 = util_md5_string(feature_build_options);
                cubin = string_printf("cycles_kernel_%s_sm%d%d_%s.cubin",
                                      device_md5.c_str(),
                                      major, minor,
                                      md5.c_str());
#else
                if(requested_features.experimental)
                        cubin = string_printf("cycles_kernel_experimental_sm%d%d_%s.cubin", major, minor, md5.c_str());
                else
                        cubin = string_printf("cycles_kernel_sm%d%d_%s.cubin", major, minor, md5.c_str());
#endif

                cubin = path_user_get(path_join("cache", cubin));
                VLOG(1) << "Testing for locally compiled kernel " << cubin;
                /* if exists already, use it */
                if(path_exists(cubin)) {
                        VLOG(1) << "Using locally compiled kernel";
                        return cubin;
                }

#ifdef _WIN32
                if(have_precompiled_kernels()) {
                        if(major < 2)
                                cuda_error_message(string_printf("CUDA device requires compute capability 2.0 or up, found %d.%d. Your GPU is not supported.", major, minor));
                        else
                                cuda_error_message(string_printf("CUDA binary kernel for this graphics card compute capability (%d.%d) not found.", major, minor));
                        return "";
                }
#endif

                /* if not, find CUDA compiler */
                const char *nvcc = cuewCompilerPath();

                if(nvcc == NULL) {
                        cuda_error_message("CUDA nvcc compiler not found. Install CUDA toolkit in default location.");
                        return "";
                }

                int cuda_version = cuewCompilerVersion();
                VLOG(1) << "Found nvcc " << nvcc << ", CUDA version " << cuda_version;

                if(cuda_version == 0) {
                        cuda_error_message("CUDA nvcc compiler version could not be parsed.");
                        return "";
                }
                if(cuda_version < 60) {
                        printf("Unsupported CUDA version %d.%d detected, you need CUDA 6.5.\n", cuda_version/10, cuda_version%10);
                        return "";
                }
                else if(cuda_version != 65)
                        printf("CUDA version %d.%d detected, build may succeed but only CUDA 6.5 is officially supported.\n", cuda_version/10, cuda_version%10);

                /* compile */
                string kernel = path_join(kernel_path, path_join("kernels", path_join("cuda", "kernel.cu")));
                string include = kernel_path;
                const int machine = system_cpu_bits();

                double starttime = time_dt();
                printf("Compiling CUDA kernel ...\n");

                path_create_directories(cubin);

                string command = string_printf("\"%s\" -arch=sm_%d%d -m%d --cubin \"%s\" "
                        "-o \"%s\" --ptxas-options=\"-v\" --use_fast_math -I\"%s\" "
                        "-DNVCC -D__KERNEL_CUDA_VERSION__=%d",
                        nvcc, major, minor, machine, kernel.c_str(), cubin.c_str(), include.c_str(), cuda_version);

#ifdef KERNEL_USE_ADAPTIVE
                command += " " + feature_build_options;
#else
                if(requested_features.experimental) {
                        command += " -D__KERNEL_EXPERIMENTAL__";
                }
#endif

                const char* extra_cflags = getenv("CYCLES_CUDA_EXTRA_CFLAGS");
                if(extra_cflags) {
                        command += string(" ") + string(extra_cflags);
                }

#ifdef WITH_CYCLES_DEBUG
                command += " -D__KERNEL_DEBUG__";
#endif

                printf("%s\n", command.c_str());

                if(system(command.c_str()) == -1) {
                        cuda_error_message("Failed to execute compilation command, see console for details.");
                        return "";
                }

                /* verify if compilation succeeded */
                if(!path_exists(cubin)) {
                        cuda_error_message("CUDA kernel compilation failed, see console for details.");
                        return "";
                }

                printf("Kernel compilation finished in %.2lfs.\n", time_dt() - starttime);

                return cubin;
        }

        bool load_kernels(const DeviceRequestedFeatures& requested_features)
        {
                /* check if cuda init succeeded */
                if(cuContext == 0)
                        return false;

                /* check if GPU is supported */
                if(!support_device(requested_features))
                        return false;

                /* get kernel */
                string cubin = compile_kernel(requested_features);

                if(cubin == "")
                        return false;

                /* open module */
                cuda_push_context();

                string cubin_data;
                CUresult result;

                if(path_read_text(cubin, cubin_data))
                        result = cuModuleLoadData(&cuModule, cubin_data.c_str());
                else
                        result = CUDA_ERROR_FILE_NOT_FOUND;

                if(cuda_error_(result, "cuModuleLoad"))
                        cuda_error_message(string_printf("Failed loading CUDA kernel %s.", cubin.c_str()));

                cuda_pop_context();

                return (result == CUDA_SUCCESS);
        }

        void mem_alloc(device_memory& mem, MemoryType /*type*/)
        {
                cuda_push_context();
                CUdeviceptr device_pointer;
                size_t size = mem.memory_size();
                cuda_assert(cuMemAlloc(&device_pointer, size));
                mem.device_pointer = (device_ptr)device_pointer;
                mem.device_size = size;
                stats.mem_alloc(size);
                cuda_pop_context();
        }

        void mem_copy_to(device_memory& mem)
        {
                cuda_push_context();
                if(mem.device_pointer)
                        cuda_assert(cuMemcpyHtoD(cuda_device_ptr(mem.device_pointer), (void*)mem.data_pointer, mem.memory_size()));
                cuda_pop_context();
        }

        void mem_copy_from(device_memory& mem, int y, int w, int h, int elem)
        {
                size_t offset = elem*y*w;
                size_t size = elem*w*h;

                cuda_push_context();
                if(mem.device_pointer) {
                        cuda_assert(cuMemcpyDtoH((uchar*)mem.data_pointer + offset,
                                                 (CUdeviceptr)(mem.device_pointer + offset), size));
                }
                else {
                        memset((char*)mem.data_pointer + offset, 0, size);
                }
                cuda_pop_context();
        }

        void mem_zero(device_memory& mem)
        {
                memset((void*)mem.data_pointer, 0, mem.memory_size());

                cuda_push_context();
                if(mem.device_pointer)
                        cuda_assert(cuMemsetD8(cuda_device_ptr(mem.device_pointer), 0, mem.memory_size()));
                cuda_pop_context();
        }

        void mem_free(device_memory& mem)
        {
                if(mem.device_pointer) {
                        cuda_push_context();
                        cuda_assert(cuMemFree(cuda_device_ptr(mem.device_pointer)));
                        cuda_pop_context();

                        mem.device_pointer = 0;

                        stats.mem_free(mem.device_size);
                        mem.device_size = 0;
                }
        }

        void const_copy_to(const char *name, void *host, size_t size)
        {
                CUdeviceptr mem;
                size_t bytes;

                cuda_push_context();
                cuda_assert(cuModuleGetGlobal(&mem, &bytes, cuModule, name));
                //assert(bytes == size);
                cuda_assert(cuMemcpyHtoD(mem, host, size));
                cuda_pop_context();
        }

        void tex_alloc(const char *name,
                       device_memory& mem,
                       InterpolationType interpolation,
                       ExtensionType extension)
        {
                /* todo: support 3D textures, only CPU for now */
                VLOG(1) << "Texture allocate: " << name << ", " << mem.memory_size() << " bytes.";

                /* determine format */
                CUarray_format_enum format;
                size_t dsize = datatype_size(mem.data_type);
                size_t size = mem.memory_size();
                bool use_texture = (interpolation != INTERPOLATION_NONE) || use_texture_storage;

                if(use_texture) {

                        switch(mem.data_type) {
                                case TYPE_UCHAR: format = CU_AD_FORMAT_UNSIGNED_INT8; break;
                                case TYPE_UINT: format = CU_AD_FORMAT_UNSIGNED_INT32; break;
                                case TYPE_INT: format = CU_AD_FORMAT_SIGNED_INT32; break;
                                case TYPE_FLOAT: format = CU_AD_FORMAT_FLOAT; break;
                                default: assert(0); return;
                        }

                        CUtexref texref = NULL;

                        cuda_push_context();
                        cuda_assert(cuModuleGetTexRef(&texref, cuModule, name));

                        if(!texref) {
                                cuda_pop_context();
                                return;
                        }

                        if(interpolation != INTERPOLATION_NONE) {
                                CUarray handle = NULL;
                                CUDA_ARRAY_DESCRIPTOR desc;

                                desc.Width = mem.data_width;
                                desc.Height = mem.data_height;
                                desc.Format = format;
                                desc.NumChannels = mem.data_elements;

                                cuda_assert(cuArrayCreate(&handle, &desc));

                                if(!handle) {
                                        cuda_pop_context();
                                        return;
                                }

                                if(mem.data_height > 1) {
                                        CUDA_MEMCPY2D param;
                                        memset(&param, 0, sizeof(param));
                                        param.dstMemoryType = CU_MEMORYTYPE_ARRAY;
                                        param.dstArray = handle;
                                        param.srcMemoryType = CU_MEMORYTYPE_HOST;
                                        param.srcHost = (void*)mem.data_pointer;
                                        param.srcPitch = mem.data_width*dsize*mem.data_elements;
                                        param.WidthInBytes = param.srcPitch;
                                        param.Height = mem.data_height;

                                        cuda_assert(cuMemcpy2D(&param));
                                }
                                else
                                        cuda_assert(cuMemcpyHtoA(handle, 0, (void*)mem.data_pointer, size));

                                cuda_assert(cuTexRefSetArray(texref, handle, CU_TRSA_OVERRIDE_FORMAT));

                                if(interpolation == INTERPOLATION_CLOSEST) {
                                        cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_POINT));
                                }
                                else if(interpolation == INTERPOLATION_LINEAR) {
                                        cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_LINEAR));
                                }
                                else { /* CUBIC and SMART are unsupported for CUDA */
                                        cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_LINEAR));
                                }
                                cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_NORMALIZED_COORDINATES));

                                mem.device_pointer = (device_ptr)handle;
                                mem.device_size = size;

                                stats.mem_alloc(size);
                        }
                        else {
                                cuda_pop_context();

                                mem_alloc(mem, MEM_READ_ONLY);
                                mem_copy_to(mem);

                                cuda_push_context();

                                cuda_assert(cuTexRefSetAddress(NULL, texref, cuda_device_ptr(mem.device_pointer), size));
                                cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_POINT));
                                cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_READ_AS_INTEGER));
                        }

                        switch(extension) {
                                case EXTENSION_REPEAT:
                                        cuda_assert(cuTexRefSetAddressMode(texref, 0, CU_TR_ADDRESS_MODE_WRAP));
                                        cuda_assert(cuTexRefSetAddressMode(texref, 1, CU_TR_ADDRESS_MODE_WRAP));
                                        break;
                                case EXTENSION_EXTEND:
                                        cuda_assert(cuTexRefSetAddressMode(texref, 0, CU_TR_ADDRESS_MODE_CLAMP));
                                        cuda_assert(cuTexRefSetAddressMode(texref, 1, CU_TR_ADDRESS_MODE_CLAMP));
                                        break;
                                case EXTENSION_CLIP:
                                        cuda_assert(cuTexRefSetAddressMode(texref, 0, CU_TR_ADDRESS_MODE_BORDER));
                                        cuda_assert(cuTexRefSetAddressMode(texref, 1, CU_TR_ADDRESS_MODE_BORDER));
                                        break;
                        }
                        cuda_assert(cuTexRefSetFormat(texref, format, mem.data_elements));

                        cuda_pop_context();
                }
                else {
                        mem_alloc(mem, MEM_READ_ONLY);
                        mem_copy_to(mem);

                        cuda_push_context();

                        CUdeviceptr cumem;
                        size_t cubytes;

                        cuda_assert(cuModuleGetGlobal(&cumem, &cubytes, cuModule, name));

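                        /* The kernel accesses this data through a plain pointer global;
                         * store the buffer address at whatever width the module declares. */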
                        if(cubytes == 8) {
                                /* 64 bit device pointer */
                                uint64_t ptr = mem.device_pointer;
                                cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
                        }
                        else {
                                /* 32 bit device pointer */
                                uint32_t ptr = (uint32_t)mem.device_pointer;
                                cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
                        }

                        cuda_pop_context();
                }

                tex_interp_map[mem.device_pointer] = (interpolation != INTERPOLATION_NONE);
        }

        void tex_free(device_memory& mem)
        {
                if(mem.device_pointer) {
                        if(tex_interp_map[mem.device_pointer]) {
                                cuda_push_context();
                                cuArrayDestroy((CUarray)mem.device_pointer);
                                cuda_pop_context();

                                tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
                                mem.device_pointer = 0;

                                stats.mem_free(mem.device_size);
                                mem.device_size = 0;
                        }
                        else {
                                tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
                                mem_free(mem);
                        }
                }
        }

        void path_trace(RenderTile& rtile, int sample, bool branched)
        {
                if(have_error())
                        return;

                cuda_push_context();

                CUfunction cuPathTrace;
                CUdeviceptr d_buffer = cuda_device_ptr(rtile.buffer);
                CUdeviceptr d_rng_state = cuda_device_ptr(rtile.rng_state);

                /* get kernel function */
                if(branched) {
                        cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_branched_path_trace"));
                }
                else {
                        cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_path_trace"));
                }

                if(have_error())
                        return;

                /* pass in parameters */
                void *args[] = {&d_buffer,
                                &d_rng_state,
                                &sample,
                                &rtile.x,
                                &rtile.y,
                                &rtile.w,
                                &rtile.h,
                                &rtile.offset,
                                &rtile.stride};

                /* launch kernel */
                int threads_per_block;
                cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuPathTrace));

                /*int num_registers;
                cuda_assert(cuFuncGetAttribute(&num_registers, CU_FUNC_ATTRIBUTE_NUM_REGS, cuPathTrace));

                printf("threads_per_block %d\n", threads_per_block);
                printf("num_registers %d\n", num_registers);*/

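                /* Launch a square 2D block sized from the kernel's maximum threads
                 * per block, rounding the tile up to whole blocks. */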
                int xthreads = (int)sqrt((float)threads_per_block);
                int ythreads = (int)sqrt((float)threads_per_block);
                int xblocks = (rtile.w + xthreads - 1)/xthreads;
                int yblocks = (rtile.h + ythreads - 1)/ythreads;

                cuda_assert(cuFuncSetCacheConfig(cuPathTrace, CU_FUNC_CACHE_PREFER_L1));

                cuda_assert(cuLaunchKernel(cuPathTrace,
                                           xblocks, yblocks, 1, /* blocks */
                                           xthreads, ythreads, 1, /* threads */
                                           0, 0, args, 0));

                cuda_assert(cuCtxSynchronize());

                cuda_pop_context();
        }

        void film_convert(DeviceTask& task, device_ptr buffer, device_ptr rgba_byte, device_ptr rgba_half)
        {
                if(have_error())
                        return;

                cuda_push_context();

                CUfunction cuFilmConvert;
                CUdeviceptr d_rgba = map_pixels((rgba_byte)? rgba_byte: rgba_half);
                CUdeviceptr d_buffer = cuda_device_ptr(buffer);

                /* get kernel function */
                if(rgba_half) {
                        cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_half_float"));
                }
                else {
                        cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_byte"));
                }

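                /* The render buffer holds a running sum of samples; scale by
                 * 1/(sample+1) so the displayed image is the average. */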
                float sample_scale = 1.0f/(task.sample + 1);

                /* pass in parameters */
                void *args[] = {&d_rgba,
                                &d_buffer,
                                &sample_scale,
                                &task.x,
                                &task.y,
                                &task.w,
                                &task.h,
                                &task.offset,
                                &task.stride};

                /* launch kernel */
                int threads_per_block;
                cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuFilmConvert));

                int xthreads = (int)sqrt((float)threads_per_block);
                int ythreads = (int)sqrt((float)threads_per_block);
                int xblocks = (task.w + xthreads - 1)/xthreads;
                int yblocks = (task.h + ythreads - 1)/ythreads;

                cuda_assert(cuFuncSetCacheConfig(cuFilmConvert, CU_FUNC_CACHE_PREFER_L1));

                cuda_assert(cuLaunchKernel(cuFilmConvert,
                                           xblocks, yblocks, 1, /* blocks */
                                           xthreads, ythreads, 1, /* threads */
                                           0, 0, args, 0));

                unmap_pixels((rgba_byte)? rgba_byte: rgba_half);

                cuda_pop_context();
        }

        void shader(DeviceTask& task)
        {
                if(have_error())
                        return;

                cuda_push_context();

                CUfunction cuShader;
                CUdeviceptr d_input = cuda_device_ptr(task.shader_input);
                CUdeviceptr d_output = cuda_device_ptr(task.shader_output);
                CUdeviceptr d_output_luma = cuda_device_ptr(task.shader_output_luma);

                /* get kernel function */
                if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
                        cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_bake"));
                }
                else {
                        cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_shader"));
                }

                /* do tasks in smaller chunks, so we can cancel it */
                const int shader_chunk_size = 65536;
                const int start = task.shader_x;
                const int end = task.shader_x + task.shader_w;
                int offset = task.offset;

                bool canceled = false;
                for(int sample = 0; sample < task.num_samples && !canceled; sample++) {
                        for(int shader_x = start; shader_x < end; shader_x += shader_chunk_size) {
                                int shader_w = min(shader_chunk_size, end - shader_x);

                                /* pass in parameters */
                                void *args[8];
                                int arg = 0;
                                args[arg++] = &d_input;
                                args[arg++] = &d_output;
                                if(task.shader_eval_type < SHADER_EVAL_BAKE) {
                                        args[arg++] = &d_output_luma;
                                }
                                args[arg++] = &task.shader_eval_type;
                                args[arg++] = &shader_x;
                                args[arg++] = &shader_w;
                                args[arg++] = &offset;
                                args[arg++] = &sample;

                                /* launch kernel */
                                int threads_per_block;
                                cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuShader));

                                int xblocks = (shader_w + threads_per_block - 1)/threads_per_block;

                                cuda_assert(cuFuncSetCacheConfig(cuShader, CU_FUNC_CACHE_PREFER_L1));
                                cuda_assert(cuLaunchKernel(cuShader,
                                                           xblocks, 1, 1, /* blocks */
                                                           threads_per_block, 1, 1, /* threads */
                                                           0, 0, args, 0));

                                cuda_assert(cuCtxSynchronize());

                                if(task.get_cancel()) {
                                        canceled = true;
                                        break;
                                }
                        }

                        task.update_progress(NULL);
                }

                cuda_pop_context();
        }

        CUdeviceptr map_pixels(device_ptr mem)
        {
                if(!background) {
                        PixelMem pmem = pixel_mem_map[mem];
                        CUdeviceptr buffer;

                        size_t bytes;
                        cuda_assert(cuGraphicsMapResources(1, &pmem.cuPBOresource, 0));
                        cuda_assert(cuGraphicsResourceGetMappedPointer(&buffer, &bytes, pmem.cuPBOresource));

                        return buffer;
                }

                return cuda_device_ptr(mem);
        }

        void unmap_pixels(device_ptr mem)
        {
                if(!background) {
                        PixelMem pmem = pixel_mem_map[mem];

                        cuda_assert(cuGraphicsUnmapResources(1, &pmem.cuPBOresource, 0));
                }
        }

        void pixels_alloc(device_memory& mem)
        {
                if(!background) {
                        PixelMem pmem;

                        pmem.w = mem.data_width;
                        pmem.h = mem.data_height;

                        cuda_push_context();

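                        /* Create a GL pixel buffer and display texture, and register the
                         * PBO with CUDA so kernels can write pixels that GL draws without
                         * a round trip through host memory. */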
                        glGenBuffers(1, &pmem.cuPBO);
                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
                        if(mem.data_type == TYPE_HALF)
                                glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(GLhalf)*4, NULL, GL_DYNAMIC_DRAW);
                        else
                                glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(uint8_t)*4, NULL, GL_DYNAMIC_DRAW);

                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);

                        glGenTextures(1, &pmem.cuTexId);
                        glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
                        if(mem.data_type == TYPE_HALF)
                                glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F_ARB, pmem.w, pmem.h, 0, GL_RGBA, GL_HALF_FLOAT, NULL);
                        else
                                glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, pmem.w, pmem.h, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
                        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
                        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
                        glBindTexture(GL_TEXTURE_2D, 0);

                        CUresult result = cuGraphicsGLRegisterBuffer(&pmem.cuPBOresource, pmem.cuPBO, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);

                        if(result == CUDA_SUCCESS) {
                                cuda_pop_context();

                                mem.device_pointer = pmem.cuTexId;
                                pixel_mem_map[mem.device_pointer] = pmem;

                                mem.device_size = mem.memory_size();
                                stats.mem_alloc(mem.device_size);

                                return;
                        }
                        else {
                                /* failed to register buffer, fallback to no interop */
                                glDeleteBuffers(1, &pmem.cuPBO);
                                glDeleteTextures(1, &pmem.cuTexId);

                                cuda_pop_context();

                                background = true;
                        }
                }

                Device::pixels_alloc(mem);
        }

        void pixels_copy_from(device_memory& mem, int y, int w, int h)
        {
                if(!background) {
                        PixelMem pmem = pixel_mem_map[mem.device_pointer];

                        cuda_push_context();

                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
                        uchar *pixels = (uchar*)glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_READ_ONLY);
                        size_t offset = sizeof(uchar)*4*y*w;
                        memcpy((uchar*)mem.data_pointer + offset, pixels + offset, sizeof(uchar)*4*w*h);
                        glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);

                        cuda_pop_context();

                        return;
                }

                Device::pixels_copy_from(mem, y, w, h);
        }

        void pixels_free(device_memory& mem)
        {
                if(mem.device_pointer) {
                        if(!background) {
                                PixelMem pmem = pixel_mem_map[mem.device_pointer];

                                cuda_push_context();

                                cuda_assert(cuGraphicsUnregisterResource(pmem.cuPBOresource));
                                glDeleteBuffers(1, &pmem.cuPBO);
                                glDeleteTextures(1, &pmem.cuTexId);

                                cuda_pop_context();

                                pixel_mem_map.erase(pixel_mem_map.find(mem.device_pointer));
                                mem.device_pointer = 0;

                                stats.mem_free(mem.device_size);
                                mem.device_size = 0;

                                return;
                        }

                        Device::pixels_free(mem);
                }
        }

        void draw_pixels(device_memory& mem, int y, int w, int h, int dx, int dy, int width, int height, bool transparent,
                const DeviceDrawParams &draw_params)
        {
                if(!background) {
                        PixelMem pmem = pixel_mem_map[mem.device_pointer];
                        float *vpointer;

                        cuda_push_context();

                        /* for multi devices, this assumes the inefficient method that we allocate
                         * all pixels on the device even though we only render to a subset */
                        size_t offset = 4*y*w;

                        if(mem.data_type == TYPE_HALF)
                                offset *= sizeof(GLhalf);
                        else
                                offset *= sizeof(uint8_t);

                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
                        glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
                        if(mem.data_type == TYPE_HALF)
                                glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_HALF_FLOAT, (void*)offset);
                        else
                                glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, (void*)offset);
                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);

                        glEnable(GL_TEXTURE_2D);

                        if(transparent) {
                                glEnable(GL_BLEND);
                                glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
                        }

                        glColor3f(1.0f, 1.0f, 1.0f);

                        if(draw_params.bind_display_space_shader_cb) {
                                draw_params.bind_display_space_shader_cb();
                        }

                        if(!vertex_buffer)
                                glGenBuffers(1, &vertex_buffer);

                        glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
                        /* invalidate old contents - avoids stalling if buffer is still waiting in queue to be rendered */
                        glBufferData(GL_ARRAY_BUFFER, 16 * sizeof(float), NULL, GL_STREAM_DRAW);

                        vpointer = (float *)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);

                        if(vpointer) {
                                /* texture coordinate - vertex pair */
                                vpointer[0] = 0.0f;
                                vpointer[1] = 0.0f;
                                vpointer[2] = dx;
                                vpointer[3] = dy;

                                vpointer[4] = (float)w/(float)pmem.w;
                                vpointer[5] = 0.0f;
                                vpointer[6] = (float)width + dx;
                                vpointer[7] = dy;

                                vpointer[8] = (float)w/(float)pmem.w;
                                vpointer[9] = (float)h/(float)pmem.h;
                                vpointer[10] = (float)width + dx;
                                vpointer[11] = (float)height + dy;

                                vpointer[12] = 0.0f;
                                vpointer[13] = (float)h/(float)pmem.h;
                                vpointer[14] = dx;
                                vpointer[15] = (float)height + dy;

                                glUnmapBuffer(GL_ARRAY_BUFFER);
                        }

                        glTexCoordPointer(2, GL_FLOAT, 4 * sizeof(float), 0);
                        glVertexPointer(2, GL_FLOAT, 4 * sizeof(float), (char *)NULL + 2 * sizeof(float));

                        glEnableClientState(GL_VERTEX_ARRAY);
                        glEnableClientState(GL_TEXTURE_COORD_ARRAY);

                        glDrawArrays(GL_TRIANGLE_FAN, 0, 4);

                        glDisableClientState(GL_TEXTURE_COORD_ARRAY);
                        glDisableClientState(GL_VERTEX_ARRAY);

                        glBindBuffer(GL_ARRAY_BUFFER, 0);

                        if(draw_params.unbind_display_space_shader_cb) {
                                draw_params.unbind_display_space_shader_cb();
                        }

                        if(transparent)
                                glDisable(GL_BLEND);

                        glBindTexture(GL_TEXTURE_2D, 0);
                        glDisable(GL_TEXTURE_2D);

                        cuda_pop_context();

                        return;
                }

                Device::draw_pixels(mem, y, w, h, dx, dy, width, height, transparent, draw_params);
        }

        void thread_run(DeviceTask *task)
        {
                if(task->type == DeviceTask::PATH_TRACE) {
                        RenderTile tile;

                        bool branched = task->integrator_branched;

                        /* keep rendering tiles until done */
                        while(task->acquire_tile(this, tile)) {
                                int start_sample = tile.start_sample;
                                int end_sample = tile.start_sample + tile.num_samples;

                                for(int sample = start_sample; sample < end_sample; sample++) {
                                        if(task->get_cancel()) {
                                                if(task->need_finish_queue == false)
                                                        break;
                                        }

                                        path_trace(tile, sample, branched);

                                        tile.sample = sample + 1;

                                        task->update_progress(&tile);
                                }

                                task->release_tile(tile);
                        }
                }
                else if(task->type == DeviceTask::SHADER) {
                        shader(*task);

                        cuda_push_context();
                        cuda_assert(cuCtxSynchronize());
                        cuda_pop_context();
                }
        }

        class CUDADeviceTask : public DeviceTask {
        public:
                CUDADeviceTask(CUDADevice *device, DeviceTask& task)
                : DeviceTask(task)
                {
                        run = function_bind(&CUDADevice::thread_run, device, this);
                }
        };

        int get_split_task_count(DeviceTask& /*task*/)
        {
                return 1;
        }

        void task_add(DeviceTask& task)
        {
                if(task.type == DeviceTask::FILM_CONVERT) {
                        /* must be done in main thread due to opengl access */
                        film_convert(task, task.buffer, task.rgba_byte, task.rgba_half);

                        cuda_push_context();
                        cuda_assert(cuCtxSynchronize());
                        cuda_pop_context();
                }
                else {
                        task_pool.push(new CUDADeviceTask(this, task));
                }
        }

        void task_wait()
        {
                task_pool.wait();
        }

        void task_cancel()
        {
                task_pool.cancel();
        }
};

bool device_cuda_init(void)
{
        static bool initialized = false;
        static bool result = false;

        if(initialized)
                return result;

        initialized = true;
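        /* cuew loads the CUDA driver library at runtime, so initialization fails
         * gracefully when no driver is installed. */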
        int cuew_result = cuewInit();
        if(cuew_result == CUEW_SUCCESS) {
                VLOG(1) << "CUEW initialization succeeded";
                if(CUDADevice::have_precompiled_kernels()) {
                        VLOG(1) << "Found precompiled kernels";
                        result = true;
                }
#ifndef _WIN32
                else if(cuewCompilerPath() != NULL) {
                        VLOG(1) << "Found CUDA compiler " << cuewCompilerPath();
                        result = true;
                }
                else {
                        VLOG(1) << "Neither precompiled kernels nor CUDA compiler was found,"
                                << " unable to use CUDA";
                }
#endif
        }
        else {
                VLOG(1) << "CUEW initialization failed: "
                        << ((cuew_result == CUEW_ERROR_ATEXIT_FAILED)
                            ? "Error setting up atexit() handler"
                            : "Error opening the library");
        }

        return result;
}

Device *device_cuda_create(DeviceInfo& info, Stats &stats, bool background)
{
        return new CUDADevice(info, stats, background);
}

void device_cuda_info(vector<DeviceInfo>& devices)
{
        CUresult result;
        int count = 0;

        result = cuInit(0);
        if(result != CUDA_SUCCESS) {
                if(result != CUDA_ERROR_NO_DEVICE)
                        fprintf(stderr, "CUDA cuInit: %s\n", cuewErrorString(result));
                return;
        }

        result = cuDeviceGetCount(&count);
        if(result != CUDA_SUCCESS) {
                fprintf(stderr, "CUDA cuDeviceGetCount: %s\n", cuewErrorString(result));
                return;
        }

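        /* Collect display devices separately; they are appended after compute-only
         * devices so non-display cards come first in the list. */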
        vector<DeviceInfo> display_devices;

        for(int num = 0; num < count; num++) {
                char name[256];
                int attr;

                if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS)
                        continue;

                int major, minor;
                cuDeviceComputeCapability(&major, &minor, num);
                if(major < 2) {
                        continue;
                }

                DeviceInfo info;

                info.type = DEVICE_CUDA;
                info.description = string(name);
                info.id = string_printf("CUDA_%d", num);
                info.num = num;

                info.advanced_shading = (major >= 2);
                info.extended_images = (major >= 3);
                info.pack_images = false;

                /* if device has a kernel timeout, assume it is used for display */
                if(cuDeviceGetAttribute(&attr, CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT, num) == CUDA_SUCCESS && attr == 1) {
                        info.display_device = true;
                        display_devices.push_back(info);
                }
                else
                        devices.push_back(info);
        }

        if(!display_devices.empty())
                devices.insert(devices.end(), display_devices.begin(), display_devices.end());
}

string device_cuda_capabilities(void)
{
        CUresult result = cuInit(0);
        if(result != CUDA_SUCCESS) {
                if(result != CUDA_ERROR_NO_DEVICE) {
                        return string("Error initializing CUDA: ") + cuewErrorString(result);
                }
                return "No CUDA device found\n";
        }

        int count;
        result = cuDeviceGetCount(&count);
        if(result != CUDA_SUCCESS) {
                return string("Error getting devices: ") + cuewErrorString(result);
        }

        string capabilities = "";
        for(int num = 0; num < count; num++) {
                char name[256];
                if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS) {
                        continue;
                }
                capabilities += string("\t") + name + "\n";
                int value;
#define GET_ATTR(attr) \
                { \
                        if(cuDeviceGetAttribute(&value, \
                                                CU_DEVICE_ATTRIBUTE_##attr, \
                                                num) == CUDA_SUCCESS) \
                        { \
                                capabilities += string_printf("\t\tCU_DEVICE_ATTRIBUTE_" #attr "\t\t\t%d\n", \
                                                              value); \
                        } \
                } (void)0
                /* TODO(sergey): Strip all attributes which are not useful for us
                 * or do not depend on the driver.
                 */
                GET_ATTR(MAX_THREADS_PER_BLOCK);
                GET_ATTR(MAX_BLOCK_DIM_X);
                GET_ATTR(MAX_BLOCK_DIM_Y);
                GET_ATTR(MAX_BLOCK_DIM_Z);
                GET_ATTR(MAX_GRID_DIM_X);
                GET_ATTR(MAX_GRID_DIM_Y);
                GET_ATTR(MAX_GRID_DIM_Z);
                GET_ATTR(MAX_SHARED_MEMORY_PER_BLOCK);
                GET_ATTR(SHARED_MEMORY_PER_BLOCK);
                GET_ATTR(TOTAL_CONSTANT_MEMORY);
                GET_ATTR(WARP_SIZE);
                GET_ATTR(MAX_PITCH);
                GET_ATTR(MAX_REGISTERS_PER_BLOCK);
                GET_ATTR(REGISTERS_PER_BLOCK);
                GET_ATTR(CLOCK_RATE);
                GET_ATTR(TEXTURE_ALIGNMENT);
                GET_ATTR(GPU_OVERLAP);
                GET_ATTR(MULTIPROCESSOR_COUNT);
                GET_ATTR(KERNEL_EXEC_TIMEOUT);
                GET_ATTR(INTEGRATED);
                GET_ATTR(CAN_MAP_HOST_MEMORY);
                GET_ATTR(COMPUTE_MODE);
                GET_ATTR(MAXIMUM_TEXTURE1D_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE2D_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE2D_HEIGHT);
                GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT);
                GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH);
                GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_HEIGHT);
                GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_LAYERS);
                GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_HEIGHT);
                GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES);
                GET_ATTR(SURFACE_ALIGNMENT);
                GET_ATTR(CONCURRENT_KERNELS);
                GET_ATTR(ECC_ENABLED);
                GET_ATTR(TCC_DRIVER);
                GET_ATTR(MEMORY_CLOCK_RATE);
                GET_ATTR(GLOBAL_MEMORY_BUS_WIDTH);
                GET_ATTR(L2_CACHE_SIZE);
                GET_ATTR(MAX_THREADS_PER_MULTIPROCESSOR);
                GET_ATTR(ASYNC_ENGINE_COUNT);
                GET_ATTR(UNIFIED_ADDRESSING);
                GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_LAYERS);
                GET_ATTR(CAN_TEX2D_GATHER);
                GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_HEIGHT);
                GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE);
                GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE);
                GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE);
                GET_ATTR(TEXTURE_PITCH_ALIGNMENT);
                GET_ATTR(MAXIMUM_TEXTURECUBEMAP_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS);
                GET_ATTR(MAXIMUM_SURFACE1D_WIDTH);
                GET_ATTR(MAXIMUM_SURFACE2D_WIDTH);
                GET_ATTR(MAXIMUM_SURFACE2D_HEIGHT);
                GET_ATTR(MAXIMUM_SURFACE3D_WIDTH);
                GET_ATTR(MAXIMUM_SURFACE3D_HEIGHT);
                GET_ATTR(MAXIMUM_SURFACE3D_DEPTH);
                GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_WIDTH);
                GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_LAYERS);
                GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_WIDTH);
                GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_HEIGHT);
                GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_LAYERS);
                GET_ATTR(MAXIMUM_SURFACECUBEMAP_WIDTH);
                GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH);
                GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS);
                GET_ATTR(MAXIMUM_TEXTURE1D_LINEAR_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_HEIGHT);
                GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_PITCH);
                GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH);
                GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT);
                GET_ATTR(COMPUTE_CAPABILITY_MAJOR);
                GET_ATTR(COMPUTE_CAPABILITY_MINOR);
                GET_ATTR(MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH);
                GET_ATTR(STREAM_PRIORITIES_SUPPORTED);
                GET_ATTR(GLOBAL_L1_CACHE_SUPPORTED);
                GET_ATTR(LOCAL_L1_CACHE_SUPPORTED);
                GET_ATTR(MAX_SHARED_MEMORY_PER_MULTIPROCESSOR);
                GET_ATTR(MAX_REGISTERS_PER_MULTIPROCESSOR);
                GET_ATTR(MANAGED_MEMORY);
                GET_ATTR(MULTI_GPU_BOARD);
                GET_ATTR(MULTI_GPU_BOARD_GROUP_ID);
#undef GET_ATTR
                capabilities += "\n";
        }

        return capabilities;
}

CCL_NAMESPACE_END