/*
 * Copyright 2011-2013 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "device.h"
#include "device_intern.h"

#include "buffers.h"

#ifdef WITH_CUDA_DYNLOAD
#  include "cuew.h"
#else
#  include "util_opengl.h"
#  include <cuda.h>
#  include <cudaGL.h>
#endif
#include "util_debug.h"
#include "util_logging.h"
#include "util_map.h"
#include "util_md5.h"
#include "util_opengl.h"
#include "util_path.h"
#include "util_string.h"
#include "util_system.h"
#include "util_types.h"
#include "util_time.h"

CCL_NAMESPACE_BEGIN

#ifndef WITH_CUDA_DYNLOAD

/* Transparently implement some functions, so the majority of the file does not
 * need to worry about the difference between dynamically loaded and linked
 * CUDA at all.
 */

namespace {

const char *cuewErrorString(CUresult result)
{
        /* We can only give the error code here without major code duplication; that
         * should be enough since dynamic loading is only being disabled by folks
         * who know what they're doing anyway.
         *
         * NOTE: Avoid calling this from several threads.
         */
        static string error;
        error = string_printf("%d", result);
        return error.c_str();
}

const char *cuewCompilerPath(void)
{
        return CYCLES_CUDA_NVCC_EXECUTABLE;
}

int cuewCompilerVersion(void)
{
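        /* NOTE: CUDA_VERSION encodes the toolkit version as major*1000 + minor*10
         * (e.g. 7050 for CUDA 7.5), so this evaluates to 70 + 5 = 75. */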
        return (CUDA_VERSION / 100) + (CUDA_VERSION % 100 / 10);
}

}  /* namespace */
#endif  /* WITH_CUDA_DYNLOAD */

class CUDADevice : public Device
{
public:
        DedicatedTaskPool task_pool;
        CUdevice cuDevice;
        CUcontext cuContext;
        CUmodule cuModule;
        map<device_ptr, bool> tex_interp_map;
        map<device_ptr, uint> tex_bindless_map;
        int cuDevId;
        int cuDevArchitecture;
        bool first_error;

        struct PixelMem {
                GLuint cuPBO;
                CUgraphicsResource cuPBOresource;
                GLuint cuTexId;
                int w, h;
        };

        map<device_ptr, PixelMem> pixel_mem_map;

        /* Bindless Textures */
        device_vector<uint> bindless_mapping;
        bool need_bindless_mapping;

        CUdeviceptr cuda_device_ptr(device_ptr mem)
        {
                return (CUdeviceptr)mem;
        }

        static bool have_precompiled_kernels()
        {
                string cubins_path = path_get("lib");
                return path_exists(cubins_path);
        }

/*#ifdef NDEBUG
#define cuda_abort()
#else
#define cuda_abort() abort()
#endif*/
        void cuda_error_documentation()
        {
                if(first_error) {
                        fprintf(stderr, "\nRefer to the Cycles GPU rendering documentation for possible solutions:\n");
                        fprintf(stderr, "http://www.blender.org/manual/render/cycles/gpu_rendering.html\n\n");
                        first_error = false;
                }
        }

#define cuda_assert(stmt) \
        { \
                CUresult result = stmt; \
                \
                if(result != CUDA_SUCCESS) { \
                        string message = string_printf("CUDA error: %s in %s", cuewErrorString(result), #stmt); \
                        if(error_msg == "") \
                                error_msg = message; \
                        fprintf(stderr, "%s\n", message.c_str()); \
                        /*cuda_abort();*/ \
                        cuda_error_documentation(); \
                } \
        } (void)0
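
/* cuda_assert() does not abort: it records the first error message (so it can
 * be reported to the UI via error_msg), logs every failure to stderr, and then
 * lets execution continue. */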

        bool cuda_error_(CUresult result, const string& stmt)
        {
                if(result == CUDA_SUCCESS)
                        return false;

                string message = string_printf("CUDA error at %s: %s", stmt.c_str(), cuewErrorString(result));
                if(error_msg == "")
                        error_msg = message;
                fprintf(stderr, "%s\n", message.c_str());
                cuda_error_documentation();
                return true;
        }

#define cuda_error(stmt) cuda_error_(stmt, #stmt)

        void cuda_error_message(const string& message)
        {
                if(error_msg == "")
                        error_msg = message;
                fprintf(stderr, "%s\n", message.c_str());
                cuda_error_documentation();
        }

        void cuda_push_context()
        {
                cuda_assert(cuCtxSetCurrent(cuContext));
        }

        void cuda_pop_context()
        {
                cuda_assert(cuCtxSetCurrent(NULL));
        }
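
        /* All CUDA calls below are bracketed by cuda_push_context()/cuda_pop_context()
         * so the context is bound to whichever thread happens to make the call. */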

        CUDADevice(DeviceInfo& info, Stats &stats, bool background_)
        : Device(info, stats, background_)
        {
                first_error = true;
                background = background_;

                cuDevId = info.num;
                cuDevice = 0;
                cuContext = 0;

                need_bindless_mapping = false;

                /* initialize */
                if(cuda_error(cuInit(0)))
                        return;

                /* setup device and context */
                if(cuda_error(cuDeviceGet(&cuDevice, cuDevId)))
                        return;

                CUresult result;

                if(background) {
                        result = cuCtxCreate(&cuContext, 0, cuDevice);
                }
                else {
                        result = cuGLCtxCreate(&cuContext, 0, cuDevice);

                        if(result != CUDA_SUCCESS) {
                                result = cuCtxCreate(&cuContext, 0, cuDevice);
                                background = true;
                        }
                }

                if(cuda_error_(result, "cuCtxCreate"))
                        return;

                int major, minor;
                cuDeviceComputeCapability(&major, &minor, cuDevId);
                cuDevArchitecture = major*100 + minor*10;
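                /* e.g. compute capability 3.5 yields cuDevArchitecture == 350. */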

                cuda_pop_context();
        }

        ~CUDADevice()
        {
                task_pool.stop();

                if(info.has_bindless_textures) {
                        tex_free(bindless_mapping);
                }

                cuda_assert(cuCtxDestroy(cuContext));
        }

        bool support_device(const DeviceRequestedFeatures& /*requested_features*/)
        {
                int major, minor;
                cuDeviceComputeCapability(&major, &minor, cuDevId);

                /* We only support sm_20 and above */
                if(major < 2) {
                        cuda_error_message(string_printf("CUDA device supported only with compute capability 2.0 or up, found %d.%d.", major, minor));
                        return false;
                }

                return true;
        }

        bool use_adaptive_compilation()
        {
                return DebugFlags().cuda.adaptive_compile;
        }

        string compile_kernel(const DeviceRequestedFeatures& requested_features)
        {
                /* Compute cubin name. */
                int major, minor;
                cuDeviceComputeCapability(&major, &minor, cuDevId);
                string cubin;

                /* Adaptive compile: if enabled, always build a kernel matching
                 * the requested feature set. */
                bool use_adaptive_compile = use_adaptive_compilation();

                /* Attempt to use kernel provided with Blender. */
                if(!use_adaptive_compile) {
                        cubin = path_get(string_printf("lib/kernel_sm_%d%d.cubin", major, minor));
                        VLOG(1) << "Testing for pre-compiled kernel " << cubin;
                        if(path_exists(cubin)) {
                                VLOG(1) << "Using precompiled kernel";
                                return cubin;
                        }
                }

                /* Try to use locally compiled kernel. */
                string kernel_path = path_get("kernel");
                string md5 = path_files_md5_hash(kernel_path);

                string feature_build_options;
                if(use_adaptive_compile) {
                        feature_build_options = requested_features.get_build_options();
                        string device_md5 = util_md5_string(feature_build_options);
                        cubin = string_printf("cycles_kernel_%s_sm%d%d_%s.cubin",
                                          device_md5.c_str(),
                                          major, minor,
                                          md5.c_str());
                }
                else {
                        cubin = string_printf("cycles_kernel_sm%d%d_%s.cubin", major, minor, md5.c_str());
                }
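
                /* The cache file name thus encodes everything that can invalidate the
                 * build: the MD5 of the kernel sources, the target architecture, and,
                 * for adaptive builds, the MD5 of the feature build options. */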

                cubin = path_user_get(path_join("cache", cubin));
                VLOG(1) << "Testing for locally compiled kernel " << cubin;
                /* If exists already, use it. */
                if(path_exists(cubin)) {
                        VLOG(1) << "Using locally compiled kernel";
                        return cubin;
                }

#ifdef _WIN32
                if(have_precompiled_kernels()) {
                        if(major < 2)
                                cuda_error_message(string_printf("CUDA device requires compute capability 2.0 or up, found %d.%d. Your GPU is not supported.", major, minor));
                        else
                                cuda_error_message(string_printf("CUDA binary kernel for this graphics card compute capability (%d.%d) not found.", major, minor));
                        return "";
                }
#endif

                /* If not, find CUDA compiler. */
                const char *nvcc = cuewCompilerPath();

                if(nvcc == NULL) {
                        cuda_error_message("CUDA nvcc compiler not found. Install CUDA toolkit in default location.");
                        return "";
                }

                int cuda_version = cuewCompilerVersion();
                VLOG(1) << "Found nvcc " << nvcc << ", CUDA version " << cuda_version;

                if(cuda_version == 0) {
                        cuda_error_message("CUDA nvcc compiler version could not be parsed.");
                        return "";
                }
                if(cuda_version < 60) {
                        printf("Unsupported CUDA version %d.%d detected, you need CUDA 7.5.\n", cuda_version/10, cuda_version%10);
                        return "";
                }
                else if(cuda_version != 75)
                        printf("CUDA version %d.%d detected, build may succeed but only CUDA 7.5 is officially supported.\n", cuda_version/10, cuda_version%10);

                /* Compile. */
                string kernel = path_join(kernel_path, path_join("kernels", path_join("cuda", "kernel.cu")));
                string include = kernel_path;
                const int machine = system_cpu_bits();

                double starttime = time_dt();
                printf("Compiling CUDA kernel ...\n");

                path_create_directories(cubin);

                string command = string_printf("\"%s\" -arch=sm_%d%d -m%d --cubin \"%s\" "
                        "-o \"%s\" --ptxas-options=\"-v\" --use_fast_math -I\"%s\" "
                        "-DNVCC -D__KERNEL_CUDA_VERSION__=%d",
                        nvcc, major, minor, machine, kernel.c_str(), cubin.c_str(), include.c_str(), cuda_version);

                if(use_adaptive_compile)
                        command += " " + feature_build_options;

                const char* extra_cflags = getenv("CYCLES_CUDA_EXTRA_CFLAGS");
                if(extra_cflags) {
                        command += string(" ") + string(extra_cflags);
                }

#ifdef WITH_CYCLES_DEBUG
                command += " -D__KERNEL_DEBUG__";
#endif

                printf("%s\n", command.c_str());

                if(system(command.c_str()) == -1) {
                        cuda_error_message("Failed to execute compilation command, see console for details.");
                        return "";
                }

                /* Verify if compilation succeeded */
                if(!path_exists(cubin)) {
                        cuda_error_message("CUDA kernel compilation failed, see console for details.");
                        return "";
                }

                printf("Kernel compilation finished in %.2lfs.\n", time_dt() - starttime);

                return cubin;
        }

        bool load_kernels(const DeviceRequestedFeatures& requested_features)
        {
                /* check if cuda init succeeded */
                if(cuContext == 0)
                        return false;

                /* check if GPU is supported */
                if(!support_device(requested_features))
                        return false;

                /* get kernel */
                string cubin = compile_kernel(requested_features);

                if(cubin == "")
                        return false;

                /* open module */
                cuda_push_context();

                string cubin_data;
                CUresult result;

                if(path_read_text(cubin, cubin_data))
                        result = cuModuleLoadData(&cuModule, cubin_data.c_str());
                else
                        result = CUDA_ERROR_FILE_NOT_FOUND;

                if(cuda_error_(result, "cuModuleLoad"))
                        cuda_error_message(string_printf("Failed loading CUDA kernel %s.", cubin.c_str()));

                cuda_pop_context();

                return (result == CUDA_SUCCESS);
        }

        void load_bindless_mapping()
        {
                if(info.has_bindless_textures && need_bindless_mapping) {
                        tex_free(bindless_mapping);
                        tex_alloc("__bindless_mapping", bindless_mapping, INTERPOLATION_NONE, EXTENSION_REPEAT);
                        need_bindless_mapping = false;
                }
        }

        void mem_alloc(device_memory& mem, MemoryType /*type*/)
        {
                cuda_push_context();
                CUdeviceptr device_pointer;
                size_t size = mem.memory_size();
                cuda_assert(cuMemAlloc(&device_pointer, size));
                mem.device_pointer = (device_ptr)device_pointer;
                mem.device_size = size;
                stats.mem_alloc(size);
                cuda_pop_context();
        }

        void mem_copy_to(device_memory& mem)
        {
                cuda_push_context();
                if(mem.device_pointer)
                        cuda_assert(cuMemcpyHtoD(cuda_device_ptr(mem.device_pointer), (void*)mem.data_pointer, mem.memory_size()));
                cuda_pop_context();
        }

        void mem_copy_from(device_memory& mem, int y, int w, int h, int elem)
        {
                size_t offset = elem*y*w;
                size_t size = elem*w*h;

                cuda_push_context();
                if(mem.device_pointer) {
                        cuda_assert(cuMemcpyDtoH((uchar*)mem.data_pointer + offset,
                                                 (CUdeviceptr)(mem.device_pointer + offset), size));
                }
                else {
                        memset((char*)mem.data_pointer + offset, 0, size);
                }
                cuda_pop_context();
        }

        void mem_zero(device_memory& mem)
        {
                memset((void*)mem.data_pointer, 0, mem.memory_size());

                cuda_push_context();
                if(mem.device_pointer)
                        cuda_assert(cuMemsetD8(cuda_device_ptr(mem.device_pointer), 0, mem.memory_size()));
                cuda_pop_context();
        }

        void mem_free(device_memory& mem)
        {
                if(mem.device_pointer) {
                        cuda_push_context();
                        cuda_assert(cuMemFree(cuda_device_ptr(mem.device_pointer)));
                        cuda_pop_context();

                        mem.device_pointer = 0;

                        stats.mem_free(mem.device_size);
                        mem.device_size = 0;
                }
        }

        void const_copy_to(const char *name, void *host, size_t size)
        {
                CUdeviceptr mem;
                size_t bytes;

                cuda_push_context();
                cuda_assert(cuModuleGetGlobal(&mem, &bytes, cuModule, name));
                //assert(bytes == size);
                cuda_assert(cuMemcpyHtoD(mem, host, size));
                cuda_pop_context();
        }

        void tex_alloc(const char *name,
                       device_memory& mem,
                       InterpolationType interpolation,
                       ExtensionType extension)
        {
                VLOG(1) << "Texture allocate: " << name << ", " << mem.memory_size() << " bytes.";

                /* Check if we are on sm_30 or above.
                 * We use arrays and bindless textures for storage there. */
                bool has_bindless_textures = info.has_bindless_textures;

                /* General variables for both architectures */
                string bind_name = name;
                size_t dsize = datatype_size(mem.data_type);
                size_t size = mem.memory_size();

                CUaddress_mode address_mode = CU_TR_ADDRESS_MODE_WRAP;
                switch(extension) {
                        case EXTENSION_REPEAT:
                                address_mode = CU_TR_ADDRESS_MODE_WRAP;
                                break;
                        case EXTENSION_EXTEND:
                                address_mode = CU_TR_ADDRESS_MODE_CLAMP;
                                break;
                        case EXTENSION_CLIP:
                                address_mode = CU_TR_ADDRESS_MODE_BORDER;
                                break;
                        default:
                                assert(0);
                                break;
                }

                CUfilter_mode filter_mode;
                if(interpolation == INTERPOLATION_CLOSEST) {
                        filter_mode = CU_TR_FILTER_MODE_POINT;
                }
                else {
                        filter_mode = CU_TR_FILTER_MODE_LINEAR;
                }

                CUarray_format_enum format;
                switch(mem.data_type) {
                        case TYPE_UCHAR: format = CU_AD_FORMAT_UNSIGNED_INT8; break;
                        case TYPE_UINT: format = CU_AD_FORMAT_UNSIGNED_INT32; break;
                        case TYPE_INT: format = CU_AD_FORMAT_SIGNED_INT32; break;
                        case TYPE_FLOAT: format = CU_AD_FORMAT_FLOAT; break;
                        default: assert(0); return;
                }

                /* General variables for Fermi */
                CUtexref texref = NULL;

                if(!has_bindless_textures) {
                        if(mem.data_depth > 1) {
                                /* Kernel uses different bind names for 2d and 3d float textures,
                                 * so we have to adjust a couple of things here.
                                 */
                                vector<string> tokens;
                                string_split(tokens, name, "_");
                                bind_name = string_printf("__tex_image_%s_3d_%s",
                                                          tokens[2].c_str(),
                                                          tokens[3].c_str());
                        }

                        cuda_push_context();
                        cuda_assert(cuModuleGetTexRef(&texref, cuModule, bind_name.c_str()));
                        cuda_pop_context();

                        if(!texref) {
                                return;
                        }
                }

                /* Data Storage */
                if(interpolation == INTERPOLATION_NONE) {
                        if(has_bindless_textures) {
                                mem_alloc(mem, MEM_READ_ONLY);
                                mem_copy_to(mem);

                                cuda_push_context();

                                CUdeviceptr cumem;
                                size_t cubytes;

                                cuda_assert(cuModuleGetGlobal(&cumem, &cubytes, cuModule, bind_name.c_str()));

                                if(cubytes == 8) {
                                        /* 64 bit device pointer */
                                        uint64_t ptr = mem.device_pointer;
                                        cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
                                }
                                else {
                                        /* 32 bit device pointer */
                                        uint32_t ptr = (uint32_t)mem.device_pointer;
                                        cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
                                }

                                cuda_pop_context();
                        }
                        else {
                                mem_alloc(mem, MEM_READ_ONLY);
                                mem_copy_to(mem);

                                cuda_push_context();

                                cuda_assert(cuTexRefSetAddress(NULL, texref, cuda_device_ptr(mem.device_pointer), size));
                                cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_POINT));
                                cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_READ_AS_INTEGER));

                                cuda_pop_context();
                        }
                }
                /* Texture Storage */
                else {
                        CUarray handle = NULL;

                        cuda_push_context();

                        if(mem.data_depth > 1) {
                                CUDA_ARRAY3D_DESCRIPTOR desc;

                                desc.Width = mem.data_width;
                                desc.Height = mem.data_height;
                                desc.Depth = mem.data_depth;
                                desc.Format = format;
                                desc.NumChannels = mem.data_elements;
                                desc.Flags = 0;

                                cuda_assert(cuArray3DCreate(&handle, &desc));
                        }
                        else {
                                CUDA_ARRAY_DESCRIPTOR desc;

                                desc.Width = mem.data_width;
                                desc.Height = mem.data_height;
                                desc.Format = format;
                                desc.NumChannels = mem.data_elements;

                                cuda_assert(cuArrayCreate(&handle, &desc));
                        }

                        if(!handle) {
                                cuda_pop_context();
                                return;
                        }

                        /* Allocate 3D, 2D or 1D memory */
                        if(mem.data_depth > 1) {
                                CUDA_MEMCPY3D param;
                                memset(&param, 0, sizeof(param));
                                param.dstMemoryType = CU_MEMORYTYPE_ARRAY;
                                param.dstArray = handle;
                                param.srcMemoryType = CU_MEMORYTYPE_HOST;
                                param.srcHost = (void*)mem.data_pointer;
                                param.srcPitch = mem.data_width*dsize*mem.data_elements;
                                param.WidthInBytes = param.srcPitch;
                                param.Height = mem.data_height;
                                param.Depth = mem.data_depth;

                                cuda_assert(cuMemcpy3D(&param));
                        }
                        else if(mem.data_height > 1) {
                                CUDA_MEMCPY2D param;
                                memset(&param, 0, sizeof(param));
                                param.dstMemoryType = CU_MEMORYTYPE_ARRAY;
                                param.dstArray = handle;
                                param.srcMemoryType = CU_MEMORYTYPE_HOST;
                                param.srcHost = (void*)mem.data_pointer;
                                param.srcPitch = mem.data_width*dsize*mem.data_elements;
                                param.WidthInBytes = param.srcPitch;
                                param.Height = mem.data_height;

                                cuda_assert(cuMemcpy2D(&param));
                        }
                        else
                                cuda_assert(cuMemcpyHtoA(handle, 0, (void*)mem.data_pointer, size));

                        /* Fermi and Kepler */
                        mem.device_pointer = (device_ptr)handle;
                        mem.device_size = size;

                        stats.mem_alloc(size);

                        /* Bindless Textures - Kepler */
                        if(has_bindless_textures) {
                                int flat_slot = 0;
                                if(string_startswith(name, "__tex_image")) {
                                        int pos = string(name).rfind("_");
                                        flat_slot = atoi(name + pos + 1);
                                }
                                else {
                                        assert(0);
                                }
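
                                /* Image texture names are expected to end in the flat slot
                                 * index after the final underscore (e.g. "__tex_image_..._042"),
                                 * which is where the slot into the bindless mapping comes from. */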

                                CUDA_RESOURCE_DESC resDesc;
                                memset(&resDesc, 0, sizeof(resDesc));
                                resDesc.resType = CU_RESOURCE_TYPE_ARRAY;
                                resDesc.res.array.hArray = handle;
                                resDesc.flags = 0;

                                CUDA_TEXTURE_DESC texDesc;
                                memset(&texDesc, 0, sizeof(texDesc));
                                texDesc.addressMode[0] = address_mode;
                                texDesc.addressMode[1] = address_mode;
                                texDesc.addressMode[2] = address_mode;
                                texDesc.filterMode = filter_mode;
                                texDesc.flags = CU_TRSF_NORMALIZED_COORDINATES;

                                CUtexObject tex = 0;
                                cuda_assert(cuTexObjectCreate(&tex, &resDesc, &texDesc, NULL));

                                /* Safety check: the mapping stores texture objects as 32-bit
                                 * uints, so the 64-bit handle must fit. */
                                if(tex > UINT_MAX) {
                                        assert(0);
                                }

                                /* Resize once */
                                if(flat_slot >= bindless_mapping.size())
                                        bindless_mapping.resize(4096); /*TODO(dingto): Make this a variable */

                                /* Set Mapping and tag that we need to (re-)upload to device */
                                bindless_mapping.get_data()[flat_slot] = (uint)tex;
                                tex_bindless_map[mem.device_pointer] = (uint)tex;
                                need_bindless_mapping = true;
                        }
                        /* Regular Textures - Fermi */
                        else {
                                cuda_assert(cuTexRefSetArray(texref, handle, CU_TRSA_OVERRIDE_FORMAT));
                                cuda_assert(cuTexRefSetFilterMode(texref, filter_mode));
                                cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_NORMALIZED_COORDINATES));
                        }

                        cuda_pop_context();
                }

                /* Fermi, Data and Image Textures */
                if(!has_bindless_textures) {
                        cuda_push_context();

                        cuda_assert(cuTexRefSetAddressMode(texref, 0, address_mode));
                        cuda_assert(cuTexRefSetAddressMode(texref, 1, address_mode));
                        if(mem.data_depth > 1) {
                                cuda_assert(cuTexRefSetAddressMode(texref, 2, address_mode));
                        }

                        cuda_assert(cuTexRefSetFormat(texref, format, mem.data_elements));

                        cuda_pop_context();
                }

                /* Fermi and Kepler */
                tex_interp_map[mem.device_pointer] = (interpolation != INTERPOLATION_NONE);
        }

        void tex_free(device_memory& mem)
        {
                if(mem.device_pointer) {
                        if(tex_interp_map[mem.device_pointer]) {
                                cuda_push_context();
                                cuArrayDestroy((CUarray)mem.device_pointer);
                                cuda_pop_context();

                                /* Free CUtexObject (Bindless Textures) */
                                if(info.has_bindless_textures && tex_bindless_map[mem.device_pointer]) {
                                        uint flat_slot = tex_bindless_map[mem.device_pointer];
                                        cuTexObjectDestroy(flat_slot);
                                }

                                tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
                                mem.device_pointer = 0;

                                stats.mem_free(mem.device_size);
                                mem.device_size = 0;
                        }
                        else {
                                tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
                                mem_free(mem);
                        }
                }
        }

        void path_trace(RenderTile& rtile, int sample, bool branched)
        {
                if(have_error())
                        return;

                cuda_push_context();

                CUfunction cuPathTrace;
                CUdeviceptr d_buffer = cuda_device_ptr(rtile.buffer);
                CUdeviceptr d_rng_state = cuda_device_ptr(rtile.rng_state);

                /* get kernel function */
                if(branched) {
                        cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_branched_path_trace"));
                }
                else {
                        cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_path_trace"));
                }

                if(have_error())
                        return;

                /* pass in parameters */
                void *args[] = {&d_buffer,
                                &d_rng_state,
                                &sample,
                                &rtile.x,
                                &rtile.y,
                                &rtile.w,
                                &rtile.h,
                                &rtile.offset,
                                &rtile.stride};

                /* launch kernel */
                int threads_per_block;
                cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuPathTrace));

                /*int num_registers;
                cuda_assert(cuFuncGetAttribute(&num_registers, CU_FUNC_ATTRIBUTE_NUM_REGS, cuPathTrace));

                printf("threads_per_block %d\n", threads_per_block);
                printf("num_registers %d\n", num_registers);*/

                int xthreads = (int)sqrt((float)threads_per_block);
                int ythreads = (int)sqrt((float)threads_per_block);
                int xblocks = (rtile.w + xthreads - 1)/xthreads;
                int yblocks = (rtile.h + ythreads - 1)/ythreads;
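
                /* Square thread blocks of floor(sqrt(max)) threads per side, e.g. a
                 * 1024-thread limit gives 32x32 blocks; block counts round up so the
                 * whole tile is covered. */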

                cuda_assert(cuFuncSetCacheConfig(cuPathTrace, CU_FUNC_CACHE_PREFER_L1));

                cuda_assert(cuLaunchKernel(cuPathTrace,
                                           xblocks , yblocks, 1, /* blocks */
                                           xthreads, ythreads, 1, /* threads */
                                           0, 0, args, 0));

                cuda_assert(cuCtxSynchronize());

                cuda_pop_context();
        }

        void film_convert(DeviceTask& task, device_ptr buffer, device_ptr rgba_byte, device_ptr rgba_half)
        {
                if(have_error())
                        return;

                cuda_push_context();

                CUfunction cuFilmConvert;
                CUdeviceptr d_rgba = map_pixels((rgba_byte)? rgba_byte: rgba_half);
                CUdeviceptr d_buffer = cuda_device_ptr(buffer);

                /* get kernel function */
                if(rgba_half) {
                        cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_half_float"));
                }
                else {
                        cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_byte"));
                }

                float sample_scale = 1.0f/(task.sample + 1);

                /* pass in parameters */
                void *args[] = {&d_rgba,
                                &d_buffer,
                                &sample_scale,
                                &task.x,
                                &task.y,
                                &task.w,
                                &task.h,
                                &task.offset,
                                &task.stride};

                /* launch kernel */
                int threads_per_block;
                cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuFilmConvert));

                int xthreads = (int)sqrt((float)threads_per_block);
                int ythreads = (int)sqrt((float)threads_per_block);
                int xblocks = (task.w + xthreads - 1)/xthreads;
                int yblocks = (task.h + ythreads - 1)/ythreads;

                cuda_assert(cuFuncSetCacheConfig(cuFilmConvert, CU_FUNC_CACHE_PREFER_L1));

                cuda_assert(cuLaunchKernel(cuFilmConvert,
                                           xblocks , yblocks, 1, /* blocks */
                                           xthreads, ythreads, 1, /* threads */
                                           0, 0, args, 0));

                unmap_pixels((rgba_byte)? rgba_byte: rgba_half);

                cuda_pop_context();
        }

        void shader(DeviceTask& task)
        {
                if(have_error())
                        return;

                cuda_push_context();

                CUfunction cuShader;
                CUdeviceptr d_input = cuda_device_ptr(task.shader_input);
                CUdeviceptr d_output = cuda_device_ptr(task.shader_output);
                CUdeviceptr d_output_luma = cuda_device_ptr(task.shader_output_luma);

                /* get kernel function */
                if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
                        cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_bake"));
                }
                else {
                        cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_shader"));
                }

                /* do tasks in smaller chunks, so we can cancel it */
                const int shader_chunk_size = 65536;
                const int start = task.shader_x;
                const int end = task.shader_x + task.shader_w;
                int offset = task.offset;

                bool canceled = false;
                for(int sample = 0; sample < task.num_samples && !canceled; sample++) {
                        for(int shader_x = start; shader_x < end; shader_x += shader_chunk_size) {
                                int shader_w = min(shader_chunk_size, end - shader_x);

                                /* pass in parameters */
                                void *args[8];
                                int arg = 0;
                                args[arg++] = &d_input;
                                args[arg++] = &d_output;
                                if(task.shader_eval_type < SHADER_EVAL_BAKE) {
                                        args[arg++] = &d_output_luma;
                                }
                                args[arg++] = &task.shader_eval_type;
                                if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
                                        args[arg++] = &task.shader_filter;
                                }
                                args[arg++] = &shader_x;
                                args[arg++] = &shader_w;
                                args[arg++] = &offset;
                                args[arg++] = &sample;

                                /* launch kernel */
                                int threads_per_block;
                                cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuShader));

                                int xblocks = (shader_w + threads_per_block - 1)/threads_per_block;
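
                                /* 1D launch: ceil(shader_w / threads_per_block) blocks, one
                                 * thread per shader evaluation in the chunk. */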

                                cuda_assert(cuFuncSetCacheConfig(cuShader, CU_FUNC_CACHE_PREFER_L1));
                                cuda_assert(cuLaunchKernel(cuShader,
                                                           xblocks , 1, 1, /* blocks */
                                                           threads_per_block, 1, 1, /* threads */
                                                           0, 0, args, 0));

                                cuda_assert(cuCtxSynchronize());

                                if(task.get_cancel()) {
                                        canceled = true;
                                        break;
                                }
                        }

                        task.update_progress(NULL);
                }

                cuda_pop_context();
        }

        CUdeviceptr map_pixels(device_ptr mem)
        {
                if(!background) {
                        PixelMem pmem = pixel_mem_map[mem];
                        CUdeviceptr buffer;

                        size_t bytes;
                        cuda_assert(cuGraphicsMapResources(1, &pmem.cuPBOresource, 0));
                        cuda_assert(cuGraphicsResourceGetMappedPointer(&buffer, &bytes, pmem.cuPBOresource));

                        return buffer;
                }

                return cuda_device_ptr(mem);
        }

        void unmap_pixels(device_ptr mem)
        {
                if(!background) {
                        PixelMem pmem = pixel_mem_map[mem];

                        cuda_assert(cuGraphicsUnmapResources(1, &pmem.cuPBOresource, 0));
                }
        }

        void pixels_alloc(device_memory& mem)
        {
                if(!background) {
                        PixelMem pmem;

                        pmem.w = mem.data_width;
                        pmem.h = mem.data_height;

                        cuda_push_context();

                        glGenBuffers(1, &pmem.cuPBO);
                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
                        if(mem.data_type == TYPE_HALF)
                                glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(GLhalf)*4, NULL, GL_DYNAMIC_DRAW);
                        else
                                glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(uint8_t)*4, NULL, GL_DYNAMIC_DRAW);

                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);

                        glGenTextures(1, &pmem.cuTexId);
                        glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
                        if(mem.data_type == TYPE_HALF)
                                glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F_ARB, pmem.w, pmem.h, 0, GL_RGBA, GL_HALF_FLOAT, NULL);
                        else
                                glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, pmem.w, pmem.h, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
                        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
                        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
                        glBindTexture(GL_TEXTURE_2D, 0);

                        CUresult result = cuGraphicsGLRegisterBuffer(&pmem.cuPBOresource, pmem.cuPBO, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);

                        if(result == CUDA_SUCCESS) {
                                cuda_pop_context();

                                mem.device_pointer = pmem.cuTexId;
                                pixel_mem_map[mem.device_pointer] = pmem;

                                mem.device_size = mem.memory_size();
                                stats.mem_alloc(mem.device_size);

                                return;
                        }
                        else {
                                /* failed to register buffer, fallback to no interop */
                                glDeleteBuffers(1, &pmem.cuPBO);
                                glDeleteTextures(1, &pmem.cuTexId);

                                cuda_pop_context();

                                background = true;
                        }
                }

                Device::pixels_alloc(mem);
        }

        void pixels_copy_from(device_memory& mem, int y, int w, int h)
        {
                if(!background) {
                        PixelMem pmem = pixel_mem_map[mem.device_pointer];

                        cuda_push_context();

                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
                        uchar *pixels = (uchar*)glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_READ_ONLY);
                        size_t offset = sizeof(uchar)*4*y*w;
                        memcpy((uchar*)mem.data_pointer + offset, pixels + offset, sizeof(uchar)*4*w*h);
                        glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);

                        cuda_pop_context();

                        return;
                }

                Device::pixels_copy_from(mem, y, w, h);
        }

        void pixels_free(device_memory& mem)
        {
                if(mem.device_pointer) {
                        if(!background) {
                                PixelMem pmem = pixel_mem_map[mem.device_pointer];

                                cuda_push_context();

                                cuda_assert(cuGraphicsUnregisterResource(pmem.cuPBOresource));
                                glDeleteBuffers(1, &pmem.cuPBO);
                                glDeleteTextures(1, &pmem.cuTexId);

                                cuda_pop_context();

                                pixel_mem_map.erase(pixel_mem_map.find(mem.device_pointer));
                                mem.device_pointer = 0;

                                stats.mem_free(mem.device_size);
                                mem.device_size = 0;

                                return;
                        }

                        Device::pixels_free(mem);
                }
        }

        void draw_pixels(device_memory& mem, int y, int w, int h, int dx, int dy, int width, int height, bool transparent,
                const DeviceDrawParams &draw_params)
        {
                if(!background) {
                        PixelMem pmem = pixel_mem_map[mem.device_pointer];
                        float *vpointer;

                        cuda_push_context();

                        /* for multi devices, this assumes the inefficient method that we allocate
                         * all pixels on the device even though we only render to a subset */
                        size_t offset = 4*y*w;

                        if(mem.data_type == TYPE_HALF)
                                offset *= sizeof(GLhalf);
                        else
                                offset *= sizeof(uint8_t);

                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
                        glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
                        if(mem.data_type == TYPE_HALF)
                                glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_HALF_FLOAT, (void*)offset);
                        else
                                glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, (void*)offset);
                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);

                        glEnable(GL_TEXTURE_2D);

                        if(transparent) {
                                glEnable(GL_BLEND);
                                glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
                        }

                        glColor3f(1.0f, 1.0f, 1.0f);

                        if(draw_params.bind_display_space_shader_cb) {
                                draw_params.bind_display_space_shader_cb();
                        }

                        if(!vertex_buffer)
                                glGenBuffers(1, &vertex_buffer);

                        glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
                        /* invalidate old contents - avoids stalling if buffer is still waiting in queue to be rendered */
                        glBufferData(GL_ARRAY_BUFFER, 16 * sizeof(float), NULL, GL_STREAM_DRAW);

                        vpointer = (float *)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);

                        if(vpointer) {
                                /* texture coordinate - vertex pair */
                                vpointer[0] = 0.0f;
                                vpointer[1] = 0.0f;
                                vpointer[2] = dx;
                                vpointer[3] = dy;

                                vpointer[4] = (float)w/(float)pmem.w;
                                vpointer[5] = 0.0f;
                                vpointer[6] = (float)width + dx;
                                vpointer[7] = dy;

                                vpointer[8] = (float)w/(float)pmem.w;
                                vpointer[9] = (float)h/(float)pmem.h;
                                vpointer[10] = (float)width + dx;
                                vpointer[11] = (float)height + dy;

                                vpointer[12] = 0.0f;
                                vpointer[13] = (float)h/(float)pmem.h;
                                vpointer[14] = dx;
                                vpointer[15] = (float)height + dy;

                                glUnmapBuffer(GL_ARRAY_BUFFER);
                        }

                        glTexCoordPointer(2, GL_FLOAT, 4 * sizeof(float), 0);
                        glVertexPointer(2, GL_FLOAT, 4 * sizeof(float), (char *)NULL + 2 * sizeof(float));

                        glEnableClientState(GL_VERTEX_ARRAY);
                        glEnableClientState(GL_TEXTURE_COORD_ARRAY);

                        glDrawArrays(GL_TRIANGLE_FAN, 0, 4);

                        glDisableClientState(GL_TEXTURE_COORD_ARRAY);
                        glDisableClientState(GL_VERTEX_ARRAY);

                        glBindBuffer(GL_ARRAY_BUFFER, 0);

                        if(draw_params.unbind_display_space_shader_cb) {
                                draw_params.unbind_display_space_shader_cb();
                        }

                        if(transparent)
                                glDisable(GL_BLEND);

                        glBindTexture(GL_TEXTURE_2D, 0);
                        glDisable(GL_TEXTURE_2D);

                        cuda_pop_context();

                        return;
                }

                Device::draw_pixels(mem, y, w, h, dx, dy, width, height, transparent, draw_params);
        }

        void thread_run(DeviceTask *task)
        {
                if(task->type == DeviceTask::PATH_TRACE) {
                        RenderTile tile;

                        bool branched = task->integrator_branched;

                        /* Upload Bindless Mapping */
                        load_bindless_mapping();

                        /* keep rendering tiles until done */
                        while(task->acquire_tile(this, tile)) {
                                int start_sample = tile.start_sample;
                                int end_sample = tile.start_sample + tile.num_samples;

                                for(int sample = start_sample; sample < end_sample; sample++) {
                                        if(task->get_cancel()) {
                                                if(task->need_finish_queue == false)
                                                        break;
                                        }

                                        path_trace(tile, sample, branched);

                                        tile.sample = sample + 1;

                                        task->update_progress(&tile);
                                }

                                task->release_tile(tile);
                        }
                }
                else if(task->type == DeviceTask::SHADER) {
                        /* Upload Bindless Mapping */
                        load_bindless_mapping();

                        shader(*task);

                        cuda_push_context();
                        cuda_assert(cuCtxSynchronize());
                        cuda_pop_context();
                }
        }

        class CUDADeviceTask : public DeviceTask {
        public:
                CUDADeviceTask(CUDADevice *device, DeviceTask& task)
                : DeviceTask(task)
                {
                        run = function_bind(&CUDADevice::thread_run, device, this);
                }
        };

        int get_split_task_count(DeviceTask& /*task*/)
        {
                return 1;
        }

        void task_add(DeviceTask& task)
        {
                if(task.type == DeviceTask::FILM_CONVERT) {
                        /* must be done in main thread due to opengl access */
                        film_convert(task, task.buffer, task.rgba_byte, task.rgba_half);

                        cuda_push_context();
                        cuda_assert(cuCtxSynchronize());
                        cuda_pop_context();
                }
                else {
                        task_pool.push(new CUDADeviceTask(this, task));
                }
        }

        void task_wait()
        {
                task_pool.wait();
        }

        void task_cancel()
        {
                task_pool.cancel();
        }
};

bool device_cuda_init(void)
{
#ifdef WITH_CUDA_DYNLOAD
        static bool initialized = false;
        static bool result = false;

        if(initialized)
                return result;

        initialized = true;
        int cuew_result = cuewInit();
        if(cuew_result == CUEW_SUCCESS) {
                VLOG(1) << "CUEW initialization succeeded";
                if(CUDADevice::have_precompiled_kernels()) {
                        VLOG(1) << "Found precompiled kernels";
                        result = true;
                }
#ifndef _WIN32
                else if(cuewCompilerPath() != NULL) {
                        VLOG(1) << "Found CUDA compiler " << cuewCompilerPath();
                        result = true;
                }
                else {
1300                         VLOG(1) << "Neither precompiled kernels nor CUDA compiler wad found,"
1301                                 << " unable to use CUDA";
1302                 }
1303 #endif
1304         }
1305         else {
1306                 VLOG(1) << "CUEW initialization failed: "
1307                         << ((cuew_result == CUEW_ERROR_ATEXIT_FAILED)
1308                             ? "Error setting up atexit() handler"
1309                             : "Error opening the library");
1310         }
1311
1312         return result;
1313 #else  /* WITH_CUDA_DYNLOAD */
1314         return true;
1315 #endif /* WITH_CUDA_DYNLOAD */
1316 }
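
/* Note on usage: callers are expected to check device_cuda_init() before any
 * other entry point in this file. A minimal sketch, assuming only the
 * functions defined here:
 *
 *   if(device_cuda_init()) {
 *           vector<DeviceInfo> devices;
 *           device_cuda_info(devices);
 *   }
 *
 * With WITH_CUDA_DYNLOAD the guard is what guarantees the driver symbols are
 * resolved; the result is cached in statics, so repeated calls are cheap.
 */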

Device *device_cuda_create(DeviceInfo& info, Stats &stats, bool background)
{
	return new CUDADevice(info, stats, background);
}

void device_cuda_info(vector<DeviceInfo>& devices)
{
	CUresult result;
	int count = 0;

	result = cuInit(0);
	if(result != CUDA_SUCCESS) {
		if(result != CUDA_ERROR_NO_DEVICE)
			fprintf(stderr, "CUDA cuInit: %s\n", cuewErrorString(result));
		return;
	}

	result = cuDeviceGetCount(&count);
	if(result != CUDA_SUCCESS) {
		fprintf(stderr, "CUDA cuDeviceGetCount: %s\n", cuewErrorString(result));
		return;
	}

	vector<DeviceInfo> display_devices;

	for(int num = 0; num < count; num++) {
		char name[256];
		int attr;

		if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS)
			continue;

		int major, minor;
		cuDeviceComputeCapability(&major, &minor, num);
		if(major < 2) {
			/* Fermi (sm_2x) is the minimum supported architecture. */
			continue;
		}

		DeviceInfo info;

		info.type = DEVICE_CUDA;
		info.description = string(name);
		info.id = string_printf("CUDA_%d", num);
		info.num = num;

		info.advanced_shading = (major >= 2);
		info.has_bindless_textures = (major >= 3);
		info.pack_images = false;

		/* If device has a kernel timeout, assume it is used for display. */
		if(cuDeviceGetAttribute(&attr, CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT, num) == CUDA_SUCCESS && attr == 1) {
			info.display_device = true;
			display_devices.push_back(info);
		}
		else
			devices.push_back(info);
	}

	/* Append display devices last, so compute-only GPUs come first. */
	if(!display_devices.empty())
		devices.insert(devices.end(), display_devices.begin(), display_devices.end());
}
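
/* Example: consuming the enumeration above. A sketch only, using just the
 * DeviceInfo fields filled in by device_cuda_info(); because display devices
 * are appended last, a caller taking the first entry prefers a compute-only
 * GPU when one is available:
 *
 *   vector<DeviceInfo> devices;
 *   device_cuda_info(devices);
 *   for(size_t i = 0; i < devices.size(); i++) {
 *           printf("%s (%s)%s\n",
 *                  devices[i].description.c_str(),
 *                  devices[i].id.c_str(),
 *                  devices[i].display_device ? " [display]" : "");
 *   }
 */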

string device_cuda_capabilities(void)
{
	CUresult result = cuInit(0);
	if(result != CUDA_SUCCESS) {
		if(result != CUDA_ERROR_NO_DEVICE) {
			return string("Error initializing CUDA: ") + cuewErrorString(result);
		}
		return "No CUDA device found\n";
	}

	int count;
	result = cuDeviceGetCount(&count);
	if(result != CUDA_SUCCESS) {
		return string("Error getting devices: ") + cuewErrorString(result);
	}

	string capabilities = "";
	for(int num = 0; num < count; num++) {
		char name[256];
		if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS) {
			continue;
		}
		capabilities += string("\t") + name + "\n";
		int value;
#define GET_ATTR(attr) \
		{ \
			if(cuDeviceGetAttribute(&value, \
			                        CU_DEVICE_ATTRIBUTE_##attr, \
			                        num) == CUDA_SUCCESS) \
			{ \
				capabilities += string_printf("\t\tCU_DEVICE_ATTRIBUTE_" #attr "\t\t\t%d\n", \
				                              value); \
			} \
		} (void)0
		/* TODO(sergey): Strip all attributes which are not useful for us
		 * or do not depend on the driver.
		 */
		GET_ATTR(MAX_THREADS_PER_BLOCK);
		GET_ATTR(MAX_BLOCK_DIM_X);
		GET_ATTR(MAX_BLOCK_DIM_Y);
		GET_ATTR(MAX_BLOCK_DIM_Z);
		GET_ATTR(MAX_GRID_DIM_X);
		GET_ATTR(MAX_GRID_DIM_Y);
		GET_ATTR(MAX_GRID_DIM_Z);
		GET_ATTR(MAX_SHARED_MEMORY_PER_BLOCK);
		GET_ATTR(SHARED_MEMORY_PER_BLOCK);
		GET_ATTR(TOTAL_CONSTANT_MEMORY);
		GET_ATTR(WARP_SIZE);
		GET_ATTR(MAX_PITCH);
		GET_ATTR(MAX_REGISTERS_PER_BLOCK);
		GET_ATTR(REGISTERS_PER_BLOCK);
		GET_ATTR(CLOCK_RATE);
		GET_ATTR(TEXTURE_ALIGNMENT);
		GET_ATTR(GPU_OVERLAP);
		GET_ATTR(MULTIPROCESSOR_COUNT);
		GET_ATTR(KERNEL_EXEC_TIMEOUT);
		GET_ATTR(INTEGRATED);
		GET_ATTR(CAN_MAP_HOST_MEMORY);
		GET_ATTR(COMPUTE_MODE);
		GET_ATTR(MAXIMUM_TEXTURE1D_WIDTH);
		GET_ATTR(MAXIMUM_TEXTURE2D_WIDTH);
		GET_ATTR(MAXIMUM_TEXTURE2D_HEIGHT);
		GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH);
		GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT);
		GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH);
		GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_WIDTH);
		GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_HEIGHT);
		GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_LAYERS);
		GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_WIDTH);
		GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_HEIGHT);
		GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES);
		GET_ATTR(SURFACE_ALIGNMENT);
		GET_ATTR(CONCURRENT_KERNELS);
		GET_ATTR(ECC_ENABLED);
		GET_ATTR(TCC_DRIVER);
		GET_ATTR(MEMORY_CLOCK_RATE);
		GET_ATTR(GLOBAL_MEMORY_BUS_WIDTH);
		GET_ATTR(L2_CACHE_SIZE);
		GET_ATTR(MAX_THREADS_PER_MULTIPROCESSOR);
		GET_ATTR(ASYNC_ENGINE_COUNT);
		GET_ATTR(UNIFIED_ADDRESSING);
		GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_WIDTH);
		GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_LAYERS);
		GET_ATTR(CAN_TEX2D_GATHER);
		GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_WIDTH);
		GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_HEIGHT);
		GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE);
		GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE);
		GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE);
		GET_ATTR(TEXTURE_PITCH_ALIGNMENT);
		GET_ATTR(MAXIMUM_TEXTURECUBEMAP_WIDTH);
		GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH);
		GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS);
		GET_ATTR(MAXIMUM_SURFACE1D_WIDTH);
		GET_ATTR(MAXIMUM_SURFACE2D_WIDTH);
		GET_ATTR(MAXIMUM_SURFACE2D_HEIGHT);
		GET_ATTR(MAXIMUM_SURFACE3D_WIDTH);
		GET_ATTR(MAXIMUM_SURFACE3D_HEIGHT);
		GET_ATTR(MAXIMUM_SURFACE3D_DEPTH);
		GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_WIDTH);
		GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_LAYERS);
		GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_WIDTH);
		GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_HEIGHT);
		GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_LAYERS);
		GET_ATTR(MAXIMUM_SURFACECUBEMAP_WIDTH);
		GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH);
		GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS);
		GET_ATTR(MAXIMUM_TEXTURE1D_LINEAR_WIDTH);
		GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_WIDTH);
		GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_HEIGHT);
		GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_PITCH);
		GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH);
		GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT);
		GET_ATTR(COMPUTE_CAPABILITY_MAJOR);
		GET_ATTR(COMPUTE_CAPABILITY_MINOR);
		GET_ATTR(MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH);
		GET_ATTR(STREAM_PRIORITIES_SUPPORTED);
		GET_ATTR(GLOBAL_L1_CACHE_SUPPORTED);
		GET_ATTR(LOCAL_L1_CACHE_SUPPORTED);
		GET_ATTR(MAX_SHARED_MEMORY_PER_MULTIPROCESSOR);
		GET_ATTR(MAX_REGISTERS_PER_MULTIPROCESSOR);
		GET_ATTR(MANAGED_MEMORY);
		GET_ATTR(MULTI_GPU_BOARD);
		GET_ATTR(MULTI_GPU_BOARD_GROUP_ID);
#undef GET_ATTR
		capabilities += "\n";
	}

	return capabilities;
}
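
/* Example: dumping the capability report. A minimal sketch, assuming only
 * device_cuda_capabilities() as defined above (it returns one tab-indented
 * block of CU_DEVICE_ATTRIBUTE_* name/value pairs per device, or an error
 * string on failure):
 *
 *   string caps = device_cuda_capabilities();
 *   printf("%s", caps.c_str());
 *
 * The raw attribute names are kept verbatim, which makes the output easy to
 * compare against the CUDA driver API documentation.
 */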

CCL_NAMESPACE_END