/*
 * Copyright 2011-2013 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "device.h"
#include "device_intern.h"

#include "buffers.h"

#ifdef WITH_CUDA_DYNLOAD
#  include "cuew.h"
#else
#  include "util_opengl.h"
#  include <cuda.h>
#  include <cudaGL.h>
#endif
#include "util_debug.h"
#include "util_logging.h"
#include "util_map.h"
#include "util_md5.h"
#include "util_opengl.h"
#include "util_path.h"
#include "util_string.h"
#include "util_system.h"
#include "util_types.h"
#include "util_time.h"

CCL_NAMESPACE_BEGIN

#ifndef WITH_CUDA_DYNLOAD

/* Transparently implement some functions, so the majority of the file does
 * not need to worry about the difference between dynamically loaded and
 * linked CUDA at all.
 */

namespace {

const char *cuewErrorString(CUresult result)
{
        /* We can only give the error code here without major code duplication;
         * that should be enough, since dynamic loading is only disabled by
         * folks who know what they're doing anyway.
         *
         * NOTE: Avoid calling this from several threads.
         */
        static string error;
        error = string_printf("%d", result);
        return error.c_str();
}

const char *cuewCompilerPath(void)
{
        return CYCLES_CUDA_NVCC_EXECUTABLE;
}

int cuewCompilerVersion(void)
{
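        /* CUDA_VERSION encodes the toolkit version as 1000*major + 10*minor
         * (e.g. 7050 for CUDA 7.5), so this folds it down to the same
         * 10*major + minor value that cuew reports (75 for 7.5). */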
        return (CUDA_VERSION / 100) + (CUDA_VERSION % 100 / 10);
}

}  /* namespace */
#endif  /* WITH_CUDA_DYNLOAD */

class CUDADevice : public Device
{
public:
        DedicatedTaskPool task_pool;
        CUdevice cuDevice;
        CUcontext cuContext;
        CUmodule cuModule;
        map<device_ptr, bool> tex_interp_map;
        map<device_ptr, uint> tex_bindless_map;
        int cuDevId;
        int cuDevArchitecture;
        bool first_error;

        struct PixelMem {
                GLuint cuPBO;
                CUgraphicsResource cuPBOresource;
                GLuint cuTexId;
                int w, h;
        };

        map<device_ptr, PixelMem> pixel_mem_map;

        /* Bindless Textures */
        device_vector<uint> bindless_mapping;
        bool need_bindless_mapping;

        CUdeviceptr cuda_device_ptr(device_ptr mem)
        {
                return (CUdeviceptr)mem;
        }

        static bool have_precompiled_kernels()
        {
                string cubins_path = path_get("lib");
                return path_exists(cubins_path);
        }

/*#ifdef NDEBUG
#define cuda_abort()
#else
#define cuda_abort() abort()
#endif*/
        void cuda_error_documentation()
        {
                if(first_error) {
                        fprintf(stderr, "\nRefer to the Cycles GPU rendering documentation for possible solutions:\n");
                        fprintf(stderr, "http://www.blender.org/manual/render/cycles/gpu_rendering.html\n\n");
                        first_error = false;
                }
        }

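/* Check a CUDA call and report failures: the first error message is kept for
 * the UI, and every error is printed to stderr. The trailing (void)0 forces a
 * semicolon at the call site so the macro behaves like a statement. */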
#define cuda_assert(stmt) \
        { \
                CUresult result = stmt; \
                \
                if(result != CUDA_SUCCESS) { \
                        string message = string_printf("CUDA error: %s in %s", cuewErrorString(result), #stmt); \
                        if(error_msg == "") \
                                error_msg = message; \
                        fprintf(stderr, "%s\n", message.c_str()); \
                        /*cuda_abort();*/ \
                        cuda_error_documentation(); \
                } \
        } (void)0

        bool cuda_error_(CUresult result, const string& stmt)
        {
                if(result == CUDA_SUCCESS)
                        return false;

                string message = string_printf("CUDA error at %s: %s", stmt.c_str(), cuewErrorString(result));
                if(error_msg == "")
                        error_msg = message;
                fprintf(stderr, "%s\n", message.c_str());
                cuda_error_documentation();
                return true;
        }

#define cuda_error(stmt) cuda_error_(stmt, #stmt)

        void cuda_error_message(const string& message)
        {
                if(error_msg == "")
                        error_msg = message;
                fprintf(stderr, "%s\n", message.c_str());
                cuda_error_documentation();
        }

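        /* CUDA contexts are bound per-thread, so every entry point makes this
         * device's context current on the calling thread and clears it again
         * once the work is done. */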
        void cuda_push_context()
        {
                cuda_assert(cuCtxSetCurrent(cuContext));
        }

        void cuda_pop_context()
        {
                cuda_assert(cuCtxSetCurrent(NULL));
        }

        CUDADevice(DeviceInfo& info, Stats &stats, bool background_)
        : Device(info, stats, background_)
        {
                first_error = true;
                background = background_;

                cuDevId = info.num;
                cuDevice = 0;
                cuContext = 0;

                need_bindless_mapping = false;

                /* initialize */
                if(cuda_error(cuInit(0)))
                        return;

                /* setup device and context */
                if(cuda_error(cuDeviceGet(&cuDevice, cuDevId)))
                        return;

                CUresult result;

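                /* Prefer a GL interop context so the viewport can display
                 * render results directly; if that fails (e.g. this GPU is
                 * not driving the display), fall back to a plain compute
                 * context and treat the device as a background device. */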
                if(background) {
                        result = cuCtxCreate(&cuContext, 0, cuDevice);
                }
                else {
                        result = cuGLCtxCreate(&cuContext, 0, cuDevice);

                        if(result != CUDA_SUCCESS) {
                                result = cuCtxCreate(&cuContext, 0, cuDevice);
                                background = true;
                        }
                }

                if(cuda_error_(result, "cuCtxCreate"))
                        return;

                int major, minor;
                cuDeviceComputeCapability(&major, &minor, cuDevId);
                cuDevArchitecture = major*100 + minor*10;

                cuda_pop_context();
        }

        ~CUDADevice()
        {
                task_pool.stop();

                if(info.has_bindless_textures) {
                        tex_free(bindless_mapping);
                }

                cuda_assert(cuCtxDestroy(cuContext));
        }

        bool support_device(const DeviceRequestedFeatures& /*requested_features*/)
        {
                int major, minor;
                cuDeviceComputeCapability(&major, &minor, cuDevId);

                /* We only support sm_20 and above */
                if(major < 2) {
                        cuda_error_message(string_printf("CUDA device supported only with compute capability 2.0 or up, found %d.%d.", major, minor));
                        return false;
                }

                return true;
        }

        bool use_adaptive_compilation()
        {
                return DebugFlags().cuda.adaptive_compile;
        }

        /* Common NVCC flags which stay the same regardless of shading model or
         * kernel sources md5, and only depend on compiler or compilation settings.
         */
        string compile_kernel_get_common_cflags(
                const DeviceRequestedFeatures& requested_features)
        {
                const int cuda_version = cuewCompilerVersion();
                const int machine = system_cpu_bits();
                const string kernel_path = path_get("kernel");
                const string include = kernel_path;
                string cflags = string_printf("-m%d "
                                              "--ptxas-options=\"-v\" "
                                              "--use_fast_math "
                                              "-DNVCC "
                                              "-D__KERNEL_CUDA_VERSION__=%d "
                                              "-I\"%s\"",
                                              machine,
                                              cuda_version,
                                              include.c_str());
                if(use_adaptive_compilation()) {
                        cflags += " " + requested_features.get_build_options();
                }
                const char *extra_cflags = getenv("CYCLES_CUDA_EXTRA_CFLAGS");
                if(extra_cflags) {
                        cflags += string(" ") + string(extra_cflags);
                }
#ifdef WITH_CYCLES_DEBUG
                cflags += " -D__KERNEL_DEBUG__";
#endif
                return cflags;
        }

        bool compile_check_compiler() {
                const char *nvcc = cuewCompilerPath();
                if(nvcc == NULL) {
                        cuda_error_message("CUDA nvcc compiler not found. "
                                           "Install CUDA toolkit in default location.");
                        return false;
                }
                const int cuda_version = cuewCompilerVersion();
                VLOG(1) << "Found nvcc " << nvcc
                        << ", CUDA version " << cuda_version
                        << ".";
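                /* cuewCompilerVersion() packs the version as 10*major + minor,
                 * e.g. 75 for nvcc 7.5 and 80 for 8.0. */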
                const int major = cuda_version / 10, minor = cuda_version % 10;
                if(cuda_version == 0) {
                        cuda_error_message("CUDA nvcc compiler version could not be parsed.");
                        return false;
                }
                if(cuda_version < 60) {
                        printf("Unsupported CUDA version %d.%d detected, "
                               "you need CUDA 7.5 or newer.\n",
                               major, minor);
                        return false;
                }
                else if(cuda_version != 75 && cuda_version != 80) {
                        printf("CUDA version %d.%d detected, build may succeed but only "
                               "CUDA 7.5 and 8.0 are officially supported.\n",
                               major, minor);
                }
                return true;
        }

        string compile_kernel(const DeviceRequestedFeatures& requested_features)
        {
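                /* Kernel lookup order: a precompiled cubin shipped with Blender,
                 * then a previously built cubin from the user cache (keyed by
                 * kernel source md5 and cflags), and only then a fresh nvcc
                 * compile. */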
                /* Compute cubin name. */
                int major, minor;
                cuDeviceComputeCapability(&major, &minor, cuDevId);

                /* Attempt to use kernel provided with Blender. */
                if(!use_adaptive_compilation()) {
                        const string cubin = path_get(string_printf("lib/kernel_sm_%d%d.cubin",
                                                                    major, minor));
                        VLOG(1) << "Testing for pre-compiled kernel " << cubin << ".";
                        if(path_exists(cubin)) {
                                VLOG(1) << "Using precompiled kernel.";
                                return cubin;
                        }
                }

                const string common_cflags =
                        compile_kernel_get_common_cflags(requested_features);

                /* Try to use locally compiled kernel. */
                const string kernel_path = path_get("kernel");
                const string kernel_md5 = path_files_md5_hash(kernel_path);

                /* We include cflags into the md5 so that changing the cuda toolkit
                 * or other compiler command line arguments makes sure the cubin
                 * gets re-built.
                 */
                const string cubin_md5 = util_md5_string(kernel_md5 + common_cflags);

                const string cubin_file = string_printf("cycles_kernel_sm%d%d_%s.cubin",
                                                        major, minor,
                                                        cubin_md5.c_str());
                const string cubin = path_user_get(path_join("cache", cubin_file));
                VLOG(1) << "Testing for locally compiled kernel " << cubin << ".";
                if(path_exists(cubin)) {
                        VLOG(1) << "Using locally compiled kernel.";
                        return cubin;
                }

#ifdef _WIN32
                if(have_precompiled_kernels()) {
                        if(major < 2) {
                                cuda_error_message(string_printf(
                                        "CUDA device requires compute capability 2.0 or up, "
                                        "found %d.%d. Your GPU is not supported.",
                                        major, minor));
                        }
                        else {
                                cuda_error_message(string_printf(
                                        "CUDA binary kernel for this graphics card compute "
                                        "capability (%d.%d) not found.",
                                        major, minor));
                        }
                        return "";
                }
#endif

                /* Compile. */
                if(!compile_check_compiler()) {
                        return "";
                }
                const char *nvcc = cuewCompilerPath();
                const string kernel = path_join(kernel_path,
                                          path_join("kernels",
                                                    path_join("cuda", "kernel.cu")));
                double starttime = time_dt();
                printf("Compiling CUDA kernel ...\n");

                path_create_directories(cubin);

                string command = string_printf("\"%s\" "
                                               "-arch=sm_%d%d "
                                               "--cubin \"%s\" "
                                               "-o \"%s\" "
                                               "%s ",
                                               nvcc,
                                               major, minor,
                                               kernel.c_str(),
                                               cubin.c_str(),
                                               common_cflags.c_str());

                printf("%s\n", command.c_str());

                if(system(command.c_str()) == -1) {
                        cuda_error_message("Failed to execute compilation command, "
                                           "see console for details.");
                        return "";
                }

                /* Verify that compilation succeeded. */
                if(!path_exists(cubin)) {
                        cuda_error_message("CUDA kernel compilation failed, "
                                           "see console for details.");
                        return "";
                }

                printf("Kernel compilation finished in %.2lfs.\n", time_dt() - starttime);

                return cubin;
        }

        bool load_kernels(const DeviceRequestedFeatures& requested_features)
        {
                /* check if cuda init succeeded */
                if(cuContext == 0)
                        return false;

                /* check if GPU is supported */
                if(!support_device(requested_features))
                        return false;

                /* get kernel */
                string cubin = compile_kernel(requested_features);

                if(cubin == "")
                        return false;

                /* open module */
                cuda_push_context();

                string cubin_data;
                CUresult result;

                if(path_read_text(cubin, cubin_data))
                        result = cuModuleLoadData(&cuModule, cubin_data.c_str());
                else
                        result = CUDA_ERROR_FILE_NOT_FOUND;

                if(cuda_error_(result, "cuModuleLoad"))
                        cuda_error_message(string_printf("Failed loading CUDA kernel %s.", cubin.c_str()));

                cuda_pop_context();

                return (result == CUDA_SUCCESS);
        }

        void load_bindless_mapping()
        {
                if(info.has_bindless_textures && need_bindless_mapping) {
                        tex_free(bindless_mapping);
                        tex_alloc("__bindless_mapping", bindless_mapping, INTERPOLATION_NONE, EXTENSION_REPEAT);
                        need_bindless_mapping = false;
                }
        }

        void mem_alloc(device_memory& mem, MemoryType /*type*/)
        {
                cuda_push_context();
                CUdeviceptr device_pointer;
                size_t size = mem.memory_size();
                cuda_assert(cuMemAlloc(&device_pointer, size));
                mem.device_pointer = (device_ptr)device_pointer;
                mem.device_size = size;
                stats.mem_alloc(size);
                cuda_pop_context();
        }

        void mem_copy_to(device_memory& mem)
        {
                cuda_push_context();
                if(mem.device_pointer)
                        cuda_assert(cuMemcpyHtoD(cuda_device_ptr(mem.device_pointer), (void*)mem.data_pointer, mem.memory_size()));
                cuda_pop_context();
        }

        void mem_copy_from(device_memory& mem, int y, int w, int h, int elem)
        {
                size_t offset = elem*y*w;
                size_t size = elem*w*h;

                cuda_push_context();
                if(mem.device_pointer) {
                        cuda_assert(cuMemcpyDtoH((uchar*)mem.data_pointer + offset,
                                                 (CUdeviceptr)(mem.device_pointer + offset), size));
                }
                else {
                        memset((char*)mem.data_pointer + offset, 0, size);
                }
                cuda_pop_context();
        }

        void mem_zero(device_memory& mem)
        {
                memset((void*)mem.data_pointer, 0, mem.memory_size());

                cuda_push_context();
                if(mem.device_pointer)
                        cuda_assert(cuMemsetD8(cuda_device_ptr(mem.device_pointer), 0, mem.memory_size()));
                cuda_pop_context();
        }

        void mem_free(device_memory& mem)
        {
                if(mem.device_pointer) {
                        cuda_push_context();
                        cuda_assert(cuMemFree(cuda_device_ptr(mem.device_pointer)));
                        cuda_pop_context();

                        mem.device_pointer = 0;

                        stats.mem_free(mem.device_size);
                        mem.device_size = 0;
                }
        }

        void const_copy_to(const char *name, void *host, size_t size)
        {
                CUdeviceptr mem;
                size_t bytes;

                cuda_push_context();
                cuda_assert(cuModuleGetGlobal(&mem, &bytes, cuModule, name));
                //assert(bytes == size);
                cuda_assert(cuMemcpyHtoD(mem, host, size));
                cuda_pop_context();
        }

        void tex_alloc(const char *name,
                       device_memory& mem,
                       InterpolationType interpolation,
                       ExtensionType extension)
        {
                VLOG(1) << "Texture allocate: " << name << ", "
                        << string_human_readable_number(mem.memory_size()) << " bytes. ("
                        << string_human_readable_size(mem.memory_size()) << ")";

                /* Check if we are on sm_30 or above.
                 * We use arrays and bindless textures for storage there. */
                bool has_bindless_textures = info.has_bindless_textures;

                /* General variables for both architectures */
                string bind_name = name;
                size_t dsize = datatype_size(mem.data_type);
                size_t size = mem.memory_size();

                CUaddress_mode address_mode = CU_TR_ADDRESS_MODE_WRAP;
                switch(extension) {
                        case EXTENSION_REPEAT:
                                address_mode = CU_TR_ADDRESS_MODE_WRAP;
                                break;
                        case EXTENSION_EXTEND:
                                address_mode = CU_TR_ADDRESS_MODE_CLAMP;
                                break;
                        case EXTENSION_CLIP:
                                address_mode = CU_TR_ADDRESS_MODE_BORDER;
                                break;
                        default:
                                assert(0);
                                break;
                }

                CUfilter_mode filter_mode;
                if(interpolation == INTERPOLATION_CLOSEST) {
                        filter_mode = CU_TR_FILTER_MODE_POINT;
                }
                else {
                        filter_mode = CU_TR_FILTER_MODE_LINEAR;
                }

                CUarray_format_enum format;
                switch(mem.data_type) {
                        case TYPE_UCHAR: format = CU_AD_FORMAT_UNSIGNED_INT8; break;
                        case TYPE_UINT: format = CU_AD_FORMAT_UNSIGNED_INT32; break;
                        case TYPE_INT: format = CU_AD_FORMAT_SIGNED_INT32; break;
                        case TYPE_FLOAT: format = CU_AD_FORMAT_FLOAT; break;
                        default: assert(0); return;
                }

                /* General variables for Fermi */
                CUtexref texref = NULL;

                if(!has_bindless_textures) {
                        if(mem.data_depth > 1) {
                                /* Kernel uses different bind names for 2d and 3d float textures,
                                 * so we have to adjust a couple of things here.
                                 */
                                vector<string> tokens;
                                string_split(tokens, name, "_");
                                bind_name = string_printf("__tex_image_%s_3d_%s",
                                                          tokens[2].c_str(),
                                                          tokens[3].c_str());
                        }

                        cuda_push_context();
                        cuda_assert(cuModuleGetTexRef(&texref, cuModule, bind_name.c_str()));
                        cuda_pop_context();

                        if(!texref) {
                                return;
                        }
                }

                /* Data Storage */
                if(interpolation == INTERPOLATION_NONE) {
                        if(has_bindless_textures) {
                                mem_alloc(mem, MEM_READ_ONLY);
                                mem_copy_to(mem);

                                cuda_push_context();

                                CUdeviceptr cumem;
                                size_t cubytes;

                                cuda_assert(cuModuleGetGlobal(&cumem, &cubytes, cuModule, bind_name.c_str()));

                                if(cubytes == 8) {
                                        /* 64 bit device pointer */
                                        uint64_t ptr = mem.device_pointer;
                                        cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
                                }
                                else {
                                        /* 32 bit device pointer */
                                        uint32_t ptr = (uint32_t)mem.device_pointer;
                                        cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
                                }

                                cuda_pop_context();
                        }
                        else {
                                mem_alloc(mem, MEM_READ_ONLY);
                                mem_copy_to(mem);

                                cuda_push_context();

                                cuda_assert(cuTexRefSetAddress(NULL, texref, cuda_device_ptr(mem.device_pointer), size));
                                cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_POINT));
                                cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_READ_AS_INTEGER));

                                cuda_pop_context();
                        }
                }
                /* Texture Storage */
                else {
                        CUarray handle = NULL;

                        cuda_push_context();

                        if(mem.data_depth > 1) {
                                CUDA_ARRAY3D_DESCRIPTOR desc;

                                desc.Width = mem.data_width;
                                desc.Height = mem.data_height;
                                desc.Depth = mem.data_depth;
                                desc.Format = format;
                                desc.NumChannels = mem.data_elements;
                                desc.Flags = 0;

                                cuda_assert(cuArray3DCreate(&handle, &desc));
                        }
                        else {
                                CUDA_ARRAY_DESCRIPTOR desc;

                                desc.Width = mem.data_width;
                                desc.Height = mem.data_height;
                                desc.Format = format;
                                desc.NumChannels = mem.data_elements;

                                cuda_assert(cuArrayCreate(&handle, &desc));
                        }

                        if(!handle) {
                                cuda_pop_context();
                                return;
                        }

                        /* Allocate 3D, 2D or 1D memory */
                        if(mem.data_depth > 1) {
                                CUDA_MEMCPY3D param;
                                memset(&param, 0, sizeof(param));
                                param.dstMemoryType = CU_MEMORYTYPE_ARRAY;
                                param.dstArray = handle;
                                param.srcMemoryType = CU_MEMORYTYPE_HOST;
                                param.srcHost = (void*)mem.data_pointer;
                                param.srcPitch = mem.data_width*dsize*mem.data_elements;
                                param.WidthInBytes = param.srcPitch;
                                param.Height = mem.data_height;
                                param.Depth = mem.data_depth;

                                cuda_assert(cuMemcpy3D(&param));
                        }
                        else if(mem.data_height > 1) {
                                CUDA_MEMCPY2D param;
                                memset(&param, 0, sizeof(param));
                                param.dstMemoryType = CU_MEMORYTYPE_ARRAY;
                                param.dstArray = handle;
                                param.srcMemoryType = CU_MEMORYTYPE_HOST;
                                param.srcHost = (void*)mem.data_pointer;
                                param.srcPitch = mem.data_width*dsize*mem.data_elements;
                                param.WidthInBytes = param.srcPitch;
                                param.Height = mem.data_height;

                                cuda_assert(cuMemcpy2D(&param));
                        }
                        else
                                cuda_assert(cuMemcpyHtoA(handle, 0, (void*)mem.data_pointer, size));

                        /* Fermi and Kepler */
                        mem.device_pointer = (device_ptr)handle;
                        mem.device_size = size;

                        stats.mem_alloc(size);

                        /* Bindless Textures - Kepler */
                        if(has_bindless_textures) {
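                                /* The texture slot index is encoded as the trailing
                                 * number of the bind name (a name ending in _005 maps
                                 * to slot 5). */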
                                int flat_slot = 0;
                                if(string_startswith(name, "__tex_image")) {
                                        int pos = string(name).rfind("_");
                                        flat_slot = atoi(name + pos + 1);
                                }
                                else {
                                        assert(0);
                                }

                                CUDA_RESOURCE_DESC resDesc;
                                memset(&resDesc, 0, sizeof(resDesc));
                                resDesc.resType = CU_RESOURCE_TYPE_ARRAY;
                                resDesc.res.array.hArray = handle;
                                resDesc.flags = 0;

                                CUDA_TEXTURE_DESC texDesc;
                                memset(&texDesc, 0, sizeof(texDesc));
                                texDesc.addressMode[0] = address_mode;
                                texDesc.addressMode[1] = address_mode;
                                texDesc.addressMode[2] = address_mode;
                                texDesc.filterMode = filter_mode;
                                texDesc.flags = CU_TRSF_NORMALIZED_COORDINATES;

                                CUtexObject tex = 0;
                                cuda_assert(cuTexObjectCreate(&tex, &resDesc, &texDesc, NULL));

                                /* Safety check: the object handle is stored in a
                                 * 32 bit mapping table, so it must fit into a uint. */
                                if((uint64_t)tex > UINT_MAX) {
                                        assert(0);
                                }

                                /* Resize once */
                                if(flat_slot >= bindless_mapping.size())
                                        bindless_mapping.resize(4096); /*TODO(dingto): Make this a variable */

                                /* Set Mapping and tag that we need to (re-)upload to device */
                                bindless_mapping.get_data()[flat_slot] = (uint)tex;
                                tex_bindless_map[mem.device_pointer] = (uint)tex;
                                need_bindless_mapping = true;
                        }
                        /* Regular Textures - Fermi */
                        else {
                                cuda_assert(cuTexRefSetArray(texref, handle, CU_TRSA_OVERRIDE_FORMAT));
                                cuda_assert(cuTexRefSetFilterMode(texref, filter_mode));
                                cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_NORMALIZED_COORDINATES));
                        }

                        cuda_pop_context();
                }

                /* Fermi, Data and Image Textures */
                if(!has_bindless_textures) {
                        cuda_push_context();

                        cuda_assert(cuTexRefSetAddressMode(texref, 0, address_mode));
                        cuda_assert(cuTexRefSetAddressMode(texref, 1, address_mode));
                        if(mem.data_depth > 1) {
                                cuda_assert(cuTexRefSetAddressMode(texref, 2, address_mode));
                        }

                        cuda_assert(cuTexRefSetFormat(texref, format, mem.data_elements));

                        cuda_pop_context();
                }

                /* Fermi and Kepler */
                tex_interp_map[mem.device_pointer] = (interpolation != INTERPOLATION_NONE);
        }

        void tex_free(device_memory& mem)
        {
                if(mem.device_pointer) {
                        if(tex_interp_map[mem.device_pointer]) {
                                cuda_push_context();
                                cuArrayDestroy((CUarray)mem.device_pointer);
                                cuda_pop_context();

                                /* Free CUtexObject (Bindless Textures) */
                                if(info.has_bindless_textures && tex_bindless_map[mem.device_pointer]) {
                                        uint flat_slot = tex_bindless_map[mem.device_pointer];
                                        cuTexObjectDestroy(flat_slot);
                                }

                                tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
                                mem.device_pointer = 0;

                                stats.mem_free(mem.device_size);
                                mem.device_size = 0;
                        }
                        else {
                                tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
                                mem_free(mem);
                        }
                }
        }

        void path_trace(RenderTile& rtile, int sample, bool branched)
        {
                if(have_error())
                        return;

                cuda_push_context();

                CUfunction cuPathTrace;
                CUdeviceptr d_buffer = cuda_device_ptr(rtile.buffer);
                CUdeviceptr d_rng_state = cuda_device_ptr(rtile.rng_state);

                /* get kernel function */
                if(branched) {
                        cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_branched_path_trace"));
                }
                else {
                        cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_path_trace"));
                }

                if(have_error())
                        return;

                /* pass in parameters */
                void *args[] = {&d_buffer,
                                &d_rng_state,
                                &sample,
                                &rtile.x,
                                &rtile.y,
                                &rtile.w,
                                &rtile.h,
                                &rtile.offset,
                                &rtile.stride};

                /* launch kernel */
                int threads_per_block;
                cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuPathTrace));

                /*int num_registers;
                cuda_assert(cuFuncGetAttribute(&num_registers, CU_FUNC_ATTRIBUTE_NUM_REGS, cuPathTrace));

                printf("threads_per_block %d\n", threads_per_block);
                printf("num_registers %d\n", num_registers);*/

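                /* Use a roughly square thread block sized to the kernel's
                 * maximum threads per block, with enough blocks to cover the
                 * whole tile. */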
                int xthreads = (int)sqrt(threads_per_block);
                int ythreads = (int)sqrt(threads_per_block);
                int xblocks = (rtile.w + xthreads - 1)/xthreads;
                int yblocks = (rtile.h + ythreads - 1)/ythreads;

                cuda_assert(cuFuncSetCacheConfig(cuPathTrace, CU_FUNC_CACHE_PREFER_L1));

                cuda_assert(cuLaunchKernel(cuPathTrace,
                                           xblocks, yblocks, 1, /* blocks */
                                           xthreads, ythreads, 1, /* threads */
                                           0, 0, args, 0));

                cuda_assert(cuCtxSynchronize());

                cuda_pop_context();
        }

        void film_convert(DeviceTask& task, device_ptr buffer, device_ptr rgba_byte, device_ptr rgba_half)
        {
                if(have_error())
                        return;

                cuda_push_context();

                CUfunction cuFilmConvert;
                CUdeviceptr d_rgba = map_pixels((rgba_byte)? rgba_byte: rgba_half);
                CUdeviceptr d_buffer = cuda_device_ptr(buffer);

                /* get kernel function */
                if(rgba_half) {
                        cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_half_float"));
                }
                else {
                        cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_byte"));
                }

                float sample_scale = 1.0f/(task.sample + 1);

                /* pass in parameters */
                void *args[] = {&d_rgba,
                                &d_buffer,
                                &sample_scale,
                                &task.x,
                                &task.y,
                                &task.w,
                                &task.h,
                                &task.offset,
                                &task.stride};

                /* launch kernel */
                int threads_per_block;
                cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuFilmConvert));

                int xthreads = (int)sqrt(threads_per_block);
                int ythreads = (int)sqrt(threads_per_block);
                int xblocks = (task.w + xthreads - 1)/xthreads;
                int yblocks = (task.h + ythreads - 1)/ythreads;

                cuda_assert(cuFuncSetCacheConfig(cuFilmConvert, CU_FUNC_CACHE_PREFER_L1));

                cuda_assert(cuLaunchKernel(cuFilmConvert,
                                           xblocks, yblocks, 1, /* blocks */
                                           xthreads, ythreads, 1, /* threads */
                                           0, 0, args, 0));

                unmap_pixels((rgba_byte)? rgba_byte: rgba_half);

                cuda_pop_context();
        }

        void shader(DeviceTask& task)
        {
                if(have_error())
                        return;

                cuda_push_context();

                CUfunction cuShader;
                CUdeviceptr d_input = cuda_device_ptr(task.shader_input);
                CUdeviceptr d_output = cuda_device_ptr(task.shader_output);
                CUdeviceptr d_output_luma = cuda_device_ptr(task.shader_output_luma);

                /* get kernel function */
                if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
                        cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_bake"));
                }
                else {
                        cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_shader"));
                }

                /* do tasks in smaller chunks, so we can cancel them */
                const int shader_chunk_size = 65536;
                const int start = task.shader_x;
                const int end = task.shader_x + task.shader_w;
                int offset = task.offset;

                bool canceled = false;
                for(int sample = 0; sample < task.num_samples && !canceled; sample++) {
                        for(int shader_x = start; shader_x < end; shader_x += shader_chunk_size) {
                                int shader_w = min(shader_chunk_size, end - shader_x);

                                /* pass in parameters */
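                                /* The bake and regular shader kernels expect slightly
                                 * different argument lists, so assemble args dynamically. */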
                                void *args[8];
                                int arg = 0;
                                args[arg++] = &d_input;
                                args[arg++] = &d_output;
                                if(task.shader_eval_type < SHADER_EVAL_BAKE) {
                                        args[arg++] = &d_output_luma;
                                }
                                args[arg++] = &task.shader_eval_type;
                                if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
                                        args[arg++] = &task.shader_filter;
                                }
                                args[arg++] = &shader_x;
                                args[arg++] = &shader_w;
                                args[arg++] = &offset;
                                args[arg++] = &sample;

                                /* launch kernel */
                                int threads_per_block;
                                cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuShader));

                                int xblocks = (shader_w + threads_per_block - 1)/threads_per_block;

                                cuda_assert(cuFuncSetCacheConfig(cuShader, CU_FUNC_CACHE_PREFER_L1));
                                cuda_assert(cuLaunchKernel(cuShader,
                                                           xblocks, 1, 1, /* blocks */
                                                           threads_per_block, 1, 1, /* threads */
                                                           0, 0, args, 0));

                                cuda_assert(cuCtxSynchronize());

                                if(task.get_cancel()) {
                                        canceled = true;
                                        break;
                                }
                        }

                        task.update_progress(NULL);
                }

                cuda_pop_context();
        }

        CUdeviceptr map_pixels(device_ptr mem)
        {
                if(!background) {
                        PixelMem pmem = pixel_mem_map[mem];
                        CUdeviceptr buffer;

                        size_t bytes;
                        cuda_assert(cuGraphicsMapResources(1, &pmem.cuPBOresource, 0));
                        cuda_assert(cuGraphicsResourceGetMappedPointer(&buffer, &bytes, pmem.cuPBOresource));

                        return buffer;
                }

                return cuda_device_ptr(mem);
        }

        void unmap_pixels(device_ptr mem)
        {
                if(!background) {
                        PixelMem pmem = pixel_mem_map[mem];

                        cuda_assert(cuGraphicsUnmapResources(1, &pmem.cuPBOresource, 0));
                }
        }

        void pixels_alloc(device_memory& mem)
        {
                if(!background) {
                        PixelMem pmem;

                        pmem.w = mem.data_width;
                        pmem.h = mem.data_height;

                        cuda_push_context();

                        glGenBuffers(1, &pmem.cuPBO);
                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
                        if(mem.data_type == TYPE_HALF)
                                glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(GLhalf)*4, NULL, GL_DYNAMIC_DRAW);
                        else
                                glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(uint8_t)*4, NULL, GL_DYNAMIC_DRAW);

                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);

                        glGenTextures(1, &pmem.cuTexId);
                        glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
                        if(mem.data_type == TYPE_HALF)
                                glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F_ARB, pmem.w, pmem.h, 0, GL_RGBA, GL_HALF_FLOAT, NULL);
                        else
                                glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, pmem.w, pmem.h, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
                        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
                        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
                        glBindTexture(GL_TEXTURE_2D, 0);

                        CUresult result = cuGraphicsGLRegisterBuffer(&pmem.cuPBOresource, pmem.cuPBO, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);

                        if(result == CUDA_SUCCESS) {
                                cuda_pop_context();

                                mem.device_pointer = pmem.cuTexId;
                                pixel_mem_map[mem.device_pointer] = pmem;

                                mem.device_size = mem.memory_size();
                                stats.mem_alloc(mem.device_size);

                                return;
                        }
                        else {
                                /* failed to register buffer, fallback to no interop */
                                glDeleteBuffers(1, &pmem.cuPBO);
                                glDeleteTextures(1, &pmem.cuTexId);

                                cuda_pop_context();

                                background = true;
                        }
                }

                Device::pixels_alloc(mem);
        }

        void pixels_copy_from(device_memory& mem, int y, int w, int h)
        {
                if(!background) {
                        PixelMem pmem = pixel_mem_map[mem.device_pointer];

                        cuda_push_context();

                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
                        uchar *pixels = (uchar*)glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_READ_ONLY);
                        size_t offset = sizeof(uchar)*4*y*w;
                        memcpy((uchar*)mem.data_pointer + offset, pixels + offset, sizeof(uchar)*4*w*h);
                        glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);

                        cuda_pop_context();

                        return;
                }

                Device::pixels_copy_from(mem, y, w, h);
        }

        void pixels_free(device_memory& mem)
        {
                if(mem.device_pointer) {
                        if(!background) {
                                PixelMem pmem = pixel_mem_map[mem.device_pointer];

                                cuda_push_context();

                                cuda_assert(cuGraphicsUnregisterResource(pmem.cuPBOresource));
                                glDeleteBuffers(1, &pmem.cuPBO);
                                glDeleteTextures(1, &pmem.cuTexId);

                                cuda_pop_context();

                                pixel_mem_map.erase(pixel_mem_map.find(mem.device_pointer));
                                mem.device_pointer = 0;

                                stats.mem_free(mem.device_size);
                                mem.device_size = 0;

                                return;
                        }

                        Device::pixels_free(mem);
                }
        }

        void draw_pixels(device_memory& mem, int y, int w, int h, int dx, int dy, int width, int height, bool transparent,
                const DeviceDrawParams &draw_params)
        {
                if(!background) {
                        PixelMem pmem = pixel_mem_map[mem.device_pointer];
                        float *vpointer;

                        cuda_push_context();

                        /* for multi devices, this assumes the inefficient method that we allocate
                         * all pixels on the device even though we only render to a subset */
                        size_t offset = 4*y*w;

                        if(mem.data_type == TYPE_HALF)
                                offset *= sizeof(GLhalf);
                        else
                                offset *= sizeof(uint8_t);

                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
                        glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
                        if(mem.data_type == TYPE_HALF)
                                glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_HALF_FLOAT, (void*)offset);
                        else
                                glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, (void*)offset);
                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);

                        glEnable(GL_TEXTURE_2D);

                        if(transparent) {
                                glEnable(GL_BLEND);
                                glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
                        }

                        glColor3f(1.0f, 1.0f, 1.0f);

                        if(draw_params.bind_display_space_shader_cb) {
                                draw_params.bind_display_space_shader_cb();
                        }

                        if(!vertex_buffer)
                                glGenBuffers(1, &vertex_buffer);

                        glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
                        /* invalidate old contents - avoids stalling if buffer is still waiting in queue to be rendered */
                        glBufferData(GL_ARRAY_BUFFER, 16 * sizeof(float), NULL, GL_STREAM_DRAW);

                        vpointer = (float *)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);

                        if(vpointer) {
                                /* texture coordinate - vertex pair */
                                vpointer[0] = 0.0f;
                                vpointer[1] = 0.0f;
                                vpointer[2] = dx;
                                vpointer[3] = dy;

                                vpointer[4] = (float)w/(float)pmem.w;
                                vpointer[5] = 0.0f;
                                vpointer[6] = (float)width + dx;
                                vpointer[7] = dy;

                                vpointer[8] = (float)w/(float)pmem.w;
                                vpointer[9] = (float)h/(float)pmem.h;
                                vpointer[10] = (float)width + dx;
                                vpointer[11] = (float)height + dy;

                                vpointer[12] = 0.0f;
                                vpointer[13] = (float)h/(float)pmem.h;
                                vpointer[14] = dx;
                                vpointer[15] = (float)height + dy;

                                glUnmapBuffer(GL_ARRAY_BUFFER);
                        }

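                        /* Interleaved layout: two texture coordinate floats followed
                         * by two vertex floats per corner, matching the 16-float
                         * buffer filled above. */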
                        glTexCoordPointer(2, GL_FLOAT, 4 * sizeof(float), 0);
                        glVertexPointer(2, GL_FLOAT, 4 * sizeof(float), (char *)NULL + 2 * sizeof(float));

                        glEnableClientState(GL_VERTEX_ARRAY);
                        glEnableClientState(GL_TEXTURE_COORD_ARRAY);

                        glDrawArrays(GL_TRIANGLE_FAN, 0, 4);

                        glDisableClientState(GL_TEXTURE_COORD_ARRAY);
                        glDisableClientState(GL_VERTEX_ARRAY);

                        glBindBuffer(GL_ARRAY_BUFFER, 0);

                        if(draw_params.unbind_display_space_shader_cb) {
                                draw_params.unbind_display_space_shader_cb();
                        }

                        if(transparent)
                                glDisable(GL_BLEND);

                        glBindTexture(GL_TEXTURE_2D, 0);
                        glDisable(GL_TEXTURE_2D);

                        cuda_pop_context();

                        return;
                }

                Device::draw_pixels(mem, y, w, h, dx, dy, width, height, transparent, draw_params);
        }
1235
1236         void thread_run(DeviceTask *task)
1237         {
1238                 if(task->type == DeviceTask::PATH_TRACE) {
1239                         RenderTile tile;
1240
1241                         bool branched = task->integrator_branched;
1242
1243                         /* Upload Bindless Mapping */
1244                         load_bindless_mapping();
1245
1246                         /* keep rendering tiles until done */
1247                         while(task->acquire_tile(this, tile)) {
1248                                 int start_sample = tile.start_sample;
1249                                 int end_sample = tile.start_sample + tile.num_samples;
1250
1251                                 for(int sample = start_sample; sample < end_sample; sample++) {
1252                                         if(task->get_cancel()) {
1253                                                 if(task->need_finish_queue == false)
1254                                                         break;
1255                                         }
1256
1257                                         path_trace(tile, sample, branched);
1258
1259                                         tile.sample = sample + 1;
1260
1261                                         task->update_progress(&tile);
1262                                 }
1263
1264                                 task->release_tile(tile);
1265                         }
1266                 }
1267                 else if(task->type == DeviceTask::SHADER) {
1268                         /* Upload Bindless Mapping */
1269                         load_bindless_mapping();
1270
1271                         shader(*task);
1272
1273                         cuda_push_context();
1274                         cuda_assert(cuCtxSynchronize());
1275                         cuda_pop_context();
1276                 }
1277         }
1278
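	/* Binds thread_run() to this device so the task can execute from the
	 * dedicated task pool. */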
1279         class CUDADeviceTask : public DeviceTask {
1280         public:
1281                 CUDADeviceTask(CUDADevice *device, DeviceTask& task)
1282                 : DeviceTask(task)
1283                 {
1284                         run = function_bind(&CUDADevice::thread_run, device, this);
1285                 }
1286         };
1287
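	/* A CUDA device renders a task in a single thread, so no splitting. */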
1288         int get_split_task_count(DeviceTask& /*task*/)
1289         {
1290                 return 1;
1291         }
1292
1293         void task_add(DeviceTask& task)
1294         {
1295                 if(task.type == DeviceTask::FILM_CONVERT) {
1296                         /* Must be done in the main thread due to OpenGL access. */
1297                         film_convert(task, task.buffer, task.rgba_byte, task.rgba_half);
1298
1299                         cuda_push_context();
1300                         cuda_assert(cuCtxSynchronize());
1301                         cuda_pop_context();
1302                 }
1303                 else {
1304                         task_pool.push(new CUDADeviceTask(this, task));
1305                 }
1306         }
1307
1308         void task_wait()
1309         {
1310                 task_pool.wait();
1311         }
1312
1313         void task_cancel()
1314         {
1315                 task_pool.cancel();
1316         }
1317 };
1318
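/* One-time CUDA initialization. With dynamic loading this checks that the
 * driver library can be opened and that either precompiled kernels or a CUDA
 * compiler is available; the result is cached for subsequent calls. */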
1319 bool device_cuda_init(void)
1320 {
1321 #ifdef WITH_CUDA_DYNLOAD
1322         static bool initialized = false;
1323         static bool result = false;
1324
1325         if(initialized)
1326                 return result;
1327
1328         initialized = true;
1329         int cuew_result = cuewInit();
1330         if(cuew_result == CUEW_SUCCESS) {
1331                 VLOG(1) << "CUEW initialization succeeded";
1332                 if(CUDADevice::have_precompiled_kernels()) {
1333                         VLOG(1) << "Found precompiled kernels";
1334                         result = true;
1335                 }
1336 #ifndef _WIN32
1337                 else if(cuewCompilerPath() != NULL) {
1338                         VLOG(1) << "Found CUDA compiler " << cuewCompilerPath();
1339                         result = true;
1340                 }
1341                 else {
1342                         VLOG(1) << "Neither precompiled kernels nor CUDA compiler was found,"
1343                                 << " unable to use CUDA";
1344                 }
1345 #endif
1346         }
1347         else {
1348                 VLOG(1) << "CUEW initialization failed: "
1349                         << ((cuew_result == CUEW_ERROR_ATEXIT_FAILED)
1350                             ? "Error setting up atexit() handler"
1351                             : "Error opening the library");
1352         }
1353
1354         return result;
1355 #else  /* WITH_CUDA_DYNLOAD */
1356         return true;
1357 #endif /* WITH_CUDA_DYNLOAD */
1358 }
1359
1360 Device *device_cuda_create(DeviceInfo& info, Stats& stats, bool background)
1361 {
1362         return new CUDADevice(info, stats, background);
1363 }
1364
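/* Enumerate usable CUDA devices, skipping compute capability below 2.0.
 * Display devices (those with a kernel execution timeout) are appended
 * after non-display devices. */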
1365 void device_cuda_info(vector<DeviceInfo>& devices)
1366 {
1367         CUresult result;
1368         int count = 0;
1369
1370         result = cuInit(0);
1371         if(result != CUDA_SUCCESS) {
1372                 if(result != CUDA_ERROR_NO_DEVICE)
1373                         fprintf(stderr, "CUDA cuInit: %s\n", cuewErrorString(result));
1374                 return;
1375         }
1376
1377         result = cuDeviceGetCount(&count);
1378         if(result != CUDA_SUCCESS) {
1379                 fprintf(stderr, "CUDA cuDeviceGetCount: %s\n", cuewErrorString(result));
1380                 return;
1381         }
1382
1383         vector<DeviceInfo> display_devices;
1384
1385         for(int num = 0; num < count; num++) {
1386                 char name[256];
1387                 int attr;
1388
1389                 if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS)
1390                         continue;
1391
1392                 int major, minor;
1393                 cuDeviceComputeCapability(&major, &minor, num);
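		/* Fermi (compute capability 2.0) is the minimum supported architecture. */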
1394                 if(major < 2) {
1395                         continue;
1396                 }
1397
1398                 DeviceInfo info;
1399
1400                 info.type = DEVICE_CUDA;
1401                 info.description = string(name);
1402                 info.id = string_printf("CUDA_%d", num);
1403                 info.num = num;
1404
1405                 info.advanced_shading = (major >= 2);
1406                 info.has_bindless_textures = (major >= 3);
1407                 info.pack_images = false;
1408
1409                 /* if device has a kernel timeout, assume it is used for display */
1410                 if(cuDeviceGetAttribute(&attr, CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT, num) == CUDA_SUCCESS && attr == 1) {
1411                         info.description += " (Display)";
1412                         info.display_device = true;
1413                         display_devices.push_back(info);
1414                 }
1415                 else
1416                         devices.push_back(info);
1417         }
1418
1419         if(!display_devices.empty())
1420                 devices.insert(devices.end(), display_devices.begin(), display_devices.end());
1421 }
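
/* Typical call sequence (a minimal sketch; `stats` and `background` are
 * assumed to come from the caller, and error handling is omitted):
 *
 *   if(device_cuda_init()) {
 *       vector<DeviceInfo> devices;
 *       device_cuda_info(devices);
 *       if(!devices.empty())
 *           device = device_cuda_create(devices[0], stats, background);
 *   }
 */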
1422
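/* Build a human readable report of per-device attributes, mainly useful
 * for debugging and bug reports. */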
1423 string device_cuda_capabilities(void)
1424 {
1425         CUresult result = cuInit(0);
1426         if(result != CUDA_SUCCESS) {
1427                 if(result != CUDA_ERROR_NO_DEVICE) {
1428                         return string("Error initializing CUDA: ") + cuewErrorString(result);
1429                 }
1430                 return "No CUDA device found\n";
1431         }
1432
1433         int count;
1434         result = cuDeviceGetCount(&count);
1435         if(result != CUDA_SUCCESS) {
1436                 return string("Error getting devices: ") + cuewErrorString(result);
1437         }
1438
1439         string capabilities;
1440         for(int num = 0; num < count; num++) {
1441                 char name[256];
1442                 if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS) {
1443                         continue;
1444                 }
1445                 capabilities += string("\t") + name + "\n";
1446                 int value;
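		/* Query a single attribute and append it to the report; attributes
		 * which fail to query are silently skipped. */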
1447 #define GET_ATTR(attr) \
1448                 { \
1449                         if(cuDeviceGetAttribute(&value, \
1450                                                 CU_DEVICE_ATTRIBUTE_##attr, \
1451                                                 num) == CUDA_SUCCESS) \
1452                         { \
1453                                 capabilities += string_printf("\t\tCU_DEVICE_ATTRIBUTE_" #attr "\t\t\t%d\n", \
1454                                                               value); \
1455                         } \
1456                 } (void)0
1457                 /* TODO(sergey): Strip all attributes which are not useful for us
1458                  * or do not depend on the driver.
1459                  */
1460                 GET_ATTR(MAX_THREADS_PER_BLOCK);
1461                 GET_ATTR(MAX_BLOCK_DIM_X);
1462                 GET_ATTR(MAX_BLOCK_DIM_Y);
1463                 GET_ATTR(MAX_BLOCK_DIM_Z);
1464                 GET_ATTR(MAX_GRID_DIM_X);
1465                 GET_ATTR(MAX_GRID_DIM_Y);
1466                 GET_ATTR(MAX_GRID_DIM_Z);
1467                 GET_ATTR(MAX_SHARED_MEMORY_PER_BLOCK);
1468                 GET_ATTR(SHARED_MEMORY_PER_BLOCK);
1469                 GET_ATTR(TOTAL_CONSTANT_MEMORY);
1470                 GET_ATTR(WARP_SIZE);
1471                 GET_ATTR(MAX_PITCH);
1472                 GET_ATTR(MAX_REGISTERS_PER_BLOCK);
1473                 GET_ATTR(REGISTERS_PER_BLOCK);
1474                 GET_ATTR(CLOCK_RATE);
1475                 GET_ATTR(TEXTURE_ALIGNMENT);
1476                 GET_ATTR(GPU_OVERLAP);
1477                 GET_ATTR(MULTIPROCESSOR_COUNT);
1478                 GET_ATTR(KERNEL_EXEC_TIMEOUT);
1479                 GET_ATTR(INTEGRATED);
1480                 GET_ATTR(CAN_MAP_HOST_MEMORY);
1481                 GET_ATTR(COMPUTE_MODE);
1482                 GET_ATTR(MAXIMUM_TEXTURE1D_WIDTH);
1483                 GET_ATTR(MAXIMUM_TEXTURE2D_WIDTH);
1484                 GET_ATTR(MAXIMUM_TEXTURE2D_HEIGHT);
1485                 GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH);
1486                 GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT);
1487                 GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH);
1488                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_WIDTH);
1489                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_HEIGHT);
1490                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_LAYERS);
1491                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_WIDTH);
1492                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_HEIGHT);
1493                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES);
1494                 GET_ATTR(SURFACE_ALIGNMENT);
1495                 GET_ATTR(CONCURRENT_KERNELS);
1496                 GET_ATTR(ECC_ENABLED);
1497                 GET_ATTR(TCC_DRIVER);
1498                 GET_ATTR(MEMORY_CLOCK_RATE);
1499                 GET_ATTR(GLOBAL_MEMORY_BUS_WIDTH);
1500                 GET_ATTR(L2_CACHE_SIZE);
1501                 GET_ATTR(MAX_THREADS_PER_MULTIPROCESSOR);
1502                 GET_ATTR(ASYNC_ENGINE_COUNT);
1503                 GET_ATTR(UNIFIED_ADDRESSING);
1504                 GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_WIDTH);
1505                 GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_LAYERS);
1506                 GET_ATTR(CAN_TEX2D_GATHER);
1507                 GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_WIDTH);
1508                 GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_HEIGHT);
1509                 GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE);
1510                 GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE);
1511                 GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE);
1512                 GET_ATTR(TEXTURE_PITCH_ALIGNMENT);
1513                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_WIDTH);
1514                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH);
1515                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS);
1516                 GET_ATTR(MAXIMUM_SURFACE1D_WIDTH);
1517                 GET_ATTR(MAXIMUM_SURFACE2D_WIDTH);
1518                 GET_ATTR(MAXIMUM_SURFACE2D_HEIGHT);
1519                 GET_ATTR(MAXIMUM_SURFACE3D_WIDTH);
1520                 GET_ATTR(MAXIMUM_SURFACE3D_HEIGHT);
1521                 GET_ATTR(MAXIMUM_SURFACE3D_DEPTH);
1522                 GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_WIDTH);
1523                 GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_LAYERS);
1524                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_WIDTH);
1525                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_HEIGHT);
1526                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_LAYERS);
1527                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_WIDTH);
1528                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH);
1529                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS);
1530                 GET_ATTR(MAXIMUM_TEXTURE1D_LINEAR_WIDTH);
1531                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_WIDTH);
1532                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_HEIGHT);
1533                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_PITCH);
1534                 GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH);
1535                 GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT);
1536                 GET_ATTR(COMPUTE_CAPABILITY_MAJOR);
1537                 GET_ATTR(COMPUTE_CAPABILITY_MINOR);
1538                 GET_ATTR(MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH);
1539                 GET_ATTR(STREAM_PRIORITIES_SUPPORTED);
1540                 GET_ATTR(GLOBAL_L1_CACHE_SUPPORTED);
1541                 GET_ATTR(LOCAL_L1_CACHE_SUPPORTED);
1542                 GET_ATTR(MAX_SHARED_MEMORY_PER_MULTIPROCESSOR);
1543                 GET_ATTR(MAX_REGISTERS_PER_MULTIPROCESSOR);
1544                 GET_ATTR(MANAGED_MEMORY);
1545                 GET_ATTR(MULTI_GPU_BOARD);
1546                 GET_ATTR(MULTI_GPU_BOARD_GROUP_ID);
1547 #undef GET_ATTR
1548                 capabilities += "\n";
1549         }
1550
1551         return capabilities;
1552 }
1553
1554 CCL_NAMESPACE_END