/*
 * Copyright 2011-2013 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "device.h"
#include "device_intern.h"

#include "buffers.h"

#include "cuew.h"
#include "util_debug.h"
#include "util_map.h"
#include "util_opengl.h"
#include "util_path.h"
#include "util_system.h"
#include "util_types.h"
#include "util_time.h"

CCL_NAMESPACE_BEGIN

class CUDADevice : public Device
{
public:
	DedicatedTaskPool task_pool;
	CUdevice cuDevice;
	CUcontext cuContext;
	CUmodule cuModule;
	map<device_ptr, bool> tex_interp_map;
	int cuDevId;
	int cuDevArchitecture;
	bool first_error;
	bool use_texture_storage;

	struct PixelMem {
		GLuint cuPBO;
		CUgraphicsResource cuPBOresource;
		GLuint cuTexId;
		int w, h;
	};

	map<device_ptr, PixelMem> pixel_mem_map;

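	/* Cycles passes memory around as an opaque device_ptr integer handle;
	 * on this backend it holds the CUdeviceptr value directly, so the
	 * conversion below is a plain cast. */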
	CUdeviceptr cuda_device_ptr(device_ptr mem)
	{
		return (CUdeviceptr)mem;
	}

	static bool have_precompiled_kernels()
	{
		string cubins_path = path_get("lib");
		return path_exists(cubins_path);
	}

/*#ifdef NDEBUG
#define cuda_abort()
#else
#define cuda_abort() abort()
#endif*/
	void cuda_error_documentation()
	{
		if(first_error) {
			fprintf(stderr, "\nRefer to the Cycles GPU rendering documentation for possible solutions:\n");
			fprintf(stderr, "http://wiki.blender.org/index.php/Doc:2.6/Manual/Render/Cycles/GPU_Rendering\n\n");
			first_error = false;
		}
	}

#define cuda_assert(stmt) \
	{ \
		CUresult result = stmt; \
		\
		if(result != CUDA_SUCCESS) { \
			string message = string_printf("CUDA error: %s in %s", cuewErrorString(result), #stmt); \
			if(error_msg == "") \
				error_msg = message; \
			fprintf(stderr, "%s\n", message.c_str()); \
			/*cuda_abort();*/ \
			cuda_error_documentation(); \
		} \
	} (void)0
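
	/* typical usage, as throughout this file:
	 *   cuda_assert(cuMemAlloc(&device_pointer, size));
	 * the first failure is kept in error_msg, and every failure is printed
	 * to stderr along with the statement that produced it */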

	bool cuda_error_(CUresult result, const string& stmt)
	{
		if(result == CUDA_SUCCESS)
			return false;

		string message = string_printf("CUDA error at %s: %s", stmt.c_str(), cuewErrorString(result));
		if(error_msg == "")
			error_msg = message;
		fprintf(stderr, "%s\n", message.c_str());
		cuda_error_documentation();
		return true;
	}

#define cuda_error(stmt) cuda_error_(stmt, #stmt)

	void cuda_error_message(const string& message)
	{
		if(error_msg == "")
			error_msg = message;
		fprintf(stderr, "%s\n", message.c_str());
		cuda_error_documentation();
	}

	void cuda_push_context()
	{
		cuda_assert(cuCtxSetCurrent(cuContext));
	}

	void cuda_pop_context()
	{
		cuda_assert(cuCtxSetCurrent(NULL));
	}
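
	/* cuCtxSetCurrent() binds the context to the calling thread, so every
	 * entry point that touches the CUDA API brackets its calls with
	 * cuda_push_context() and cuda_pop_context(), whether it runs on the
	 * main thread or on a task pool worker */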

	CUDADevice(DeviceInfo& info, Stats &stats, bool background_)
	: Device(info, stats, background_)
	{
		first_error = true;
		background = background_;
		use_texture_storage = true;

		cuDevId = info.num;
		cuDevice = 0;
		cuContext = 0;

		/* initialize */
		if(cuda_error(cuInit(0)))
			return;

		/* setup device and context */
		if(cuda_error(cuDeviceGet(&cuDevice, cuDevId)))
			return;

		CUresult result;

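		/* in background mode a plain compute context is enough; otherwise try
		 * a GL interop context first and fall back to compute-only (treating
		 * the device as background) if that fails */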
		if(background) {
			result = cuCtxCreate(&cuContext, 0, cuDevice);
		}
		else {
			result = cuGLCtxCreate(&cuContext, 0, cuDevice);

			if(result != CUDA_SUCCESS) {
				result = cuCtxCreate(&cuContext, 0, cuDevice);
				background = true;
			}
		}

		if(cuda_error_(result, "cuCtxCreate"))
			return;

		int major, minor;
		cuDeviceComputeCapability(&major, &minor, cuDevId);
		cuDevArchitecture = major*100 + minor*10;

		/* In order to use full 6GB of memory on Titan cards, use arrays instead
		 * of textures. On earlier cards this seems slower, but on Titan it is
		 * actually slightly faster in tests. */
		use_texture_storage = (cuDevArchitecture < 300);

		cuda_pop_context();
	}

	~CUDADevice()
	{
		task_pool.stop();

		cuda_assert(cuCtxDestroy(cuContext));
	}

	bool support_device(bool experimental)
	{
		int major, minor;
		cuDeviceComputeCapability(&major, &minor, cuDevId);

		/* We only support sm_20 and above */
		if(major < 2) {
			cuda_error_message(string_printf("CUDA device requires compute capability 2.0 or higher, found %d.%d.", major, minor));
			return false;
		}

		return true;
	}

	string compile_kernel(bool experimental)
	{
		/* compute cubin name */
		int major, minor;
		cuDeviceComputeCapability(&major, &minor, cuDevId);

		/* workaround to make sm_52 cards work, until we bundle the kernel */
		if(major == 5 && minor == 2)
			minor = 0;

		/* attempt to use kernel provided with blender */
		string cubin;
		if(experimental)
			cubin = path_get(string_printf("lib/kernel_experimental_sm_%d%d.cubin", major, minor));
		else
			cubin = path_get(string_printf("lib/kernel_sm_%d%d.cubin", major, minor));
		if(path_exists(cubin))
			return cubin;

		/* not found, try to use locally compiled kernel */
		string kernel_path = path_get("kernel");
		string md5 = path_files_md5_hash(kernel_path);

		if(experimental)
			cubin = string_printf("cycles_kernel_experimental_sm%d%d_%s.cubin", major, minor, md5.c_str());
		else
			cubin = string_printf("cycles_kernel_sm%d%d_%s.cubin", major, minor, md5.c_str());
		cubin = path_user_get(path_join("cache", cubin));

		/* if it exists already, use it */
		if(path_exists(cubin))
			return cubin;

#ifdef _WIN32
		if(have_precompiled_kernels()) {
			if(major < 2)
				cuda_error_message(string_printf("CUDA device requires compute capability 2.0 or up, found %d.%d. Your GPU is not supported.", major, minor));
			else
				cuda_error_message(string_printf("CUDA binary kernel for this graphics card compute capability (%d.%d) not found.", major, minor));
			return "";
		}
#endif

		/* if not, find the CUDA compiler */
		const char *nvcc = cuewCompilerPath();

		if(nvcc == NULL) {
			cuda_error_message("CUDA nvcc compiler not found. Install the CUDA toolkit in the default location.");
			return "";
		}

		int cuda_version = cuewCompilerVersion();

		if(cuda_version == 0) {
			cuda_error_message("CUDA nvcc compiler version could not be parsed.");
			return "";
		}
		if(cuda_version < 60) {
			printf("Unsupported CUDA version %d.%d detected, you need CUDA 6.5.\n", cuda_version/10, cuda_version%10);
			return "";
		}
		else if(cuda_version != 65)
			printf("CUDA version %d.%d detected, build may succeed but only CUDA 6.5 is officially supported.\n", cuda_version/10, cuda_version%10);

		/* compile */
		string kernel = path_join(kernel_path, "kernel.cu");
		string include = kernel_path;
		const int machine = system_cpu_bits();

		double starttime = time_dt();
		printf("Compiling CUDA kernel ...\n");

		path_create_directories(cubin);

		string command = string_printf("\"%s\" -arch=sm_%d%d -m%d --cubin \"%s\" "
			"-o \"%s\" --ptxas-options=\"-v\" -I\"%s\" -DNVCC -D__KERNEL_CUDA_VERSION__=%d",
			nvcc, major, minor, machine, kernel.c_str(), cubin.c_str(), include.c_str(), cuda_version);

		if(experimental)
			command += " -D__KERNEL_CUDA_EXPERIMENTAL__";

#ifdef WITH_CYCLES_DEBUG
		command += " -D__KERNEL_DEBUG__";
#endif
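
		/* for illustration: on a 64-bit build with CUDA 6.5 and an sm_21 card,
		 * the command built above expands to roughly (paths are placeholders):
		 *   nvcc -arch=sm_21 -m64 --cubin "<kernel_path>/kernel.cu"
		 *     -o "<cache>/cycles_kernel_sm21_<md5>.cubin"
		 *     --ptxas-options="-v" -I"<kernel_path>" -DNVCC
		 *     -D__KERNEL_CUDA_VERSION__=65 */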

		printf("%s\n", command.c_str());

		if(system(command.c_str()) == -1) {
			cuda_error_message("Failed to execute compilation command, see console for details.");
			return "";
		}

		/* verify if compilation succeeded */
		if(!path_exists(cubin)) {
			cuda_error_message("CUDA kernel compilation failed, see console for details.");
			return "";
		}

		printf("Kernel compilation finished in %.2lfs.\n", time_dt() - starttime);

		return cubin;
	}

	bool load_kernels(bool experimental)
	{
		/* check if cuda init succeeded */
		if(cuContext == 0)
			return false;

		/* check if GPU is supported */
		if(!support_device(experimental))
			return false;

		/* get kernel */
		string cubin = compile_kernel(experimental);

		if(cubin == "")
			return false;

		/* open module */
		cuda_push_context();

		string cubin_data;
		CUresult result;

		if(path_read_text(cubin, cubin_data))
			result = cuModuleLoadData(&cuModule, cubin_data.c_str());
		else
			result = CUDA_ERROR_FILE_NOT_FOUND;

		if(cuda_error_(result, "cuModuleLoad"))
			cuda_error_message(string_printf("Failed loading CUDA kernel %s.", cubin.c_str()));

		cuda_pop_context();

		return (result == CUDA_SUCCESS);
	}

	void mem_alloc(device_memory& mem, MemoryType type)
	{
		cuda_push_context();
		CUdeviceptr device_pointer;
		size_t size = mem.memory_size();
		cuda_assert(cuMemAlloc(&device_pointer, size));
		mem.device_pointer = (device_ptr)device_pointer;
		mem.device_size = size;
		stats.mem_alloc(size);
		cuda_pop_context();
	}

	void mem_copy_to(device_memory& mem)
	{
		cuda_push_context();
		if(mem.device_pointer)
			cuda_assert(cuMemcpyHtoD(cuda_device_ptr(mem.device_pointer), (void*)mem.data_pointer, mem.memory_size()));
		cuda_pop_context();
	}

	void mem_copy_from(device_memory& mem, int y, int w, int h, int elem)
	{
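		/* copy back rows [y, y + h) of a buffer that is w pixels wide, with
		 * elem bytes per pixel; without a device pointer the host rows are
		 * zeroed instead */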
		size_t offset = elem*y*w;
		size_t size = elem*w*h;

		cuda_push_context();
		if(mem.device_pointer) {
			cuda_assert(cuMemcpyDtoH((uchar*)mem.data_pointer + offset,
			                         (CUdeviceptr)(mem.device_pointer + offset), size));
		}
		else {
			memset((char*)mem.data_pointer + offset, 0, size);
		}
		cuda_pop_context();
	}

	void mem_zero(device_memory& mem)
	{
		memset((void*)mem.data_pointer, 0, mem.memory_size());

		cuda_push_context();
		if(mem.device_pointer)
			cuda_assert(cuMemsetD8(cuda_device_ptr(mem.device_pointer), 0, mem.memory_size()));
		cuda_pop_context();
	}

	void mem_free(device_memory& mem)
	{
		if(mem.device_pointer) {
			cuda_push_context();
			cuda_assert(cuMemFree(cuda_device_ptr(mem.device_pointer)));
			cuda_pop_context();

			mem.device_pointer = 0;

			stats.mem_free(mem.device_size);
			mem.device_size = 0;
		}
	}

	void const_copy_to(const char *name, void *host, size_t size)
	{
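		/* constants are module-scope variables in the cubin; look up the
		 * symbol's device address and size by name, then copy into it */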
		CUdeviceptr mem;
		size_t bytes;

		cuda_push_context();
		cuda_assert(cuModuleGetGlobal(&mem, &bytes, cuModule, name));
		//assert(bytes == size);
		cuda_assert(cuMemcpyHtoD(mem, host, size));
		cuda_pop_context();
	}

	void tex_alloc(const char *name, device_memory& mem, InterpolationType interpolation, bool periodic)
	{
		/* todo: support 3D textures, only CPU for now */

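		/* three storage paths follow:
		 * - interpolated data goes into a CUDA array bound to a texture reference
		 * - non-interpolated data on cards with texture storage is linear memory
		 *   bound to a texture reference
		 * - otherwise it is plain global memory, with the pointer written into
		 *   a module global variable of the same name */
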
		/* determine format */
		CUarray_format_enum format;
		size_t dsize = datatype_size(mem.data_type);
		size_t size = mem.memory_size();
		bool use_texture = (interpolation != INTERPOLATION_NONE) || use_texture_storage;

		if(use_texture) {

			switch(mem.data_type) {
				case TYPE_UCHAR: format = CU_AD_FORMAT_UNSIGNED_INT8; break;
				case TYPE_UINT: format = CU_AD_FORMAT_UNSIGNED_INT32; break;
				case TYPE_INT: format = CU_AD_FORMAT_SIGNED_INT32; break;
				case TYPE_FLOAT: format = CU_AD_FORMAT_FLOAT; break;
				default: assert(0); return;
			}

			CUtexref texref = NULL;

			cuda_push_context();
			cuda_assert(cuModuleGetTexRef(&texref, cuModule, name));

			if(!texref) {
				cuda_pop_context();
				return;
			}

			if(interpolation != INTERPOLATION_NONE) {
				CUarray handle = NULL;
				CUDA_ARRAY_DESCRIPTOR desc;

				desc.Width = mem.data_width;
				desc.Height = mem.data_height;
				desc.Format = format;
				desc.NumChannels = mem.data_elements;

				cuda_assert(cuArrayCreate(&handle, &desc));

				if(!handle) {
					cuda_pop_context();
					return;
				}

				if(mem.data_height > 1) {
					CUDA_MEMCPY2D param;
					memset(&param, 0, sizeof(param));
					param.dstMemoryType = CU_MEMORYTYPE_ARRAY;
					param.dstArray = handle;
					param.srcMemoryType = CU_MEMORYTYPE_HOST;
					param.srcHost = (void*)mem.data_pointer;
					param.srcPitch = mem.data_width*dsize*mem.data_elements;
					param.WidthInBytes = param.srcPitch;
					param.Height = mem.data_height;

					cuda_assert(cuMemcpy2D(&param));
				}
				else
					cuda_assert(cuMemcpyHtoA(handle, 0, (void*)mem.data_pointer, size));

				cuda_assert(cuTexRefSetArray(texref, handle, CU_TRSA_OVERRIDE_FORMAT));

				if(interpolation == INTERPOLATION_CLOSEST) {
					cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_POINT));
				}
				else if(interpolation == INTERPOLATION_LINEAR) {
					cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_LINEAR));
				}
				else { /* CUBIC and SMART are unsupported for CUDA */
					cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_LINEAR));
				}
				cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_NORMALIZED_COORDINATES));

				mem.device_pointer = (device_ptr)handle;
				mem.device_size = size;

				stats.mem_alloc(size);
			}
			else {
				cuda_pop_context();

				mem_alloc(mem, MEM_READ_ONLY);
				mem_copy_to(mem);

				cuda_push_context();

				cuda_assert(cuTexRefSetAddress(NULL, texref, cuda_device_ptr(mem.device_pointer), size));
				cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_POINT));
				cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_READ_AS_INTEGER));
			}

			if(periodic) {
				cuda_assert(cuTexRefSetAddressMode(texref, 0, CU_TR_ADDRESS_MODE_WRAP));
				cuda_assert(cuTexRefSetAddressMode(texref, 1, CU_TR_ADDRESS_MODE_WRAP));
			}
			else {
				cuda_assert(cuTexRefSetAddressMode(texref, 0, CU_TR_ADDRESS_MODE_CLAMP));
				cuda_assert(cuTexRefSetAddressMode(texref, 1, CU_TR_ADDRESS_MODE_CLAMP));
			}
			cuda_assert(cuTexRefSetFormat(texref, format, mem.data_elements));

			cuda_pop_context();
		}
		else {
			mem_alloc(mem, MEM_READ_ONLY);
			mem_copy_to(mem);

			cuda_push_context();

			CUdeviceptr cumem;
			size_t cubytes;

			cuda_assert(cuModuleGetGlobal(&cumem, &cubytes, cuModule, name));

			if(cubytes == 8) {
				/* 64 bit device pointer */
				uint64_t ptr = mem.device_pointer;
				cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
			}
			else {
				/* 32 bit device pointer */
				uint32_t ptr = (uint32_t)mem.device_pointer;
				cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
			}

			cuda_pop_context();
		}

		tex_interp_map[mem.device_pointer] = (interpolation != INTERPOLATION_NONE);
	}

	void tex_free(device_memory& mem)
	{
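		/* interpolated textures were allocated as CUDA arrays, so they must be
		 * destroyed with cuArrayDestroy(); everything else was allocated with
		 * mem_alloc() and goes back through mem_free() */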
		if(mem.device_pointer) {
			if(tex_interp_map[mem.device_pointer]) {
				cuda_push_context();
				cuArrayDestroy((CUarray)mem.device_pointer);
				cuda_pop_context();

				tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
				mem.device_pointer = 0;

				stats.mem_free(mem.device_size);
				mem.device_size = 0;
			}
			else {
				tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
				mem_free(mem);
			}
		}
	}

	void path_trace(RenderTile& rtile, int sample, bool branched)
	{
		if(have_error())
			return;

		cuda_push_context();

		CUfunction cuPathTrace;
		CUdeviceptr d_buffer = cuda_device_ptr(rtile.buffer);
		CUdeviceptr d_rng_state = cuda_device_ptr(rtile.rng_state);

		/* get kernel function */
		if(branched) {
			cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_branched_path_trace"));
		}
		else {
			cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_path_trace"));
		}

		if(have_error())
			return;

		/* pass in parameters */
		void *args[] = {&d_buffer,
		                &d_rng_state,
		                &sample,
		                &rtile.x,
		                &rtile.y,
		                &rtile.w,
		                &rtile.h,
		                &rtile.offset,
		                &rtile.stride};

		/* launch kernel */
		int threads_per_block;
		cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuPathTrace));

		/*int num_registers;
		cuda_assert(cuFuncGetAttribute(&num_registers, CU_FUNC_ATTRIBUTE_NUM_REGS, cuPathTrace));

		printf("threads_per_block %d\n", threads_per_block);
		printf("num_registers %d\n", num_registers);*/

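		/* use square thread blocks of side sqrt(max threads per block), and
		 * round the block counts up so the grid covers the whole tile */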
		int xthreads = (int)sqrt((float)threads_per_block);
		int ythreads = (int)sqrt((float)threads_per_block);
		int xblocks = (rtile.w + xthreads - 1)/xthreads;
		int yblocks = (rtile.h + ythreads - 1)/ythreads;

		cuda_assert(cuFuncSetCacheConfig(cuPathTrace, CU_FUNC_CACHE_PREFER_L1));

		cuda_assert(cuLaunchKernel(cuPathTrace,
		                           xblocks, yblocks, 1, /* blocks */
		                           xthreads, ythreads, 1, /* threads */
		                           0, 0, args, 0));

		cuda_assert(cuCtxSynchronize());

		cuda_pop_context();
	}

	void film_convert(DeviceTask& task, device_ptr buffer, device_ptr rgba_byte, device_ptr rgba_half)
	{
		if(have_error())
			return;

		cuda_push_context();

		CUfunction cuFilmConvert;
		CUdeviceptr d_rgba = map_pixels((rgba_byte)? rgba_byte: rgba_half);
		CUdeviceptr d_buffer = cuda_device_ptr(buffer);

		/* get kernel function */
		if(rgba_half) {
			cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_half_float"));
		}
		else {
			cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_byte"));
		}

		float sample_scale = 1.0f/(task.sample + 1);

		/* pass in parameters */
		void *args[] = {&d_rgba,
		                &d_buffer,
		                &sample_scale,
		                &task.x,
		                &task.y,
		                &task.w,
		                &task.h,
		                &task.offset,
		                &task.stride};

		/* launch kernel */
		int threads_per_block;
		cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuFilmConvert));

		int xthreads = (int)sqrt((float)threads_per_block);
		int ythreads = (int)sqrt((float)threads_per_block);
		int xblocks = (task.w + xthreads - 1)/xthreads;
		int yblocks = (task.h + ythreads - 1)/ythreads;

		cuda_assert(cuFuncSetCacheConfig(cuFilmConvert, CU_FUNC_CACHE_PREFER_L1));

		cuda_assert(cuLaunchKernel(cuFilmConvert,
		                           xblocks, yblocks, 1, /* blocks */
		                           xthreads, ythreads, 1, /* threads */
		                           0, 0, args, 0));

		unmap_pixels((rgba_byte)? rgba_byte: rgba_half);

		cuda_pop_context();
	}

	void shader(DeviceTask& task)
	{
		if(have_error())
			return;

		cuda_push_context();

		CUfunction cuShader;
		CUdeviceptr d_input = cuda_device_ptr(task.shader_input);
		CUdeviceptr d_output = cuda_device_ptr(task.shader_output);

		/* get kernel function */
		if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
			cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_bake"));
		}
		else {
			cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_shader"));
		}

		/* do tasks in smaller chunks, so we can cancel it */
		const int shader_chunk_size = 65536;
		const int start = task.shader_x;
		const int end = task.shader_x + task.shader_w;
		int offset = task.offset;

		bool canceled = false;
		for(int sample = 0; sample < task.num_samples && !canceled; sample++) {
			for(int shader_x = start; shader_x < end; shader_x += shader_chunk_size) {
				int shader_w = min(shader_chunk_size, end - shader_x);

				/* pass in parameters */
				void *args[] = {&d_input,
				                &d_output,
				                &task.shader_eval_type,
				                &shader_x,
				                &shader_w,
				                &offset,
				                &sample};

				/* launch kernel */
				int threads_per_block;
				cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuShader));

				int xblocks = (shader_w + threads_per_block - 1)/threads_per_block;

				cuda_assert(cuFuncSetCacheConfig(cuShader, CU_FUNC_CACHE_PREFER_L1));
				cuda_assert(cuLaunchKernel(cuShader,
				                           xblocks, 1, 1, /* blocks */
				                           threads_per_block, 1, 1, /* threads */
				                           0, 0, args, 0));

				cuda_assert(cuCtxSynchronize());

				if(task.get_cancel()) {
					canceled = true;
					break;
				}
			}

			task.update_progress(NULL);
		}

		cuda_pop_context();
	}

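	/* when GL interop is active (!background), displayed pixels live in an
	 * OpenGL pixel buffer object registered with CUDA; mapping the PBO gives
	 * a device pointer that the film convert kernels can write to directly */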
	CUdeviceptr map_pixels(device_ptr mem)
	{
		if(!background) {
			PixelMem pmem = pixel_mem_map[mem];
			CUdeviceptr buffer;

			size_t bytes;
			cuda_assert(cuGraphicsMapResources(1, &pmem.cuPBOresource, 0));
			cuda_assert(cuGraphicsResourceGetMappedPointer(&buffer, &bytes, pmem.cuPBOresource));

			return buffer;
		}

		return cuda_device_ptr(mem);
	}

	void unmap_pixels(device_ptr mem)
	{
		if(!background) {
			PixelMem pmem = pixel_mem_map[mem];

			cuda_assert(cuGraphicsUnmapResources(1, &pmem.cuPBOresource, 0));
		}
	}

	void pixels_alloc(device_memory& mem)
	{
		if(!background) {
			PixelMem pmem;

			pmem.w = mem.data_width;
			pmem.h = mem.data_height;

			cuda_push_context();

			glGenBuffers(1, &pmem.cuPBO);
			glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
			if(mem.data_type == TYPE_HALF)
				glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(GLhalf)*4, NULL, GL_DYNAMIC_DRAW);
			else
				glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(uint8_t)*4, NULL, GL_DYNAMIC_DRAW);

			glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);

			glGenTextures(1, &pmem.cuTexId);
			glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
			if(mem.data_type == TYPE_HALF)
				glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F_ARB, pmem.w, pmem.h, 0, GL_RGBA, GL_HALF_FLOAT, NULL);
			else
				glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, pmem.w, pmem.h, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
			glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
			glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
			glBindTexture(GL_TEXTURE_2D, 0);

			CUresult result = cuGraphicsGLRegisterBuffer(&pmem.cuPBOresource, pmem.cuPBO, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);

			if(result == CUDA_SUCCESS) {
				cuda_pop_context();

				mem.device_pointer = pmem.cuTexId;
				pixel_mem_map[mem.device_pointer] = pmem;

				mem.device_size = mem.memory_size();
				stats.mem_alloc(mem.device_size);

				return;
			}
			else {
				/* failed to register the buffer, fall back to no interop */
				glDeleteBuffers(1, &pmem.cuPBO);
				glDeleteTextures(1, &pmem.cuTexId);

				cuda_pop_context();

				background = true;
			}
		}

		Device::pixels_alloc(mem);
	}

	void pixels_copy_from(device_memory& mem, int y, int w, int h)
	{
		if(!background) {
			PixelMem pmem = pixel_mem_map[mem.device_pointer];

			cuda_push_context();

			glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
			uchar *pixels = (uchar*)glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_READ_ONLY);
			size_t offset = sizeof(uchar)*4*y*w;
			memcpy((uchar*)mem.data_pointer + offset, pixels + offset, sizeof(uchar)*4*w*h);
			glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
			glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);

			cuda_pop_context();

			return;
		}

		Device::pixels_copy_from(mem, y, w, h);
	}

	void pixels_free(device_memory& mem)
	{
		if(mem.device_pointer) {
			if(!background) {
				PixelMem pmem = pixel_mem_map[mem.device_pointer];

				cuda_push_context();

				cuda_assert(cuGraphicsUnregisterResource(pmem.cuPBOresource));
				glDeleteBuffers(1, &pmem.cuPBO);
				glDeleteTextures(1, &pmem.cuTexId);

				cuda_pop_context();

				pixel_mem_map.erase(pixel_mem_map.find(mem.device_pointer));
				mem.device_pointer = 0;

				stats.mem_free(mem.device_size);
				mem.device_size = 0;

				return;
			}

			Device::pixels_free(mem);
		}
	}

	void draw_pixels(device_memory& mem, int y, int w, int h, int dy, int width, int height, bool transparent,
		const DeviceDrawParams &draw_params)
	{
		if(!background) {
			PixelMem pmem = pixel_mem_map[mem.device_pointer];

			cuda_push_context();

			/* for multi-device rendering this uses the inefficient approach of
			 * allocating all pixels on every device, even though each device
			 * only renders to a subset */
			size_t offset = 4*y*w;

			if(mem.data_type == TYPE_HALF)
				offset *= sizeof(GLhalf);
			else
				offset *= sizeof(uint8_t);

			glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, pmem.cuPBO);
			glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
			if(mem.data_type == TYPE_HALF)
				glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_HALF_FLOAT, (void*)offset);
			else
				glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, (void*)offset);
			glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0);

			glEnable(GL_TEXTURE_2D);

			if(transparent) {
				glEnable(GL_BLEND);
				glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
			}

			glColor3f(1.0f, 1.0f, 1.0f);

			if(draw_params.bind_display_space_shader_cb) {
				draw_params.bind_display_space_shader_cb();
			}

			glPushMatrix();
			glTranslatef(0.0f, (float)dy, 0.0f);

			glBegin(GL_QUADS);

			glTexCoord2f(0.0f, 0.0f);
			glVertex2f(0.0f, 0.0f);
			glTexCoord2f((float)w/(float)pmem.w, 0.0f);
			glVertex2f((float)width, 0.0f);
			glTexCoord2f((float)w/(float)pmem.w, (float)h/(float)pmem.h);
			glVertex2f((float)width, (float)height);
			glTexCoord2f(0.0f, (float)h/(float)pmem.h);
			glVertex2f(0.0f, (float)height);

			glEnd();

			glPopMatrix();

			if(draw_params.unbind_display_space_shader_cb) {
				draw_params.unbind_display_space_shader_cb();
			}

			if(transparent)
				glDisable(GL_BLEND);

			glBindTexture(GL_TEXTURE_2D, 0);
			glDisable(GL_TEXTURE_2D);

			cuda_pop_context();

			return;
		}

		Device::draw_pixels(mem, y, w, h, dy, width, height, transparent, draw_params);
	}

	void thread_run(DeviceTask *task)
	{
		if(task->type == DeviceTask::PATH_TRACE) {
			RenderTile tile;

			bool branched = task->integrator_branched;

			/* keep rendering tiles until done */
			while(task->acquire_tile(this, tile)) {
				int start_sample = tile.start_sample;
				int end_sample = tile.start_sample + tile.num_samples;

				for(int sample = start_sample; sample < end_sample; sample++) {
					if(task->get_cancel()) {
						if(task->need_finish_queue == false)
							break;
					}

					path_trace(tile, sample, branched);

					tile.sample = sample + 1;

					task->update_progress(&tile);
				}

				task->release_tile(tile);
			}
		}
		else if(task->type == DeviceTask::SHADER) {
			shader(*task);

			cuda_push_context();
			cuda_assert(cuCtxSynchronize());
			cuda_pop_context();
		}
	}

	class CUDADeviceTask : public DeviceTask {
	public:
		CUDADeviceTask(CUDADevice *device, DeviceTask& task)
		: DeviceTask(task)
		{
			run = function_bind(&CUDADevice::thread_run, device, this);
		}
	};

	int get_split_task_count(DeviceTask& task)
	{
		return 1;
	}

	void task_add(DeviceTask& task)
	{
		if(task.type == DeviceTask::FILM_CONVERT) {
			/* must be done in main thread due to opengl access */
			film_convert(task, task.buffer, task.rgba_byte, task.rgba_half);

			cuda_push_context();
			cuda_assert(cuCtxSynchronize());
			cuda_pop_context();
		}
		else {
			task_pool.push(new CUDADeviceTask(this, task));
		}
	}

	void task_wait()
	{
		task_pool.wait();
	}

	void task_cancel()
	{
		task_pool.cancel();
	}
};

bool device_cuda_init(void)
{
	static bool initialized = false;
	static bool result = false;

	if(initialized)
		return result;

	initialized = true;

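	/* cuew loads the CUDA driver library dynamically at runtime; CUDA is
	 * reported as available when the driver is present and either precompiled
	 * kernels ship with the build or nvcc is available to compile them */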
	if(cuewInit() == CUEW_SUCCESS) {
		if(CUDADevice::have_precompiled_kernels())
			result = true;
#ifndef _WIN32
		else if(cuewCompilerPath() != NULL)
			result = true;
#endif
	}

	return result;
}

Device *device_cuda_create(DeviceInfo& info, Stats &stats, bool background)
{
	return new CUDADevice(info, stats, background);
}

void device_cuda_info(vector<DeviceInfo>& devices)
{
	CUresult result;
	int count = 0;

	result = cuInit(0);
	if(result != CUDA_SUCCESS) {
		if(result != CUDA_ERROR_NO_DEVICE)
			fprintf(stderr, "CUDA cuInit: %s\n", cuewErrorString(result));
		return;
	}

	result = cuDeviceGetCount(&count);
	if(result != CUDA_SUCCESS) {
		fprintf(stderr, "CUDA cuDeviceGetCount: %s\n", cuewErrorString(result));
		return;
	}

	vector<DeviceInfo> display_devices;

	for(int num = 0; num < count; num++) {
		char name[256];
		int attr;

		if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS)
			continue;

		DeviceInfo info;

		info.type = DEVICE_CUDA;
		info.description = string(name);
		info.id = string_printf("CUDA_%d", num);
		info.num = num;

		int major, minor;
		cuDeviceComputeCapability(&major, &minor, num);
		info.advanced_shading = (major >= 2);
		info.extended_images = (major >= 3);
		info.pack_images = false;

		/* if the device has a kernel timeout, assume it is used for display */
		if(cuDeviceGetAttribute(&attr, CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT, num) == CUDA_SUCCESS && attr == 1) {
			info.display_device = true;
			display_devices.push_back(info);
		}
		else
			devices.push_back(info);
	}

	if(!display_devices.empty())
		devices.insert(devices.end(), display_devices.begin(), display_devices.end());
}

CCL_NAMESPACE_END