Fix T41222: Blender gives weird output when baking at 4096x4096 resolution on GPU
[blender-staging.git] intern/cycles/device/device_cuda.cpp
/*
 * Copyright 2011-2013 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "device.h"
#include "device_intern.h"

#include "buffers.h"

#include "cuew.h"
#include "util_debug.h"
#include "util_foreach.h"
#include "util_map.h"
#include "util_opengl.h"
#include "util_path.h"
#include "util_system.h"
#include "util_types.h"
#include "util_time.h"

CCL_NAMESPACE_BEGIN

class CUDADevice : public Device
{
public:
	DedicatedTaskPool task_pool;
	CUdevice cuDevice;
	CUcontext cuContext;
	CUmodule cuModule;
	map<device_ptr, bool> tex_interp_map;
	int cuDevId;
	int cuDevArchitecture;
	bool first_error;
	bool use_texture_storage;

	struct PixelMem {
		GLuint cuPBO;
		CUgraphicsResource cuPBOresource;
		GLuint cuTexId;
		int w, h;
	};

	map<device_ptr, PixelMem> pixel_mem_map;

	CUdeviceptr cuda_device_ptr(device_ptr mem)
	{
		return (CUdeviceptr)mem;
	}

	static bool have_precompiled_kernels()
	{
		string cubins_path = path_get("lib");
		return path_exists(cubins_path);
	}

/*#ifdef NDEBUG
#define cuda_abort()
#else
#define cuda_abort() abort()
#endif*/
	void cuda_error_documentation()
	{
		if(first_error) {
			fprintf(stderr, "\nRefer to the Cycles GPU rendering documentation for possible solutions:\n");
			fprintf(stderr, "http://wiki.blender.org/index.php/Doc:2.6/Manual/Render/Cycles/GPU_Rendering\n\n");
			first_error = false;
		}
	}

#define cuda_assert(stmt) \
	{ \
		CUresult result = stmt; \
		\
		if(result != CUDA_SUCCESS) { \
			string message = string_printf("CUDA error: %s in %s", cuewErrorString(result), #stmt); \
			if(error_msg == "") \
				error_msg = message; \
			fprintf(stderr, "%s\n", message.c_str()); \
			/*cuda_abort();*/ \
			cuda_error_documentation(); \
		} \
	} (void)0
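
	/* Note: cuda_assert() and cuda_error_() record only the first failure in
	 * the device's error_msg but print every failure to stderr; they do not
	 * abort, so callers are expected to check have_error() at task level. */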

	bool cuda_error_(CUresult result, const string& stmt)
	{
		if(result == CUDA_SUCCESS)
			return false;

		string message = string_printf("CUDA error at %s: %s", stmt.c_str(), cuewErrorString(result));
		if(error_msg == "")
			error_msg = message;
		fprintf(stderr, "%s\n", message.c_str());
		cuda_error_documentation();
		return true;
	}

#define cuda_error(stmt) cuda_error_(stmt, #stmt)

	void cuda_error_message(const string& message)
	{
		if(error_msg == "")
			error_msg = message;
		fprintf(stderr, "%s\n", message.c_str());
		cuda_error_documentation();
	}

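	/* CUDA binds a context to the calling thread, so every entry point below
	 * brackets its driver API calls with this push/pop pair; that way both
	 * the main thread and the task pool threads can use the same device. */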
	void cuda_push_context()
	{
		cuda_assert(cuCtxSetCurrent(cuContext));
	}

	void cuda_pop_context()
	{
		cuda_assert(cuCtxSetCurrent(NULL));
	}

	CUDADevice(DeviceInfo& info, Stats &stats, bool background_)
	: Device(info, stats, background_)
	{
		first_error = true;
		background = background_;
		use_texture_storage = true;

		cuDevId = info.num;
		cuDevice = 0;
		cuContext = 0;

		/* initialize */
		if(cuda_error(cuInit(0)))
			return;

		/* setup device and context */
		if(cuda_error(cuDeviceGet(&cuDevice, cuDevId)))
			return;

		CUresult result;

		if(background) {
			result = cuCtxCreate(&cuContext, 0, cuDevice);
		}
		else {
			result = cuGLCtxCreate(&cuContext, 0, cuDevice);

			if(result != CUDA_SUCCESS) {
				result = cuCtxCreate(&cuContext, 0, cuDevice);
				background = true;
			}
		}

		if(cuda_error_(result, "cuCtxCreate"))
			return;

		int major, minor;
		cuDeviceComputeCapability(&major, &minor, cuDevId);
		cuDevArchitecture = major*100 + minor*10;

		/* In order to use full 6GB of memory on Titan cards, use arrays instead
		 * of textures. On earlier cards this seems slower, but on Titan it is
		 * actually slightly faster in tests. */
		use_texture_storage = (cuDevArchitecture < 300);

		cuda_pop_context();
	}

	~CUDADevice()
	{
		task_pool.stop();

		cuda_assert(cuCtxDestroy(cuContext));
	}

	bool support_device(bool experimental)
	{
		int major, minor;
		cuDeviceComputeCapability(&major, &minor, cuDevId);

		/* We only support sm_20 and above */
		if(major < 2) {
			cuda_error_message(string_printf("CUDA device supported only with compute capability 2.0 or up, found %d.%d.", major, minor));
			return false;
		}

		return true;
	}

	string compile_kernel()
	{
		/* compute cubin name */
		int major, minor;
		cuDeviceComputeCapability(&major, &minor, cuDevId);

		/* attempt to use kernel provided with blender */
		string cubin = path_get(string_printf("lib/kernel_sm_%d%d.cubin", major, minor));
		if(path_exists(cubin))
			return cubin;

		/* not found, try to use locally compiled kernel */
		string kernel_path = path_get("kernel");
		string md5 = path_files_md5_hash(kernel_path);

		cubin = string_printf("cycles_kernel_sm%d%d_%s.cubin", major, minor, md5.c_str());
		cubin = path_user_get(path_join("cache", cubin));

		/* if exists already, use it */
		if(path_exists(cubin))
			return cubin;

#ifdef _WIN32
		if(have_precompiled_kernels()) {
			if(major < 2)
				cuda_error_message(string_printf("CUDA device requires compute capability 2.0 or up, found %d.%d. Your GPU is not supported.", major, minor));
			else
				cuda_error_message(string_printf("CUDA binary kernel for this graphics card compute capability (%d.%d) not found.", major, minor));
			return "";
		}
#endif

		/* if not, find CUDA compiler */
		const char *nvcc = cuewCompilerPath();

		if(nvcc == NULL) {
			cuda_error_message("CUDA nvcc compiler not found. Install CUDA toolkit in default location.");
			return "";
		}

		int cuda_version = cuewCompilerVersion();

		if(cuda_version == 0) {
			cuda_error_message("CUDA nvcc compiler version could not be parsed.");
			return "";
		}
		if(cuda_version < 50) {
			printf("Unsupported CUDA version %d.%d detected, you need CUDA 6.0.\n", cuda_version/10, cuda_version%10);
			return "";
		}
		else if(cuda_version != 60)
			printf("CUDA version %d.%d detected, build may succeed but only CUDA 6.0 is officially supported.\n", cuda_version/10, cuda_version%10);

		/* compile */
		string kernel = path_join(kernel_path, "kernel.cu");
		string include = kernel_path;
		const int machine = system_cpu_bits();

		double starttime = time_dt();
		printf("Compiling CUDA kernel ...\n");

		path_create_directories(cubin);

		string command = string_printf("\"%s\" -arch=sm_%d%d -m%d --cubin \"%s\" "
			"-o \"%s\" --ptxas-options=\"-v\" -I\"%s\" -DNVCC -D__KERNEL_CUDA_VERSION__=%d",
			nvcc, major, minor, machine, kernel.c_str(), cubin.c_str(), include.c_str(), cuda_version);

		printf("%s\n", command.c_str());

		if(system(command.c_str()) == -1) {
			cuda_error_message("Failed to execute compilation command, see console for details.");
			return "";
		}

		/* verify if compilation succeeded */
		if(!path_exists(cubin)) {
			cuda_error_message("CUDA kernel compilation failed, see console for details.");
			return "";
		}

		printf("Kernel compilation finished in %.2lfs.\n", time_dt() - starttime);

		return cubin;
	}

	bool load_kernels(bool experimental)
	{
		/* check if cuda init succeeded */
		if(cuContext == 0)
			return false;

		/* check if GPU is supported */
		if(!support_device(experimental))
			return false;

		/* get kernel */
		string cubin = compile_kernel();

		if(cubin == "")
			return false;

		/* open module */
		cuda_push_context();

		string cubin_data;
		CUresult result;

		if(path_read_text(cubin, cubin_data))
			result = cuModuleLoadData(&cuModule, cubin_data.c_str());
		else
			result = CUDA_ERROR_FILE_NOT_FOUND;

		if(cuda_error_(result, "cuModuleLoad"))
			cuda_error_message(string_printf("Failed loading CUDA kernel %s.", cubin.c_str()));

		cuda_pop_context();

		return (result == CUDA_SUCCESS);
	}

	void mem_alloc(device_memory& mem, MemoryType type)
	{
		cuda_push_context();
		CUdeviceptr device_pointer;
		size_t size = mem.memory_size();
		cuda_assert(cuMemAlloc(&device_pointer, size));
		mem.device_pointer = (device_ptr)device_pointer;
		stats.mem_alloc(size);
		cuda_pop_context();
	}

	void mem_copy_to(device_memory& mem)
	{
		cuda_push_context();
		if(mem.device_pointer)
			cuda_assert(cuMemcpyHtoD(cuda_device_ptr(mem.device_pointer), (void*)mem.data_pointer, mem.memory_size()));
		cuda_pop_context();
	}

	void mem_copy_from(device_memory& mem, int y, int w, int h, int elem)
	{
		size_t offset = elem*y*w;
		size_t size = elem*w*h;
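		/* offset/size address h full rows starting at row y; this addressing
		 * assumes the buffer is tightly packed, i.e. row stride == w. */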

		cuda_push_context();
		if(mem.device_pointer) {
			cuda_assert(cuMemcpyDtoH((uchar*)mem.data_pointer + offset,
			                         (CUdeviceptr)((uchar*)mem.device_pointer + offset), size));
		}
		else {
			memset((char*)mem.data_pointer + offset, 0, size);
		}
		cuda_pop_context();
	}

	void mem_zero(device_memory& mem)
	{
		memset((void*)mem.data_pointer, 0, mem.memory_size());

		cuda_push_context();
		if(mem.device_pointer)
			cuda_assert(cuMemsetD8(cuda_device_ptr(mem.device_pointer), 0, mem.memory_size()));
		cuda_pop_context();
	}

	void mem_free(device_memory& mem)
	{
		if(mem.device_pointer) {
			cuda_push_context();
			cuda_assert(cuMemFree(cuda_device_ptr(mem.device_pointer)));
			cuda_pop_context();

			mem.device_pointer = 0;

			stats.mem_free(mem.memory_size());
		}
	}

	void const_copy_to(const char *name, void *host, size_t size)
	{
		CUdeviceptr mem;
		size_t bytes;

		cuda_push_context();
		cuda_assert(cuModuleGetGlobal(&mem, &bytes, cuModule, name));
		//assert(bytes == size);
		cuda_assert(cuMemcpyHtoD(mem, host, size));
		cuda_pop_context();
	}

	void tex_alloc(const char *name, device_memory& mem, InterpolationType interpolation, bool periodic)
	{
		/* todo: support 3D textures, only CPU for now */

		/* determine format */
		CUarray_format_enum format;
		size_t dsize = datatype_size(mem.data_type);
		size_t size = mem.memory_size();
		bool use_texture = (interpolation != INTERPOLATION_NONE) || use_texture_storage;
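		/* Textures are used when the data needs hardware interpolation, or on
		 * pre sm_30 cards where texture storage is the preferred path; on
		 * sm_30+ plain global memory is used instead (see the
		 * use_texture_storage note in the constructor). */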

		if(use_texture) {

			switch(mem.data_type) {
				case TYPE_UCHAR: format = CU_AD_FORMAT_UNSIGNED_INT8; break;
				case TYPE_UINT: format = CU_AD_FORMAT_UNSIGNED_INT32; break;
				case TYPE_INT: format = CU_AD_FORMAT_SIGNED_INT32; break;
				case TYPE_FLOAT: format = CU_AD_FORMAT_FLOAT; break;
				default: assert(0); return;
			}

			CUtexref texref = NULL;

			cuda_push_context();
			cuda_assert(cuModuleGetTexRef(&texref, cuModule, name));

			if(!texref) {
				cuda_pop_context();
				return;
			}

			if(interpolation != INTERPOLATION_NONE) {
				CUarray handle = NULL;
				CUDA_ARRAY_DESCRIPTOR desc;

				desc.Width = mem.data_width;
				desc.Height = mem.data_height;
				desc.Format = format;
				desc.NumChannels = mem.data_elements;

				cuda_assert(cuArrayCreate(&handle, &desc));

				if(!handle) {
					cuda_pop_context();
					return;
				}

				if(mem.data_height > 1) {
					CUDA_MEMCPY2D param;
					memset(&param, 0, sizeof(param));
					param.dstMemoryType = CU_MEMORYTYPE_ARRAY;
					param.dstArray = handle;
					param.srcMemoryType = CU_MEMORYTYPE_HOST;
					param.srcHost = (void*)mem.data_pointer;
					param.srcPitch = mem.data_width*dsize*mem.data_elements;
					param.WidthInBytes = param.srcPitch;
					param.Height = mem.data_height;

					cuda_assert(cuMemcpy2D(&param));
				}
				else
					cuda_assert(cuMemcpyHtoA(handle, 0, (void*)mem.data_pointer, size));

				cuda_assert(cuTexRefSetArray(texref, handle, CU_TRSA_OVERRIDE_FORMAT));

				if(interpolation == INTERPOLATION_CLOSEST) {
					cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_POINT));
				}
				else if(interpolation == INTERPOLATION_LINEAR) {
					cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_LINEAR));
				}
				else {
					/* CUBIC and SMART are unsupported for CUDA */
					cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_LINEAR));
				}
				cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_NORMALIZED_COORDINATES));

				mem.device_pointer = (device_ptr)handle;

				stats.mem_alloc(size);
			}
			else {
				cuda_pop_context();

				mem_alloc(mem, MEM_READ_ONLY);
				mem_copy_to(mem);

				cuda_push_context();

				cuda_assert(cuTexRefSetAddress(NULL, texref, cuda_device_ptr(mem.device_pointer), size));
				cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_POINT));
				cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_READ_AS_INTEGER));
			}

			if(periodic) {
				cuda_assert(cuTexRefSetAddressMode(texref, 0, CU_TR_ADDRESS_MODE_WRAP));
				cuda_assert(cuTexRefSetAddressMode(texref, 1, CU_TR_ADDRESS_MODE_WRAP));
			}
			else {
				cuda_assert(cuTexRefSetAddressMode(texref, 0, CU_TR_ADDRESS_MODE_CLAMP));
				cuda_assert(cuTexRefSetAddressMode(texref, 1, CU_TR_ADDRESS_MODE_CLAMP));
			}
			cuda_assert(cuTexRefSetFormat(texref, format, mem.data_elements));

			cuda_pop_context();
		}
		else {
			mem_alloc(mem, MEM_READ_ONLY);
			mem_copy_to(mem);

			cuda_push_context();

			CUdeviceptr cumem;
			size_t cubytes;

			cuda_assert(cuModuleGetGlobal(&cumem, &cubytes, cuModule, name));

			if(cubytes == 8) {
				/* 64 bit device pointer */
				uint64_t ptr = mem.device_pointer;
				cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
			}
			else {
				/* 32 bit device pointer */
				uint32_t ptr = (uint32_t)mem.device_pointer;
				cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
			}

			cuda_pop_context();
		}

		tex_interp_map[mem.device_pointer] = (interpolation != INTERPOLATION_NONE);
	}

	void tex_free(device_memory& mem)
	{
		if(mem.device_pointer) {
			if(tex_interp_map[mem.device_pointer]) {
				cuda_push_context();
				cuArrayDestroy((CUarray)mem.device_pointer);
				cuda_pop_context();

				tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
				mem.device_pointer = 0;

				stats.mem_free(mem.memory_size());
			}
			else {
				tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
				mem_free(mem);
			}
		}
	}

	void path_trace(RenderTile& rtile, int sample, bool branched)
	{
		if(have_error())
			return;

		cuda_push_context();

		CUfunction cuPathTrace;
		CUdeviceptr d_buffer = cuda_device_ptr(rtile.buffer);
		CUdeviceptr d_rng_state = cuda_device_ptr(rtile.rng_state);

		/* get kernel function */
		if(branched) {
			cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_branched_path_trace"));
		}
		else {
			cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_path_trace"));
		}

		if(have_error())
			return;

		/* pass in parameters */
		void *args[] = {&d_buffer,
		                &d_rng_state,
		                &sample,
		                &rtile.x,
		                &rtile.y,
		                &rtile.w,
		                &rtile.h,
		                &rtile.offset,
		                &rtile.stride};

		/* launch kernel */
		int threads_per_block;
		cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuPathTrace));

		/*int num_registers;
		cuda_assert(cuFuncGetAttribute(&num_registers, CU_FUNC_ATTRIBUTE_NUM_REGS, cuPathTrace));

		printf("threads_per_block %d\n", threads_per_block);
		printf("num_registers %d\n", num_registers);*/

		int xthreads = (int)sqrt((float)threads_per_block);
		int ythreads = (int)sqrt((float)threads_per_block);
		int xblocks = (rtile.w + xthreads - 1)/xthreads;
		int yblocks = (rtile.h + ythreads - 1)/ythreads;
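		/* Note: xthreads*ythreads is a square block of at most
		 * threads_per_block threads, and the (n + threads - 1)/threads form
		 * rounds the block counts up, so tiles whose size is not a multiple
		 * of the block dimensions are still fully covered. */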

		cuda_assert(cuFuncSetCacheConfig(cuPathTrace, CU_FUNC_CACHE_PREFER_L1));

		cuda_assert(cuLaunchKernel(cuPathTrace,
		                           xblocks, yblocks, 1, /* blocks */
		                           xthreads, ythreads, 1, /* threads */
		                           0, 0, args, 0));

		cuda_assert(cuCtxSynchronize());

		cuda_pop_context();
	}

	void film_convert(DeviceTask& task, device_ptr buffer, device_ptr rgba_byte, device_ptr rgba_half)
	{
		if(have_error())
			return;

		cuda_push_context();

		CUfunction cuFilmConvert;
		CUdeviceptr d_rgba = map_pixels((rgba_byte)? rgba_byte: rgba_half);
		CUdeviceptr d_buffer = cuda_device_ptr(buffer);

		/* get kernel function */
		if(rgba_half) {
			cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_half_float"));
		}
		else {
			cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_byte"));
		}

		float sample_scale = 1.0f/(task.sample + 1);

		/* pass in parameters */
		void *args[] = {&d_rgba,
		                &d_buffer,
		                &sample_scale,
		                &task.x,
		                &task.y,
		                &task.w,
		                &task.h,
		                &task.offset,
		                &task.stride};

		/* launch kernel */
		int threads_per_block;
		cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuFilmConvert));

		int xthreads = (int)sqrt((float)threads_per_block);
		int ythreads = (int)sqrt((float)threads_per_block);
		int xblocks = (task.w + xthreads - 1)/xthreads;
		int yblocks = (task.h + ythreads - 1)/ythreads;

		cuda_assert(cuFuncSetCacheConfig(cuFilmConvert, CU_FUNC_CACHE_PREFER_L1));

		cuda_assert(cuLaunchKernel(cuFilmConvert,
		                           xblocks, yblocks, 1, /* blocks */
		                           xthreads, ythreads, 1, /* threads */
		                           0, 0, args, 0));

		unmap_pixels((rgba_byte)? rgba_byte: rgba_half);

		cuda_pop_context();
	}

	void shader(DeviceTask& task)
	{
		if(have_error())
			return;

		cuda_push_context();

		CUfunction cuShader;
		CUdeviceptr d_input = cuda_device_ptr(task.shader_input);
		CUdeviceptr d_output = cuda_device_ptr(task.shader_output);

		/* get kernel function */
		if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
			cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_bake"));
		}
		else {
			cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_shader"));
		}

		/* do tasks in smaller chunks, so we can cancel it */
		const int shader_chunk_size = 65536;
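		/* Besides making cancellation responsive, chunking bounds the launch
		 * size: a 4096x4096 bake is ~16.8M shader points, which at e.g. 256
		 * threads per block would need 65536 blocks, just past the 65535
		 * one-dimensional grid limit of sm_2x devices and a likely cause of
		 * the corrupted output reported in T41222. */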
		const int start = task.shader_x;
		const int end = task.shader_x + task.shader_w;

		bool canceled = false;
		for(int sample = 0; sample < task.num_samples && !canceled; sample++) {
			for(int shader_x = start; shader_x < end; shader_x += shader_chunk_size) {
				int shader_w = min(shader_chunk_size, end - shader_x);

				/* pass in parameters */
				void *args[] = {&d_input,
				                &d_output,
				                &task.shader_eval_type,
				                &shader_x,
				                &shader_w,
				                &sample};

				/* launch kernel */
				int threads_per_block;
				cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuShader));

				int xblocks = (shader_w + threads_per_block - 1)/threads_per_block;

				cuda_assert(cuFuncSetCacheConfig(cuShader, CU_FUNC_CACHE_PREFER_L1));
				cuda_assert(cuLaunchKernel(cuShader,
				                           xblocks, 1, 1, /* blocks */
				                           threads_per_block, 1, 1, /* threads */
				                           0, 0, args, 0));

				cuda_assert(cuCtxSynchronize());

				if(task.get_cancel()) {
					canceled = true;
					break;
				}
			}

			task.update_progress(NULL);
		}

		cuda_pop_context();
	}

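	/* In interactive (non-background) mode the display pixels live in an
	 * OpenGL PBO, which has to be mapped into the CUDA address space before a
	 * kernel may write to it, and unmapped again before GL consumes it. */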
	CUdeviceptr map_pixels(device_ptr mem)
	{
		if(!background) {
			PixelMem pmem = pixel_mem_map[mem];
			CUdeviceptr buffer;

			size_t bytes;
			cuda_assert(cuGraphicsMapResources(1, &pmem.cuPBOresource, 0));
			cuda_assert(cuGraphicsResourceGetMappedPointer(&buffer, &bytes, pmem.cuPBOresource));

			return buffer;
		}

		return cuda_device_ptr(mem);
	}

	void unmap_pixels(device_ptr mem)
	{
		if(!background) {
			PixelMem pmem = pixel_mem_map[mem];

			cuda_assert(cuGraphicsUnmapResources(1, &pmem.cuPBOresource, 0));
		}
	}

	void pixels_alloc(device_memory& mem)
	{
		if(!background) {
			PixelMem pmem;

			pmem.w = mem.data_width;
			pmem.h = mem.data_height;

			cuda_push_context();

			glGenBuffers(1, &pmem.cuPBO);
			glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
			if(mem.data_type == TYPE_HALF)
				glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(GLhalf)*4, NULL, GL_DYNAMIC_DRAW);
			else
				glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(uint8_t)*4, NULL, GL_DYNAMIC_DRAW);

			glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);

			glGenTextures(1, &pmem.cuTexId);
			glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
			if(mem.data_type == TYPE_HALF)
				glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F_ARB, pmem.w, pmem.h, 0, GL_RGBA, GL_HALF_FLOAT, NULL);
			else
				glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, pmem.w, pmem.h, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
			glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
			glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
			glBindTexture(GL_TEXTURE_2D, 0);

			CUresult result = cuGraphicsGLRegisterBuffer(&pmem.cuPBOresource, pmem.cuPBO, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);

			if(result == CUDA_SUCCESS) {
				cuda_pop_context();

				mem.device_pointer = pmem.cuTexId;
				pixel_mem_map[mem.device_pointer] = pmem;

				stats.mem_alloc(mem.memory_size());

				return;
			}
			else {
				/* failed to register buffer, fallback to no interop */
				glDeleteBuffers(1, &pmem.cuPBO);
				glDeleteTextures(1, &pmem.cuTexId);

				cuda_pop_context();

				background = true;
			}
		}

		Device::pixels_alloc(mem);
	}

	void pixels_copy_from(device_memory& mem, int y, int w, int h)
	{
		if(!background) {
			PixelMem pmem = pixel_mem_map[mem.device_pointer];

			cuda_push_context();

			glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
			uchar *pixels = (uchar*)glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_READ_ONLY);
			size_t offset = sizeof(uchar)*4*y*w;
			memcpy((uchar*)mem.data_pointer + offset, pixels + offset, sizeof(uchar)*4*w*h);
			glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
			glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);

			cuda_pop_context();

			return;
		}

		Device::pixels_copy_from(mem, y, w, h);
	}

	void pixels_free(device_memory& mem)
	{
		if(mem.device_pointer) {
			if(!background) {
				PixelMem pmem = pixel_mem_map[mem.device_pointer];

				cuda_push_context();

				cuda_assert(cuGraphicsUnregisterResource(pmem.cuPBOresource));
				glDeleteBuffers(1, &pmem.cuPBO);
				glDeleteTextures(1, &pmem.cuTexId);

				cuda_pop_context();

				pixel_mem_map.erase(pixel_mem_map.find(mem.device_pointer));
				mem.device_pointer = 0;

				stats.mem_free(mem.memory_size());

				return;
			}

			Device::pixels_free(mem);
		}
	}

	void draw_pixels(device_memory& mem, int y, int w, int h, int dy, int width, int height, bool transparent,
		const DeviceDrawParams &draw_params)
	{
		if(!background) {
			PixelMem pmem = pixel_mem_map[mem.device_pointer];

			cuda_push_context();

			/* for multi devices, this assumes the inefficient method that we allocate
			 * all pixels on the device even though we only render to a subset */
			size_t offset = 4*y*w;

			if(mem.data_type == TYPE_HALF)
				offset *= sizeof(GLhalf);
			else
				offset *= sizeof(uint8_t);

			glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, pmem.cuPBO);
			glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
			if(mem.data_type == TYPE_HALF)
				glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_HALF_FLOAT, (void*)offset);
			else
				glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, (void*)offset);
			glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0);

			glEnable(GL_TEXTURE_2D);

			if(transparent) {
				glEnable(GL_BLEND);
				glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
			}

			glColor3f(1.0f, 1.0f, 1.0f);

			if(draw_params.bind_display_space_shader_cb) {
				draw_params.bind_display_space_shader_cb();
			}

			glPushMatrix();
			glTranslatef(0.0f, (float)dy, 0.0f);

			glBegin(GL_QUADS);

			glTexCoord2f(0.0f, 0.0f);
			glVertex2f(0.0f, 0.0f);
			glTexCoord2f((float)w/(float)pmem.w, 0.0f);
			glVertex2f((float)width, 0.0f);
			glTexCoord2f((float)w/(float)pmem.w, (float)h/(float)pmem.h);
			glVertex2f((float)width, (float)height);
			glTexCoord2f(0.0f, (float)h/(float)pmem.h);
			glVertex2f(0.0f, (float)height);

			glEnd();

			glPopMatrix();

			if(draw_params.unbind_display_space_shader_cb) {
				draw_params.unbind_display_space_shader_cb();
			}

			if(transparent)
				glDisable(GL_BLEND);

			glBindTexture(GL_TEXTURE_2D, 0);
			glDisable(GL_TEXTURE_2D);

			cuda_pop_context();

			return;
		}

		Device::draw_pixels(mem, y, w, h, dy, width, height, transparent, draw_params);
	}

	void thread_run(DeviceTask *task)
	{
		if(task->type == DeviceTask::PATH_TRACE) {
			RenderTile tile;

			bool branched = task->integrator_branched;

			/* keep rendering tiles until done */
			while(task->acquire_tile(this, tile)) {
				int start_sample = tile.start_sample;
				int end_sample = tile.start_sample + tile.num_samples;

				for(int sample = start_sample; sample < end_sample; sample++) {
					if(task->get_cancel()) {
						if(task->need_finish_queue == false)
							break;
					}

					path_trace(tile, sample, branched);

					tile.sample = sample + 1;

					task->update_progress(&tile);
				}

				task->release_tile(tile);
			}
		}
		else if(task->type == DeviceTask::SHADER) {
			shader(*task);

			cuda_push_context();
			cuda_assert(cuCtxSynchronize());
			cuda_pop_context();
		}
	}

	class CUDADeviceTask : public DeviceTask {
	public:
		CUDADeviceTask(CUDADevice *device, DeviceTask& task)
		: DeviceTask(task)
		{
			run = function_bind(&CUDADevice::thread_run, device, this);
		}
	};

	int get_split_task_count(DeviceTask& task)
	{
		if(task.type == DeviceTask::SHADER)
			return task.get_subtask_count(TaskScheduler::num_threads(), 1024 * 1024);
		else
			return 1;
	}

	void task_add(DeviceTask& task)
	{
		if(task.type == DeviceTask::FILM_CONVERT) {
			/* must be done in main thread due to opengl access */
			film_convert(task, task.buffer, task.rgba_byte, task.rgba_half);

			cuda_push_context();
			cuda_assert(cuCtxSynchronize());
			cuda_pop_context();
		}
		else if(task.type == DeviceTask::SHADER) {
			/* split task into smaller ones */
			list<DeviceTask> tasks;

			task.split(tasks, TaskScheduler::num_threads(), 1024 * 1024);

			foreach(DeviceTask& task, tasks)
				task_pool.push(new CUDADeviceTask(this, task));
		}
		else {
			task_pool.push(new CUDADeviceTask(this, task));
		}
	}

	void task_wait()
	{
		task_pool.wait();
	}

	void task_cancel()
	{
		task_pool.cancel();
	}
};

bool device_cuda_init(void)
{
	static bool initialized = false;
	static bool result = false;

	if(initialized)
		return result;

	initialized = true;

	if(cuewInit() == CUEW_SUCCESS) {
		if(CUDADevice::have_precompiled_kernels())
			result = true;
#ifndef _WIN32
		else if(cuewCompilerPath() != NULL)
			result = true;
#endif
	}

	return result;
}
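
/* Hypothetical caller sketch (not part of this file): the generic device
 * layer is expected to use these entry points roughly as
 *
 *   if(device_cuda_init())
 *       device = device_cuda_create(info, stats, background);
 *
 * device_cuda_init() caches its result, so repeated calls are cheap. */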

Device *device_cuda_create(DeviceInfo& info, Stats &stats, bool background)
{
	return new CUDADevice(info, stats, background);
}

void device_cuda_info(vector<DeviceInfo>& devices)
{
	CUresult result;
	int count = 0;

	result = cuInit(0);
	if(result != CUDA_SUCCESS) {
		if(result != CUDA_ERROR_NO_DEVICE)
			fprintf(stderr, "CUDA cuInit: %s\n", cuewErrorString(result));
		return;
	}

	result = cuDeviceGetCount(&count);
	if(result != CUDA_SUCCESS) {
		fprintf(stderr, "CUDA cuDeviceGetCount: %s\n", cuewErrorString(result));
		return;
	}

	vector<DeviceInfo> display_devices;
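	/* Display devices (detected below via the kernel exec timeout attribute)
	 * are collected separately and appended after the loop, so compute-only
	 * devices come first in the returned list. */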

	for(int num = 0; num < count; num++) {
		char name[256];
		int attr;

		if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS)
			continue;

		DeviceInfo info;

		info.type = DEVICE_CUDA;
		info.description = string(name);
		info.id = string_printf("CUDA_%d", num);
		info.num = num;

		int major, minor;
		cuDeviceComputeCapability(&major, &minor, num);
		info.advanced_shading = (major >= 2);
		info.extended_images = (major >= 3);
		info.pack_images = false;

		/* if device has a kernel timeout, assume it is used for display */
		if(cuDeviceGetAttribute(&attr, CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT, num) == CUDA_SUCCESS && attr == 1) {
			info.display_device = true;
			display_devices.push_back(info);
		}
		else
			devices.push_back(info);
	}

	if(!display_devices.empty())
		devices.insert(devices.end(), display_devices.begin(), display_devices.end());
}

CCL_NAMESPACE_END