/*
 * Copyright 2011, Blender Foundation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "device.h"
#include "device_intern.h"

#include "util_cuda.h"
#include "util_debug.h"
#include "util_map.h"
#include "util_opengl.h"
#include "util_path.h"
#include "util_system.h"
#include "util_types.h"
#include "util_time.h"

CCL_NAMESPACE_BEGIN
class CUDADevice : public Device
{
public:
	TaskPool task_pool;
	CUdevice cuDevice;
	CUcontext cuContext;
	CUmodule cuModule;
	map<device_ptr, bool> tex_interp_map;
	int cuDevId;

	struct PixelMem {
		GLuint cuPBO;
		CUgraphicsResource cuPBOresource;
		GLuint cuTexId;
		int w, h;
	};

	map<device_ptr, PixelMem> pixel_mem_map;
	CUdeviceptr cuda_device_ptr(device_ptr mem)
	{
		return (CUdeviceptr)mem;
	}
	const char *cuda_error_string(CUresult result)
	{
		switch(result) {
			case CUDA_SUCCESS: return "No errors";
			case CUDA_ERROR_INVALID_VALUE: return "Invalid value";
			case CUDA_ERROR_OUT_OF_MEMORY: return "Out of memory";
			case CUDA_ERROR_NOT_INITIALIZED: return "Driver not initialized";
			case CUDA_ERROR_DEINITIALIZED: return "Driver deinitialized";

			case CUDA_ERROR_NO_DEVICE: return "No CUDA-capable device available";
			case CUDA_ERROR_INVALID_DEVICE: return "Invalid device";

			case CUDA_ERROR_INVALID_IMAGE: return "Invalid kernel image";
			case CUDA_ERROR_INVALID_CONTEXT: return "Invalid context";
			case CUDA_ERROR_CONTEXT_ALREADY_CURRENT: return "Context already current";
			case CUDA_ERROR_MAP_FAILED: return "Map failed";
			case CUDA_ERROR_UNMAP_FAILED: return "Unmap failed";
			case CUDA_ERROR_ARRAY_IS_MAPPED: return "Array is mapped";
			case CUDA_ERROR_ALREADY_MAPPED: return "Already mapped";
			case CUDA_ERROR_NO_BINARY_FOR_GPU: return "No binary for GPU";
			case CUDA_ERROR_ALREADY_ACQUIRED: return "Already acquired";
			case CUDA_ERROR_NOT_MAPPED: return "Not mapped";
			case CUDA_ERROR_NOT_MAPPED_AS_ARRAY: return "Mapped resource not available for access as an array";
			case CUDA_ERROR_NOT_MAPPED_AS_POINTER: return "Mapped resource not available for access as a pointer";
			case CUDA_ERROR_ECC_UNCORRECTABLE: return "Uncorrectable ECC error detected";
			case CUDA_ERROR_UNSUPPORTED_LIMIT: return "CUlimit not supported by device";

			case CUDA_ERROR_INVALID_SOURCE: return "Invalid source";
			case CUDA_ERROR_FILE_NOT_FOUND: return "File not found";
			case CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND: return "Link to a shared object failed to resolve";
			case CUDA_ERROR_SHARED_OBJECT_INIT_FAILED: return "Shared object initialization failed";

			case CUDA_ERROR_INVALID_HANDLE: return "Invalid handle";

			case CUDA_ERROR_NOT_FOUND: return "Not found";

			case CUDA_ERROR_NOT_READY: return "CUDA not ready";

			case CUDA_ERROR_LAUNCH_FAILED: return "Launch failed";
			case CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES: return "Launch exceeded resources";
			case CUDA_ERROR_LAUNCH_TIMEOUT: return "Launch exceeded timeout";
			case CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING: return "Launch with incompatible texturing";

			case CUDA_ERROR_UNKNOWN: return "Unknown error";

			default: return "Unknown CUDA error value";
		}
	}
#define cuda_abort() abort()

#define cuda_assert(stmt) \
	{ \
		CUresult result = stmt; \
		\
		if(result != CUDA_SUCCESS) { \
			string message = string_printf("CUDA error: %s in %s", cuda_error_string(result), #stmt); \
			if(error_msg == "") \
				error_msg = message; \
			fprintf(stderr, "%s\n", message.c_str()); \
			cuda_abort(); \
		} \
	}
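	/* cuda_assert() wraps driver API calls: on failure it records the first
	 * error message for later retrieval, always prints to stderr, and then
	 * aborts so the failing call site is visible in a debugger. The
	 * cuda_error() overloads below are the softer variants that record and
	 * report, then let the caller decide how to proceed. */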
	bool cuda_error(CUresult result)
	{
		if(result == CUDA_SUCCESS)
			return false;

		string message = string_printf("CUDA error: %s", cuda_error_string(result));
		if(error_msg == "")
			error_msg = message;
		fprintf(stderr, "%s\n", message.c_str());
		return true;
	}
	void cuda_error(const string& message)
	{
		if(error_msg == "")
			error_msg = message;
		fprintf(stderr, "%s\n", message.c_str());
	}
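	/* The CUDA driver API keeps a current context per thread. Worker threads
	 * from the task pool call into this device, so each entry point below
	 * makes the device context current first and clears it again when done. */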
	void cuda_push_context()
	{
		cuda_assert(cuCtxSetCurrent(cuContext))
	}

	void cuda_pop_context()
	{
		cuda_assert(cuCtxSetCurrent(NULL))
	}
	CUDADevice(DeviceInfo& info, bool background_)
	{
		background = background_;

		cuDevId = info.num;
		cuDevice = 0;
		cuContext = 0;

		/* initialize CUDA */
		if(cuda_error(cuInit(0)))
			return;

		/* setup device and context */
		if(cuda_error(cuDeviceGet(&cuDevice, cuDevId)))
			return;

		CUresult result;

		if(background) {
			result = cuCtxCreate(&cuContext, 0, cuDevice);
		}
		else {
			result = cuGLCtxCreate(&cuContext, 0, cuDevice);

			/* OpenGL interop context failed? fall back to a plain context */
			if(result != CUDA_SUCCESS) {
				result = cuCtxCreate(&cuContext, 0, cuDevice);
				background = true;
			}
		}

		if(cuda_error(result))
			return;
	}

	~CUDADevice()
	{
		task_pool.stop();

		cuda_assert(cuCtxDetach(cuContext))
	}
	bool support_device(bool experimental)
	{
		int major, minor;
		cuDeviceComputeCapability(&major, &minor, cuDevId);

		if(major <= 1 && minor <= 2) {
			cuda_error(string_printf("CUDA device supported only with compute capability 1.3 or up, found %d.%d.", major, minor));
			return false;
		}

		return true;
	}
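	/* Locate or build the CUDA kernel binary. Lookup order: a precompiled
	 * cubin shipped with Blender, then a previously compiled cubin in the
	 * user cache directory (keyed by compute capability and an MD5 hash of
	 * the kernel sources), and only then a fresh nvcc compile. */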
	string compile_kernel()
	{
		/* compute cubin name */
		int major, minor;
		cuDeviceComputeCapability(&major, &minor, cuDevId);

		/* attempt to use kernel provided with blender */
		string cubin = path_get(string_printf("lib/kernel_sm_%d%d.cubin", major, minor));
		if(path_exists(cubin))
			return cubin;

		/* not found, try to use locally compiled kernel */
		string kernel_path = path_get("kernel");
		string md5 = path_files_md5_hash(kernel_path);

		cubin = string_printf("cycles_kernel_sm%d%d_%s.cubin", major, minor, md5.c_str());
		cubin = path_user_get(path_join("cache", cubin));

		/* if exists already, use it */
		if(path_exists(cubin))
			return cubin;

#if defined(WITH_CUDA_BINARIES) && defined(_WIN32)
		if(major <= 1 && minor <= 2)
			cuda_error(string_printf("CUDA device supported only with compute capability 1.3 or up, found %d.%d.", major, minor));
		else
			cuda_error(string_printf("CUDA binary kernel for this graphics card compute capability (%d.%d) not found.", major, minor));
		return "";
#else
		/* if not, find CUDA compiler */
		string nvcc = cuCompilerPath();

		if(nvcc == "") {
			cuda_error("CUDA nvcc compiler not found. Install CUDA toolkit in default location.");
			return "";
		}

		/* compile */
		string kernel = path_join(kernel_path, "kernel.cu");
		string include = kernel_path;
		const int machine = system_cpu_bits();
		const int maxreg = 24;

		double starttime = time_dt();
		printf("Compiling CUDA kernel ...\n");

		path_create_directories(cubin);

		string command = string_printf("\"%s\" -arch=sm_%d%d -m%d --cubin \"%s\" "
			"-o \"%s\" --ptxas-options=\"-v\" --maxrregcount=%d --opencc-options -OPT:Olimit=0 -I\"%s\" -DNVCC",
			nvcc.c_str(), major, minor, machine, kernel.c_str(), cubin.c_str(), maxreg, include.c_str());

		if(system(command.c_str()) == -1) {
			cuda_error("Failed to execute compilation command, see console for details.");
			return "";
		}

		/* verify if compilation succeeded */
		if(!path_exists(cubin)) {
			cuda_error("CUDA kernel compilation failed, see console for details.");
			return "";
		}

		printf("Kernel compilation finished in %.2lfs.\n", time_dt() - starttime);

		return cubin;
#endif
	}
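	/* As an illustration (paths abbreviated, not literal output), the command
	 * built above expands to roughly:
	 *
	 *   "nvcc" -arch=sm_21 -m64 --cubin "kernel/kernel.cu" \
	 *     -o "cache/cycles_kernel_sm21_<md5>.cubin" --ptxas-options="-v" \
	 *     --maxrregcount=24 --opencc-options -OPT:Olimit=0 -I"kernel" -DNVCC
	 */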
	bool load_kernels(bool experimental)
	{
		/* check if cuda init succeeded */
		if(cuContext == 0)
			return false;

		if(!support_device(experimental))
			return false;

		/* get kernel */
		string cubin = compile_kernel();

		if(cubin == "")
			return false;

		/* open module */
		cuda_push_context();

		CUresult result = cuModuleLoad(&cuModule, cubin.c_str());
		if(cuda_error(result))
			cuda_error(string_printf("Failed loading CUDA kernel %s.", cubin.c_str()));

		cuda_pop_context();

		return (result == CUDA_SUCCESS);
	}
	void mem_alloc(device_memory& mem, MemoryType type)
	{
		cuda_push_context();
		CUdeviceptr device_pointer;
		cuda_assert(cuMemAlloc(&device_pointer, mem.memory_size()))
		mem.device_pointer = (device_ptr)device_pointer;
		cuda_pop_context();
	}
	void mem_copy_to(device_memory& mem)
	{
		cuda_push_context();
		cuda_assert(cuMemcpyHtoD(cuda_device_ptr(mem.device_pointer), (void*)mem.data_pointer, mem.memory_size()))
		cuda_pop_context();
	}
	void mem_copy_from(device_memory& mem, int y, int w, int h, int elem)
	{
		size_t offset = elem*y*w;
		size_t size = elem*w*h;

		cuda_push_context();
		cuda_assert(cuMemcpyDtoH((uchar*)mem.data_pointer + offset,
			(CUdeviceptr)((uchar*)mem.device_pointer + offset), size))
		cuda_pop_context();
	}
	void mem_zero(device_memory& mem)
	{
		memset((void*)mem.data_pointer, 0, mem.memory_size());

		cuda_push_context();
		cuda_assert(cuMemsetD8(cuda_device_ptr(mem.device_pointer), 0, mem.memory_size()))
		cuda_pop_context();
	}
	void mem_free(device_memory& mem)
	{
		if(mem.device_pointer) {
			cuda_push_context();
			cuda_assert(cuMemFree(cuda_device_ptr(mem.device_pointer)))
			cuda_pop_context();

			mem.device_pointer = 0;
		}
	}
	void const_copy_to(const char *name, void *host, size_t size)
	{
		CUdeviceptr mem;
		size_t bytes;

		cuda_push_context();
		cuda_assert(cuModuleGetGlobal(&mem, &bytes, cuModule, name))
		//assert(bytes == size);
		cuda_assert(cuMemcpyHtoD(mem, host, size))
		cuda_pop_context();
	}
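	/* Constant data is addressed by the name of a global symbol in the loaded
	 * module via cuModuleGetGlobal(). A hypothetical call site (not part of
	 * this file) copying render settings might look like:
	 *
	 *   device->const_copy_to("__data", &kernel_data, sizeof(kernel_data));
	 */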
	void tex_alloc(const char *name, device_memory& mem, bool interpolation, bool periodic)
	{
		/* determine format */
		CUarray_format_enum format;
		size_t dsize = datatype_size(mem.data_type);
		size_t size = mem.memory_size();

		switch(mem.data_type) {
			case TYPE_UCHAR: format = CU_AD_FORMAT_UNSIGNED_INT8; break;
			case TYPE_UINT: format = CU_AD_FORMAT_UNSIGNED_INT32; break;
			case TYPE_INT: format = CU_AD_FORMAT_SIGNED_INT32; break;
			case TYPE_FLOAT: format = CU_AD_FORMAT_FLOAT; break;
			default: assert(0); return;
		}

		CUtexref texref;

		cuda_push_context();
		cuda_assert(cuModuleGetTexRef(&texref, cuModule, name))

		if(interpolation) {
			CUarray handle = NULL;
			CUDA_ARRAY_DESCRIPTOR desc;

			desc.Width = mem.data_width;
			desc.Height = mem.data_height;
			desc.Format = format;
			desc.NumChannels = mem.data_elements;

			cuda_assert(cuArrayCreate(&handle, &desc))

			if(mem.data_height > 1) {
				CUDA_MEMCPY2D param;
				memset(&param, 0, sizeof(param));
				param.dstMemoryType = CU_MEMORYTYPE_ARRAY;
				param.dstArray = handle;
				param.srcMemoryType = CU_MEMORYTYPE_HOST;
				param.srcHost = (void*)mem.data_pointer;
				param.srcPitch = mem.data_width*dsize*mem.data_elements;
				param.WidthInBytes = param.srcPitch;
				param.Height = mem.data_height;

				cuda_assert(cuMemcpy2D(&param))
			}
			else
				cuda_assert(cuMemcpyHtoA(handle, 0, (void*)mem.data_pointer, size))

			cuda_assert(cuTexRefSetArray(texref, handle, CU_TRSA_OVERRIDE_FORMAT))

			cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_LINEAR))
			cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_NORMALIZED_COORDINATES))

			mem.device_pointer = (device_ptr)handle;
		}
		else {
			cuda_pop_context();

			mem_alloc(mem, MEM_READ_ONLY);
			mem_copy_to(mem);

			cuda_push_context();

			cuda_assert(cuTexRefSetAddress(NULL, texref, cuda_device_ptr(mem.device_pointer), size))
			cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_POINT))
			cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_READ_AS_INTEGER))
		}

		if(periodic) {
			cuda_assert(cuTexRefSetAddressMode(texref, 0, CU_TR_ADDRESS_MODE_WRAP))
			cuda_assert(cuTexRefSetAddressMode(texref, 1, CU_TR_ADDRESS_MODE_WRAP))
		}
		else {
			cuda_assert(cuTexRefSetAddressMode(texref, 0, CU_TR_ADDRESS_MODE_CLAMP))
			cuda_assert(cuTexRefSetAddressMode(texref, 1, CU_TR_ADDRESS_MODE_CLAMP))
		}
		cuda_assert(cuTexRefSetFormat(texref, format, mem.data_elements))

		cuda_pop_context();

		tex_interp_map[mem.device_pointer] = interpolation;
	}
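	/* Two texture paths above: interpolated textures go through a CUDA array
	 * bound with cuTexRefSetArray() (linear filtering, normalized coordinates),
	 * while lookup tables stay in plain device memory bound with
	 * cuTexRefSetAddress() (point filtering, integer reads). tex_interp_map
	 * records which path was used so tex_free() can release it correctly. */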
	void tex_free(device_memory& mem)
	{
		if(mem.device_pointer) {
			if(tex_interp_map[mem.device_pointer]) {
				cuda_push_context();
				cuArrayDestroy((CUarray)mem.device_pointer);
				cuda_pop_context();

				tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
				mem.device_pointer = 0;
			}
			else {
				tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
				mem_free(mem);
			}
		}
	}
	void path_trace(RenderTile& rtile, int sample)
	{
		cuda_push_context();

		CUfunction cuPathTrace;
		CUdeviceptr d_buffer = cuda_device_ptr(rtile.buffer);
		CUdeviceptr d_rng_state = cuda_device_ptr(rtile.rng_state);

		/* get kernel function */
		cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_path_trace"))

		/* pass in parameters */
		int offset = 0;

		cuda_assert(cuParamSetv(cuPathTrace, offset, &d_buffer, sizeof(d_buffer)))
		offset += sizeof(d_buffer);

		cuda_assert(cuParamSetv(cuPathTrace, offset, &d_rng_state, sizeof(d_rng_state)))
		offset += sizeof(d_rng_state);

		offset = align_up(offset, __alignof(sample));

		cuda_assert(cuParamSeti(cuPathTrace, offset, sample))
		offset += sizeof(sample);

		cuda_assert(cuParamSeti(cuPathTrace, offset, rtile.x))
		offset += sizeof(rtile.x);

		cuda_assert(cuParamSeti(cuPathTrace, offset, rtile.y))
		offset += sizeof(rtile.y);

		cuda_assert(cuParamSeti(cuPathTrace, offset, rtile.w))
		offset += sizeof(rtile.w);

		cuda_assert(cuParamSeti(cuPathTrace, offset, rtile.h))
		offset += sizeof(rtile.h);

		cuda_assert(cuParamSeti(cuPathTrace, offset, rtile.offset))
		offset += sizeof(rtile.offset);

		cuda_assert(cuParamSeti(cuPathTrace, offset, rtile.stride))
		offset += sizeof(rtile.stride);

		cuda_assert(cuParamSetSize(cuPathTrace, offset))

		/* launch kernel: todo find optimal size, cache config for fermi */
		int xthreads = 16;
		int ythreads = 16;
		int xblocks = (rtile.w + xthreads - 1)/xthreads;
		int yblocks = (rtile.h + ythreads - 1)/ythreads;

		cuda_assert(cuFuncSetCacheConfig(cuPathTrace, CU_FUNC_CACHE_PREFER_L1))
		cuda_assert(cuFuncSetBlockShape(cuPathTrace, xthreads, ythreads, 1))
		cuda_assert(cuLaunchGrid(cuPathTrace, xblocks, yblocks))

		cuda_assert(cuCtxSynchronize())

		cuda_pop_context();
	}
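	/* The legacy driver launch API above packs kernel arguments manually into
	 * a byte buffer: pointers go in with cuParamSetv(), ints with cuParamSeti(),
	 * and align_up() inserts padding so each argument sits at its natural
	 * alignment. E.g. on a 64-bit build, the two 8-byte pointers occupy
	 * offsets 0 and 8, so the first int lands at offset 16 (already 4-byte
	 * aligned) and the ones after it at 20, 24, and so on, for a total
	 * cuParamSetSize of 44. */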
	void tonemap(DeviceTask& task, device_ptr buffer, device_ptr rgba)
	{
		cuda_push_context();

		CUfunction cuFilmConvert;
		CUdeviceptr d_rgba = map_pixels(rgba);
		CUdeviceptr d_buffer = cuda_device_ptr(buffer);

		/* get kernel function */
		cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_tonemap"))

		/* pass in parameters */
		int offset = 0;

		cuda_assert(cuParamSetv(cuFilmConvert, offset, &d_rgba, sizeof(d_rgba)))
		offset += sizeof(d_rgba);

		cuda_assert(cuParamSetv(cuFilmConvert, offset, &d_buffer, sizeof(d_buffer)))
		offset += sizeof(d_buffer);

		int sample = task.sample;
		offset = align_up(offset, __alignof(sample));

		cuda_assert(cuParamSeti(cuFilmConvert, offset, task.sample))
		offset += sizeof(task.sample);

		cuda_assert(cuParamSeti(cuFilmConvert, offset, task.resolution))
		offset += sizeof(task.resolution);

		cuda_assert(cuParamSeti(cuFilmConvert, offset, task.x))
		offset += sizeof(task.x);

		cuda_assert(cuParamSeti(cuFilmConvert, offset, task.y))
		offset += sizeof(task.y);

		cuda_assert(cuParamSeti(cuFilmConvert, offset, task.w))
		offset += sizeof(task.w);

		cuda_assert(cuParamSeti(cuFilmConvert, offset, task.h))
		offset += sizeof(task.h);

		cuda_assert(cuParamSeti(cuFilmConvert, offset, task.offset))
		offset += sizeof(task.offset);

		cuda_assert(cuParamSeti(cuFilmConvert, offset, task.stride))
		offset += sizeof(task.stride);

		cuda_assert(cuParamSetSize(cuFilmConvert, offset))

		/* launch kernel: todo find optimal size, cache config for fermi */
		int xthreads = 16;
		int ythreads = 16;
		int xblocks = (task.w + xthreads - 1)/xthreads;
		int yblocks = (task.h + ythreads - 1)/ythreads;

		cuda_assert(cuFuncSetCacheConfig(cuFilmConvert, CU_FUNC_CACHE_PREFER_L1))
		cuda_assert(cuFuncSetBlockShape(cuFilmConvert, xthreads, ythreads, 1))
		cuda_assert(cuLaunchGrid(cuFilmConvert, xblocks, yblocks))

		unmap_pixels(task.rgba);

		cuda_pop_context();
	}
	void shader(DeviceTask& task)
	{
		cuda_push_context();

		CUfunction cuDisplace;
		CUdeviceptr d_input = cuda_device_ptr(task.shader_input);
		CUdeviceptr d_offset = cuda_device_ptr(task.shader_output);

		/* get kernel function */
		cuda_assert(cuModuleGetFunction(&cuDisplace, cuModule, "kernel_cuda_shader"))

		/* pass in parameters */
		int offset = 0;

		cuda_assert(cuParamSetv(cuDisplace, offset, &d_input, sizeof(d_input)))
		offset += sizeof(d_input);

		cuda_assert(cuParamSetv(cuDisplace, offset, &d_offset, sizeof(d_offset)))
		offset += sizeof(d_offset);

		int shader_eval_type = task.shader_eval_type;
		offset = align_up(offset, __alignof(shader_eval_type));

		cuda_assert(cuParamSeti(cuDisplace, offset, task.shader_eval_type))
		offset += sizeof(task.shader_eval_type);

		cuda_assert(cuParamSeti(cuDisplace, offset, task.shader_x))
		offset += sizeof(task.shader_x);

		cuda_assert(cuParamSetSize(cuDisplace, offset))

		/* launch kernel: todo find optimal size, cache config for fermi */
		int xthreads = 16;
		int xblocks = (task.shader_w + xthreads - 1)/xthreads;

		cuda_assert(cuFuncSetCacheConfig(cuDisplace, CU_FUNC_CACHE_PREFER_L1))
		cuda_assert(cuFuncSetBlockShape(cuDisplace, xthreads, 1, 1))
		cuda_assert(cuLaunchGrid(cuDisplace, xblocks, 1))

		cuda_pop_context();
	}
	CUdeviceptr map_pixels(device_ptr mem)
	{
		if(!background) {
			PixelMem pmem = pixel_mem_map[mem];
			CUdeviceptr buffer;
			size_t bytes;

			cuda_assert(cuGraphicsMapResources(1, &pmem.cuPBOresource, 0))
			cuda_assert(cuGraphicsResourceGetMappedPointer(&buffer, &bytes, pmem.cuPBOresource))

			return buffer;
		}

		return cuda_device_ptr(mem);
	}
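	/* With OpenGL interop, display pixels live in a PBO registered with CUDA.
	 * Writing into it requires mapping the resource to obtain a device
	 * pointer, then unmapping before OpenGL may touch the buffer again; in
	 * background mode the pixels are ordinary device memory and no mapping
	 * is needed. */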
	void unmap_pixels(device_ptr mem)
	{
		if(!background) {
			PixelMem pmem = pixel_mem_map[mem];

			cuda_assert(cuGraphicsUnmapResources(1, &pmem.cuPBOresource, 0))
		}
	}
	void pixels_alloc(device_memory& mem)
	{
		if(!background) {
			PixelMem pmem;

			pmem.w = mem.data_width;
			pmem.h = mem.data_height;

			cuda_push_context();

			glGenBuffers(1, &pmem.cuPBO);
			glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
			glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(GLfloat)*3, NULL, GL_DYNAMIC_DRAW);

			glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);

			glGenTextures(1, &pmem.cuTexId);
			glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
			glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, pmem.w, pmem.h, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
			glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
			glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
			glBindTexture(GL_TEXTURE_2D, 0);

			CUresult result = cuGraphicsGLRegisterBuffer(&pmem.cuPBOresource, pmem.cuPBO, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);

			if(!cuda_error(result)) {
				cuda_pop_context();

				mem.device_pointer = pmem.cuTexId;
				pixel_mem_map[mem.device_pointer] = pmem;

				return;
			}
			else {
				/* failed to register buffer, fallback to no interop */
				glDeleteBuffers(1, &pmem.cuPBO);
				glDeleteTextures(1, &pmem.cuTexId);

				cuda_pop_context();

				background = true;
			}
		}

		Device::pixels_alloc(mem);
	}
	void pixels_copy_from(device_memory& mem, int y, int w, int h)
	{
		if(!background) {
			PixelMem pmem = pixel_mem_map[mem.device_pointer];

			cuda_push_context();

			glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
			uchar *pixels = (uchar*)glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_READ_ONLY);
			size_t offset = sizeof(uchar)*4*y*w;
			memcpy((uchar*)mem.data_pointer + offset, pixels + offset, sizeof(uchar)*4*w*h);
			glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
			glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);

			cuda_pop_context();

			return;
		}

		Device::pixels_copy_from(mem, y, w, h);
	}
	void pixels_free(device_memory& mem)
	{
		if(mem.device_pointer) {
			if(!background) {
				PixelMem pmem = pixel_mem_map[mem.device_pointer];

				cuda_push_context();

				cuda_assert(cuGraphicsUnregisterResource(pmem.cuPBOresource))
				glDeleteBuffers(1, &pmem.cuPBO);
				glDeleteTextures(1, &pmem.cuTexId);

				cuda_pop_context();

				pixel_mem_map.erase(pixel_mem_map.find(mem.device_pointer));
				mem.device_pointer = 0;

				return;
			}

			Device::pixels_free(mem);
		}
	}
	void draw_pixels(device_memory& mem, int y, int w, int h, int dy, int width, int height, bool transparent)
	{
		if(!background) {
			PixelMem pmem = pixel_mem_map[mem.device_pointer];

			cuda_push_context();

			/* for multi devices, this assumes the inefficient method that we allocate
			 * all pixels on the device even though we only render to a subset */
			size_t offset = sizeof(uint8_t)*4*y*w;

			glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, pmem.cuPBO);
			glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
			glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, (void*)offset);
			glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0);

			glEnable(GL_TEXTURE_2D);

			if(transparent) {
				glEnable(GL_BLEND);
				glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
			}

			glColor3f(1.0f, 1.0f, 1.0f);

			glPushMatrix();
			glTranslatef(0.0f, (float)dy, 0.0f);

			glBegin(GL_QUADS);
			glTexCoord2f(0.0f, 0.0f);
			glVertex2f(0.0f, 0.0f);
			glTexCoord2f((float)w/(float)pmem.w, 0.0f);
			glVertex2f((float)width, 0.0f);
			glTexCoord2f((float)w/(float)pmem.w, (float)h/(float)pmem.h);
			glVertex2f((float)width, (float)height);
			glTexCoord2f(0.0f, (float)h/(float)pmem.h);
			glVertex2f(0.0f, (float)height);
			glEnd();

			glPopMatrix();

			if(transparent)
				glDisable(GL_BLEND);

			glBindTexture(GL_TEXTURE_2D, 0);
			glDisable(GL_TEXTURE_2D);

			cuda_pop_context();

			return;
		}

		Device::draw_pixels(mem, y, w, h, dy, width, height, transparent);
	}
	void thread_run(DeviceTask *task)
	{
		if(task->type == DeviceTask::PATH_TRACE) {
			RenderTile tile;

			/* keep rendering tiles until done */
			while(task->acquire_tile(this, tile)) {
				int start_sample = tile.start_sample;
				int end_sample = tile.start_sample + tile.num_samples;

				for(int sample = start_sample; sample < end_sample; sample++) {
					if(task->get_cancel()) {
						break;
					}

					path_trace(tile, sample);
					task->update_progress(tile);
				}

				task->release_tile(tile);
			}
		}
		else if(task->type == DeviceTask::SHADER) {
			shader(*task);

			cuda_push_context();
			cuda_assert(cuCtxSynchronize())
			cuda_pop_context();
		}
	}
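	/* thread_run() executes on a task pool worker thread: path tracing pulls
	 * tiles from the task until none remain, checking for cancellation
	 * between samples so a render can be interrupted without waiting for the
	 * full sample count. */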
	class CUDADeviceTask : public DeviceTask {
	public:
		CUDADeviceTask(CUDADevice *device, DeviceTask& task)
		: DeviceTask(task)
		{
			run = function_bind(&CUDADevice::thread_run, device, this);
		}
	};
	void task_add(DeviceTask& task)
	{
		if(task.type == DeviceTask::TONEMAP) {
			/* must be done in main thread due to opengl access */
			tonemap(task, task.buffer, task.rgba);

			cuda_push_context();
			cuda_assert(cuCtxSynchronize())
			cuda_pop_context();
		}
		else
			task_pool.push(new CUDADeviceTask(this, task));
	}

	void task_wait()
	{
		task_pool.wait_work();
	}

	void task_cancel()
	{
		task_pool.cancel();
	}
};
Device *device_cuda_create(DeviceInfo& info, bool background)
{
	return new CUDADevice(info, background);
}
void device_cuda_info(vector<DeviceInfo>& devices)
{
	int count = 0;

	if(cuInit(0) != CUDA_SUCCESS)
		return;
	if(cuDeviceGetCount(&count) != CUDA_SUCCESS)
		return;

	vector<DeviceInfo> display_devices;

	for(int num = 0; num < count; num++) {
		char name[256];
		int attr;

		if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS)
			continue;

		DeviceInfo info;

		info.type = DEVICE_CUDA;
		info.description = string(name);
		info.id = string_printf("CUDA_%d", num);
		info.num = num;

		int major, minor;
		cuDeviceComputeCapability(&major, &minor, num);
		info.advanced_shading = (major >= 2);
		info.pack_images = false;

		/* if device has a kernel timeout, assume it is used for display */
		if(cuDeviceGetAttribute(&attr, CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT, num) == CUDA_SUCCESS && attr == 1) {
			info.display_device = true;
			display_devices.push_back(info);
		}
		else
			devices.push_back(info);
	}

	/* move devices with an attached display to the end of the list */
	if(!display_devices.empty())
		devices.insert(devices.end(), display_devices.begin(), display_devices.end());
}

CCL_NAMESPACE_END
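/* A minimal sketch of how these entry points fit together (hypothetical
 * caller, not part of this file): enumerate devices, create one, load the
 * kernels, then submit tasks.
 *
 *   vector<DeviceInfo> devices;
 *   device_cuda_info(devices);
 *
 *   if(!devices.empty()) {
 *       Device *device = device_cuda_create(devices[0], true);
 *
 *       if(device->load_kernels(false)) {
 *           DeviceTask task(DeviceTask::PATH_TRACE);
 *           device->task_add(task);
 *           device->task_wait();
 *       }
 *
 *       delete device;
 *   }
 */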