Cycles: Improve denoising speed on GPUs with small tile sizes
[blender.git] / intern / cycles / device / device_cuda.cpp
1 /*
2  * Copyright 2011-2013 Blender Foundation
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16
17 #include <climits>
18 #include <limits.h>
19 #include <stdio.h>
20 #include <stdlib.h>
21 #include <string.h>
22
23 #include "device/device.h"
24 #include "device/device_denoising.h"
25 #include "device/device_intern.h"
26 #include "device/device_split_kernel.h"
27
28 #include "render/buffers.h"
29
30 #include "kernel/filter/filter_defines.h"
31
32 #ifdef WITH_CUDA_DYNLOAD
33 #  include "cuew.h"
34 #else
35 #  include "util/util_opengl.h"
36 #  include <cuda.h>
37 #  include <cudaGL.h>
38 #endif
39 #include "util/util_debug.h"
40 #include "util/util_logging.h"
41 #include "util/util_map.h"
42 #include "util/util_md5.h"
43 #include "util/util_opengl.h"
44 #include "util/util_path.h"
45 #include "util/util_string.h"
46 #include "util/util_system.h"
47 #include "util/util_types.h"
48 #include "util/util_time.h"
49
50 #include "kernel/split/kernel_split_data_types.h"
51
52 CCL_NAMESPACE_BEGIN
53
54 #ifndef WITH_CUDA_DYNLOAD
55
56 /* Transparently implement some functions, so that the majority of the file does not
57  * need to worry about the difference between dynamically loaded and linked CUDA at all.
58  */
59
60 namespace {
61
62 const char *cuewErrorString(CUresult result)
63 {
64         /* We can only give the error code here without major code duplication; that
65          * should be enough, since dynamic loading is only disabled by folks who know
66          * what they're doing anyway.
67          *
68          * NOTE: Avoid calling this from several threads.
69          */
70         static string error;
71         error = string_printf("%d", result);
72         return error.c_str();
73 }
74
75 const char *cuewCompilerPath(void)
76 {
77         return CYCLES_CUDA_NVCC_EXECUTABLE;
78 }
79
80 int cuewCompilerVersion(void)
81 {
82         return (CUDA_VERSION / 100) + (CUDA_VERSION % 100 / 10);
83 }
84
85 }  /* namespace */
86 #endif  /* WITH_CUDA_DYNLOAD */
87
88 class CUDADevice;
89
90 class CUDASplitKernel : public DeviceSplitKernel {
91         CUDADevice *device;
92 public:
93         explicit CUDASplitKernel(CUDADevice *device);
94
95         virtual uint64_t state_buffer_size(device_memory& kg, device_memory& data, size_t num_threads);
96
97         virtual bool enqueue_split_kernel_data_init(const KernelDimensions& dim,
98                                                     RenderTile& rtile,
99                                                     int num_global_elements,
100                                                     device_memory& kernel_globals,
101                                                     device_memory& kernel_data_,
102                                                     device_memory& split_data,
103                                                     device_memory& ray_state,
104                                                     device_memory& queue_index,
105                                                     device_memory& use_queues_flag,
106                                                     device_memory& work_pool_wgs);
107
108         virtual SplitKernelFunction* get_split_kernel_function(const string& kernel_name,
109                                                                const DeviceRequestedFeatures&);
110         virtual int2 split_kernel_local_size();
111         virtual int2 split_kernel_global_size(device_memory& kg, device_memory& data, DeviceTask *task);
112 };
113
114 /* Utility to push/pop CUDA context. */
115 class CUDAContextScope {
116 public:
117         CUDAContextScope(CUDADevice *device);
118         ~CUDAContextScope();
119
120 private:
121         CUDADevice *device;
122 };
123
124 class CUDADevice : public Device
125 {
126 public:
127         DedicatedTaskPool task_pool;
128         CUdevice cuDevice;
129         CUcontext cuContext;
130         CUmodule cuModule, cuFilterModule;
131         int cuDevId;
132         int cuDevArchitecture;
133         bool first_error;
134         CUDASplitKernel *split_kernel;
135
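            /* Bookkeeping for a single device allocation: the texture object and/or
             * CUDA array that were created for it, if any. */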
136         struct CUDAMem {
137                 CUDAMem()
138                 : texobject(0), array(0) {}
139
140                 CUtexObject texobject;
141                 CUarray array;
142         };
143         map<device_memory*, CUDAMem> cuda_mem_map;
144
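            /* OpenGL interop state for interactive display: a pixel buffer object, its
             * CUDA graphics resource handle, and the GL texture used for drawing. */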
145         struct PixelMem {
146                 GLuint cuPBO;
147                 CUgraphicsResource cuPBOresource;
148                 GLuint cuTexId;
149                 int w, h;
150         };
151         map<device_ptr, PixelMem> pixel_mem_map;
152
153         /* Bindless Textures */
154         device_vector<TextureInfo> texture_info;
155         bool need_texture_info;
156
157         CUdeviceptr cuda_device_ptr(device_ptr mem)
158         {
159                 return (CUdeviceptr)mem;
160         }
161
162         static bool have_precompiled_kernels()
163         {
164                 string cubins_path = path_get("lib");
165                 return path_exists(cubins_path);
166         }
167
168         virtual bool show_samples() const
169         {
170                 /* The CUDADevice only processes one tile at a time, so showing samples is fine. */
171                 return true;
172         }
173
174 /*#ifdef NDEBUG
175 #define cuda_abort()
176 #else
177 #define cuda_abort() abort()
178 #endif*/
179         void cuda_error_documentation()
180         {
181                 if(first_error) {
182                         fprintf(stderr, "\nRefer to the Cycles GPU rendering documentation for possible solutions:\n");
183                         fprintf(stderr, "https://docs.blender.org/manual/en/dev/render/cycles/gpu_rendering.html\n\n");
184                         first_error = false;
185                 }
186         }
187
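/* Report a failing CUDA call: remember the first error message and always print
 * to the console, pointing at the GPU rendering documentation once. */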
188 #define cuda_assert(stmt) \
189         { \
190                 CUresult result = stmt; \
191                 \
192                 if(result != CUDA_SUCCESS) { \
193                         string message = string_printf("CUDA error: %s in %s, line %d", cuewErrorString(result), #stmt, __LINE__); \
194                         if(error_msg == "") \
195                                 error_msg = message; \
196                         fprintf(stderr, "%s\n", message.c_str()); \
197                         /*cuda_abort();*/ \
198                         cuda_error_documentation(); \
199                 } \
200         } (void)0
201
202         bool cuda_error_(CUresult result, const string& stmt)
203         {
204                 if(result == CUDA_SUCCESS)
205                         return false;
206
207                 string message = string_printf("CUDA error at %s: %s", stmt.c_str(), cuewErrorString(result));
208                 if(error_msg == "")
209                         error_msg = message;
210                 fprintf(stderr, "%s\n", message.c_str());
211                 cuda_error_documentation();
212                 return true;
213         }
214
215 #define cuda_error(stmt) cuda_error_(stmt, #stmt)
216
217         void cuda_error_message(const string& message)
218         {
219                 if(error_msg == "")
220                         error_msg = message;
221                 fprintf(stderr, "%s\n", message.c_str());
222                 cuda_error_documentation();
223         }
224
225         CUDADevice(DeviceInfo& info, Stats &stats, bool background_)
226         : Device(info, stats, background_),
227           texture_info(this, "__texture_info", MEM_TEXTURE)
228         {
229                 first_error = true;
230                 background = background_;
231
232                 cuDevId = info.num;
233                 cuDevice = 0;
234                 cuContext = 0;
235
236                 cuModule = 0;
237                 cuFilterModule = 0;
238
239                 split_kernel = NULL;
240
241                 need_texture_info = false;
242
243                 /* Initialize CUDA. */
244                 if(cuda_error(cuInit(0)))
245                         return;
246
247                 /* Setup device and context. */
248                 if(cuda_error(cuDeviceGet(&cuDevice, cuDevId)))
249                         return;
250
251                 /* CU_CTX_LMEM_RESIZE_TO_MAX for reserving local memory ahead of render,
252                  * so we can predict which memory to map to host. */
253                 unsigned int ctx_flags = CU_CTX_LMEM_RESIZE_TO_MAX;
254
255                 /* Create context. */
256                 CUresult result;
257
258                 if(background) {
259                         result = cuCtxCreate(&cuContext, ctx_flags, cuDevice);
260                 }
261                 else {
262                         result = cuGLCtxCreate(&cuContext, ctx_flags, cuDevice);
263
264                         if(result != CUDA_SUCCESS) {
265                                 result = cuCtxCreate(&cuContext, ctx_flags, cuDevice);
266                                 background = true;
267                         }
268                 }
269
270                 if(cuda_error_(result, "cuCtxCreate"))
271                         return;
272
273                 int major, minor;
274                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
275                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
276                 cuDevArchitecture = major*100 + minor*10;
277
278                 /* Pop context set by cuCtxCreate. */
279                 cuCtxPopCurrent(NULL);
280         }
281
282         ~CUDADevice()
283         {
284                 task_pool.stop();
285
286                 delete split_kernel;
287
288                 if(!info.has_fermi_limits) {
289                         texture_info.free();
290                 }
291
292                 cuda_assert(cuCtxDestroy(cuContext));
293         }
294
295         bool support_device(const DeviceRequestedFeatures& /*requested_features*/)
296         {
297                 int major, minor;
298                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
299                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
300
301                 /* We only support sm_20 and above */
302                 if(major < 2) {
303                         cuda_error_message(string_printf("CUDA device supported only with compute capability 2.0 or up, found %d.%d.", major, minor));
304                         return false;
305                 }
306
307                 return true;
308         }
309
310         bool use_adaptive_compilation()
311         {
312                 return DebugFlags().cuda.adaptive_compile;
313         }
314
315         bool use_split_kernel()
316         {
317                 return DebugFlags().cuda.split_kernel;
318         }
319
320         /* Common NVCC flags which stay the same regardless of shading model or
321          * kernel sources md5, and only depend on the compiler or compilation settings.
322          */
323         string compile_kernel_get_common_cflags(
324                 const DeviceRequestedFeatures& requested_features,
325                 bool filter=false, bool split=false)
326         {
327                 const int cuda_version = cuewCompilerVersion();
328                 const int machine = system_cpu_bits();
329                 const string source_path = path_get("source");
330                 const string include_path = source_path;
331                 string cflags = string_printf("-m%d "
332                                               "--ptxas-options=\"-v\" "
333                                               "--use_fast_math "
334                                               "-DNVCC "
335                                               "-D__KERNEL_CUDA_VERSION__=%d "
336                                                "-I\"%s\"",
337                                               machine,
338                                               cuda_version,
339                                               include_path.c_str());
340                 if(!filter && use_adaptive_compilation()) {
341                         cflags += " " + requested_features.get_build_options();
342                 }
343                 const char *extra_cflags = getenv("CYCLES_CUDA_EXTRA_CFLAGS");
344                 if(extra_cflags) {
345                         cflags += string(" ") + string(extra_cflags);
346                 }
347 #ifdef WITH_CYCLES_DEBUG
348                 cflags += " -D__KERNEL_DEBUG__";
349 #endif
350
351                 if(split) {
352                         cflags += " -D__SPLIT__";
353                 }
354
355                 return cflags;
356         }
357
358         bool compile_check_compiler() {
359                 const char *nvcc = cuewCompilerPath();
360                 if(nvcc == NULL) {
361                         cuda_error_message("CUDA nvcc compiler not found. "
362                                            "Install CUDA toolkit in default location.");
363                         return false;
364                 }
365                 const int cuda_version = cuewCompilerVersion();
366                 VLOG(1) << "Found nvcc " << nvcc
367                         << ", CUDA version " << cuda_version
368                         << ".";
369                 const int major = cuda_version / 10, minor = cuda_version % 10;
370                 if(cuda_version == 0) {
371                         cuda_error_message("CUDA nvcc compiler version could not be parsed.");
372                         return false;
373                 }
374                 if(cuda_version < 80) {
375                         printf("Unsupported CUDA version %d.%d detected, "
376                                "you need CUDA 8.0 or newer.\n",
377                                major, minor);
378                         return false;
379                 }
380                 else if(cuda_version != 80) {
381                         printf("CUDA version %d.%d detected, build may succeed but only "
382                                "CUDA 8.0 is officially supported.\n",
383                                major, minor);
384                 }
385                 return true;
386         }
387
388         string compile_kernel(const DeviceRequestedFeatures& requested_features,
389                               bool filter=false, bool split=false)
390         {
391                 const char *name, *source;
392                 if(filter) {
393                         name = "filter";
394                         source = "filter.cu";
395                 }
396                 else if(split) {
397                         name = "kernel_split";
398                         source = "kernel_split.cu";
399                 }
400                 else {
401                         name = "kernel";
402                         source = "kernel.cu";
403                 }
404                 /* Compute cubin name. */
405                 int major, minor;
406                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
407                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
408
409                 /* Attempt to use kernel provided with Blender. */
410                 if(!use_adaptive_compilation()) {
411                         const string cubin = path_get(string_printf("lib/%s_sm_%d%d.cubin",
412                                                                     name, major, minor));
413                         VLOG(1) << "Testing for pre-compiled kernel " << cubin << ".";
414                         if(path_exists(cubin)) {
415                                 VLOG(1) << "Using precompiled kernel.";
416                                 return cubin;
417                         }
418                 }
419
420                 const string common_cflags =
421                         compile_kernel_get_common_cflags(requested_features, filter, split);
422
423                 /* Try to use locally compiled kernel. */
424                 const string source_path = path_get("source");
425                 const string kernel_md5 = path_files_md5_hash(source_path);
426
427                 /* We include cflags into the md5, so that changing the CUDA toolkit or other
428                  * compiler command line arguments makes sure the cubin gets re-built.
429                  */
430                 const string cubin_md5 = util_md5_string(kernel_md5 + common_cflags);
431
432                 const string cubin_file = string_printf("cycles_%s_sm%d%d_%s.cubin",
433                                                         name, major, minor,
434                                                         cubin_md5.c_str());
435                 const string cubin = path_cache_get(path_join("kernels", cubin_file));
436                 VLOG(1) << "Testing for locally compiled kernel " << cubin << ".";
437                 if(path_exists(cubin)) {
438                         VLOG(1) << "Using locally compiled kernel.";
439                         return cubin;
440                 }
441
442 #ifdef _WIN32
443                 if(have_precompiled_kernels()) {
444                         if(major < 2) {
445                                 cuda_error_message(string_printf(
446                                         "CUDA device requires compute capability 2.0 or up, "
447                                         "found %d.%d. Your GPU is not supported.",
448                                         major, minor));
449                         }
450                         else {
451                                 cuda_error_message(string_printf(
452                                         "CUDA binary kernel for this graphics card compute "
453                                         "capability (%d.%d) not found.",
454                                         major, minor));
455                         }
456                         return "";
457                 }
458 #endif
459
460                 /* Compile. */
461                 if(!compile_check_compiler()) {
462                         return "";
463                 }
464                 const char *nvcc = cuewCompilerPath();
465                 const string kernel = path_join(
466                         path_join(source_path, "kernel"),
467                         path_join("kernels",
468                                   path_join("cuda", source)));
469                 double starttime = time_dt();
470                 printf("Compiling CUDA kernel ...\n");
471
472                 path_create_directories(cubin);
473
474                 string command = string_printf("\"%s\" "
475                                                "-arch=sm_%d%d "
476                                                "--cubin \"%s\" "
477                                                "-o \"%s\" "
478                                                "%s ",
479                                                nvcc,
480                                                major, minor,
481                                                kernel.c_str(),
482                                                cubin.c_str(),
483                                                common_cflags.c_str());
484
485                 printf("%s\n", command.c_str());
486
487                 if(system(command.c_str()) == -1) {
488                         cuda_error_message("Failed to execute compilation command, "
489                                            "see console for details.");
490                         return "";
491                 }
492
493                 /* Verify that compilation succeeded. */
494                 if(!path_exists(cubin)) {
495                         cuda_error_message("CUDA kernel compilation failed, "
496                                            "see console for details.");
497                         return "";
498                 }
499
500                 printf("Kernel compilation finished in %.2lfs.\n", time_dt() - starttime);
501
502                 return cubin;
503         }
504
505         bool load_kernels(const DeviceRequestedFeatures& requested_features)
506         {
507                 /* TODO(sergey): Support kernels re-load for CUDA devices.
508                  *
509                  * Currently re-loading kernel will invalidate memory pointers,
510                  * causing problems in cuCtxSynchronize.
511                  */
512                 if(cuFilterModule && cuModule) {
513                         VLOG(1) << "Skipping kernel reload, not currently supported.";
514                         return true;
515                 }
516
517                 /* check if cuda init succeeded */
518                 if(cuContext == 0)
519                         return false;
520
521                 /* check if GPU is supported */
522                 if(!support_device(requested_features))
523                         return false;
524
525                 /* get kernel */
526                 string cubin = compile_kernel(requested_features, false, use_split_kernel());
527                 if(cubin == "")
528                         return false;
529
530                 string filter_cubin = compile_kernel(requested_features, true, false);
531                 if(filter_cubin == "")
532                         return false;
533
534                 /* open module */
535                 CUDAContextScope scope(this);
536
537                 string cubin_data;
538                 CUresult result;
539
540                 if(path_read_text(cubin, cubin_data))
541                         result = cuModuleLoadData(&cuModule, cubin_data.c_str());
542                 else
543                         result = CUDA_ERROR_FILE_NOT_FOUND;
544
545                 if(cuda_error_(result, "cuModuleLoad"))
546                         cuda_error_message(string_printf("Failed loading CUDA kernel %s.", cubin.c_str()));
547
548                 if(path_read_text(filter_cubin, cubin_data))
549                         result = cuModuleLoadData(&cuFilterModule, cubin_data.c_str());
550                 else
551                         result = CUDA_ERROR_FILE_NOT_FOUND;
552
553                 if(cuda_error_(result, "cuModuleLoad"))
554                         cuda_error_message(string_printf("Failed loading CUDA kernel %s.", filter_cubin.c_str()));
555
556                 if(result == CUDA_SUCCESS) {
557                         reserve_local_memory(requested_features);
558                 }
559
560                 return (result == CUDA_SUCCESS);
561         }
562
563         void reserve_local_memory(const DeviceRequestedFeatures& requested_features)
564         {
565                 if(use_split_kernel()) {
566                         /* Split kernel mostly uses global memory and adaptive compilation,
567                         /* The split kernel mostly uses global memory and adaptive compilation,
568                          * so it is currently difficult to predict how much is needed. */
569                 }
570
571                 /* Together with CU_CTX_LMEM_RESIZE_TO_MAX, this reserves local memory
572                  * needed for kernel launches, so that we can reliably figure out when
573                  * to allocate scene data in mapped host memory. */
574                 CUDAContextScope scope(this);
575
576                 size_t total = 0, free_before = 0, free_after = 0;
577                 cuMemGetInfo(&free_before, &total);
578
579                 /* Get kernel function. */
580                 CUfunction cuPathTrace;
581
582                 if(requested_features.use_integrator_branched) {
583                         cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_branched_path_trace"));
584                 }
585                 else {
586                         cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_path_trace"));
587                 }
588
589                 cuda_assert(cuFuncSetCacheConfig(cuPathTrace, CU_FUNC_CACHE_PREFER_L1));
590
591                 int min_blocks, num_threads_per_block;
592                 cuda_assert(cuOccupancyMaxPotentialBlockSize(&min_blocks, &num_threads_per_block, cuPathTrace, NULL, 0, 0));
593
594                 /* Launch the kernel; using just 1 block appears sufficient to reserve
595                  * memory for all multiprocessors. It would still be good to do this in
596                  * parallel for the multi-GPU case to make it faster. */
597                 CUdeviceptr d_work_tiles = 0;
598                 uint total_work_size = 0;
599
600                 void *args[] = {&d_work_tiles,
601                                 &total_work_size};
602
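                    /* The launch does no actual work (total_work_size is 0); it only exists
                     * to make the driver allocate this kernel's local memory up front. */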
603                 cuda_assert(cuLaunchKernel(cuPathTrace,
604                                            1, 1, 1,
605                                            num_threads_per_block, 1, 1,
606                                            0, 0, args, 0));
607
608                 cuda_assert(cuCtxSynchronize());
609
610                 cuMemGetInfo(&free_after, &total);
611                 VLOG(1) << "Local memory reserved "
612                         << string_human_readable_number(free_before - free_after) << " bytes. ("
613                         << string_human_readable_size(free_before - free_after) << ")";
614         }
615
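            /* Upload the bindless texture info array when it has changed. Not used on
             * cards with Fermi limits, which use fixed texture slots instead. */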
616         void load_texture_info()
617         {
618                 if(!info.has_fermi_limits && need_texture_info) {
619                         texture_info.copy_to_device();
620                         need_texture_info = false;
621                 }
622         }
623
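            /* Allocate device memory for a buffer. The optional padding is used by the
             * pitch-aligned 2D texture path in tex_alloc(). */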
624         CUDAMem *generic_alloc(device_memory& mem, size_t padding = 0)
625         {
626                 CUDAContextScope scope(this);
627
628                 if(mem.name) {
629                         VLOG(1) << "Buffer allocate: " << mem.name << ", "
630                                         << string_human_readable_number(mem.memory_size()) << " bytes. ("
631                                         << string_human_readable_size(mem.memory_size()) << ")";
632                 }
633
634                 /* Allocate memory on device. */
635                 CUdeviceptr device_pointer = 0;
636                 size_t size = mem.memory_size();
637                 cuda_assert(cuMemAlloc(&device_pointer, size + padding));
638                 mem.device_pointer = (device_ptr)device_pointer;
639                 mem.device_size = size;
640                 stats.mem_alloc(size);
641
642                 if(!mem.device_pointer) {
643                         return NULL;
644                 }
645
646                 /* Insert into map of allocations. */
647                 CUDAMem *cmem = &cuda_mem_map[&mem];
648                 return cmem;
649         }
650
651         void generic_copy_to(device_memory& mem)
652         {
653                 if(mem.device_pointer) {
654                         CUDAContextScope scope(this);
655                         cuda_assert(cuMemcpyHtoD(cuda_device_ptr(mem.device_pointer), mem.host_pointer, mem.memory_size()));
656                 }
657         }
658
659         void generic_free(device_memory& mem)
660         {
661                 if(mem.device_pointer) {
662                         CUDAContextScope scope(this);
663
664                         cuda_assert(cuMemFree(cuda_device_ptr(mem.device_pointer)));
665
666                         stats.mem_free(mem.device_size);
667                         mem.device_pointer = 0;
668                         mem.device_size = 0;
669
670                         cuda_mem_map.erase(cuda_mem_map.find(&mem));
671                 }
672         }
673
674         void mem_alloc(device_memory& mem)
675         {
676                 if(mem.type == MEM_PIXELS && !background) {
677                         pixels_alloc(mem);
678                 }
679                 else if(mem.type == MEM_TEXTURE) {
680                         assert(!"mem_alloc not supported for textures.");
681                 }
682                 else {
683                         generic_alloc(mem);
684                 }
685         }
686
687         void mem_copy_to(device_memory& mem)
688         {
689                 if(mem.type == MEM_PIXELS) {
690                         assert(!"mem_copy_to not supported for pixels.");
691                 }
692                 else if(mem.type == MEM_TEXTURE) {
693                         tex_free(mem);
694                         tex_alloc(mem);
695                 }
696                 else {
697                         if(!mem.device_pointer) {
698                                 generic_alloc(mem);
699                         }
700
701                         generic_copy_to(mem);
702                 }
703         }
704
705         void mem_copy_from(device_memory& mem, int y, int w, int h, int elem)
706         {
707                 if(mem.type == MEM_PIXELS && !background) {
708                         pixels_copy_from(mem, y, w, h);
709                 }
710                 else if(mem.type == MEM_TEXTURE) {
711                         assert(!"mem_copy_from not supported for textures.");
712                 }
713                 else {
714                         CUDAContextScope scope(this);
715                         size_t offset = elem*y*w;
716                         size_t size = elem*w*h;
717
718                         if(mem.device_pointer) {
719                                 cuda_assert(cuMemcpyDtoH((uchar*)mem.host_pointer + offset,
720                                                          (CUdeviceptr)(mem.device_pointer + offset), size));
721                         }
722                         else {
723                                 memset((char*)mem.host_pointer + offset, 0, size);
724                         }
725                 }
726         }
727
728         void mem_zero(device_memory& mem)
729         {
730                 if(!mem.device_pointer) {
731                         mem_alloc(mem);
732                 }
733
734                 if(mem.host_pointer) {
735                         memset(mem.host_pointer, 0, mem.memory_size());
736                 }
737
738                 if(mem.device_pointer) {
739                         CUDAContextScope scope(this);
740                         cuda_assert(cuMemsetD8(cuda_device_ptr(mem.device_pointer), 0, mem.memory_size()));
741                 }
742         }
743
744         void mem_free(device_memory& mem)
745         {
746                 if(mem.type == MEM_PIXELS && !background) {
747                         pixels_free(mem);
748                 }
749                 else if(mem.type == MEM_TEXTURE) {
750                         tex_free(mem);
751                 }
752                 else {
753                         generic_free(mem);
754                 }
755         }
756
757         virtual device_ptr mem_alloc_sub_ptr(device_memory& mem, int offset, int /*size*/)
758         {
759                 return (device_ptr) (((char*) mem.device_pointer) + mem.memory_elements_size(offset));
760         }
761
762         void const_copy_to(const char *name, void *host, size_t size)
763         {
764                 CUDAContextScope scope(this);
765                 CUdeviceptr mem;
766                 size_t bytes;
767
768                 cuda_assert(cuModuleGetGlobal(&mem, &bytes, cuModule, name));
769                 //assert(bytes == size);
770                 cuda_assert(cuMemcpyHtoD(mem, host, size));
771         }
772
773         void tex_alloc(device_memory& mem)
774         {
775                 CUDAContextScope scope(this);
776
777                 VLOG(1) << "Texture allocate: " << mem.name << ", "
778                         << string_human_readable_number(mem.memory_size()) << " bytes. ("
779                         << string_human_readable_size(mem.memory_size()) << ")";
780
781                 /* Check if we are on sm_30 or above, for bindless textures. */
782                 bool has_fermi_limits = info.has_fermi_limits;
783
784                 /* General variables for both architectures */
785                 string bind_name = mem.name;
786                 size_t dsize = datatype_size(mem.data_type);
787                 size_t size = mem.memory_size();
788
789                 CUaddress_mode address_mode = CU_TR_ADDRESS_MODE_WRAP;
790                 switch(mem.extension) {
791                         case EXTENSION_REPEAT:
792                                 address_mode = CU_TR_ADDRESS_MODE_WRAP;
793                                 break;
794                         case EXTENSION_EXTEND:
795                                 address_mode = CU_TR_ADDRESS_MODE_CLAMP;
796                                 break;
797                         case EXTENSION_CLIP:
798                                 address_mode = CU_TR_ADDRESS_MODE_BORDER;
799                                 break;
800                         default:
801                                 assert(0);
802                                 break;
803                 }
804
805                 CUfilter_mode filter_mode;
806                 if(mem.interpolation == INTERPOLATION_CLOSEST) {
807                         filter_mode = CU_TR_FILTER_MODE_POINT;
808                 }
809                 else {
810                         filter_mode = CU_TR_FILTER_MODE_LINEAR;
811                 }
812
813                 /* Data Storage */
814                 if(mem.interpolation == INTERPOLATION_NONE) {
815                         generic_alloc(mem);
816                         generic_copy_to(mem);
817
818                         CUdeviceptr cumem;
819                         size_t cubytes;
820
821                         cuda_assert(cuModuleGetGlobal(&cumem, &cubytes, cuModule, bind_name.c_str()));
822
823                         if(cubytes == 8) {
824                                 /* 64 bit device pointer */
825                                 uint64_t ptr = mem.device_pointer;
826                                 cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
827                         }
828                         else {
829                                 /* 32 bit device pointer */
830                                 uint32_t ptr = (uint32_t)mem.device_pointer;
831                                 cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
832                         }
833                         return;
834                 }
835
836                 /* Image Texture Storage */
837                 CUtexref texref = NULL;
838
839                 if(has_fermi_limits) {
840                         if(mem.data_depth > 1) {
841                                 /* The kernel uses different bind names for 2d and 3d float textures,
842                                  * so we have to adjust a couple of things here.
843                                  */
844                                 vector<string> tokens;
845                                 string_split(tokens, mem.name, "_");
846                                 bind_name = string_printf("__tex_image_%s_3d_%s",
847                                                           tokens[2].c_str(),
848                                                           tokens[3].c_str());
849                         }
850
851                         cuda_assert(cuModuleGetTexRef(&texref, cuModule, bind_name.c_str()));
852
853                         if(!texref) {
854                                 return;
855                         }
856                 }
857
858                 CUarray_format_enum format;
859                 switch(mem.data_type) {
860                         case TYPE_UCHAR: format = CU_AD_FORMAT_UNSIGNED_INT8; break;
861                         case TYPE_UINT: format = CU_AD_FORMAT_UNSIGNED_INT32; break;
862                         case TYPE_INT: format = CU_AD_FORMAT_SIGNED_INT32; break;
863                         case TYPE_FLOAT: format = CU_AD_FORMAT_FLOAT; break;
864                         case TYPE_HALF: format = CU_AD_FORMAT_HALF; break;
865                         default: assert(0); return;
866                 }
867
868                 CUDAMem *cmem = NULL;
869                 CUarray array_3d = NULL;
870                 size_t src_pitch = mem.data_width * dsize * mem.data_elements;
871                 size_t dst_pitch = src_pitch;
872
873                 if(mem.data_depth > 1) {
874                         /* 3D texture, using a CUDA array since there is no API for 3D linear memory. */
875                         CUDA_ARRAY3D_DESCRIPTOR desc;
876
877                         desc.Width = mem.data_width;
878                         desc.Height = mem.data_height;
879                         desc.Depth = mem.data_depth;
880                         desc.Format = format;
881                         desc.NumChannels = mem.data_elements;
882                         desc.Flags = 0;
883
884                         cuda_assert(cuArray3DCreate(&array_3d, &desc));
885
886                         if(!array_3d) {
887                                 return;
888                         }
889
890                         CUDA_MEMCPY3D param;
891                         memset(&param, 0, sizeof(param));
892                         param.dstMemoryType = CU_MEMORYTYPE_ARRAY;
893                         param.dstArray = array_3d;
894                         param.srcMemoryType = CU_MEMORYTYPE_HOST;
895                         param.srcHost = mem.host_pointer;
896                         param.srcPitch = src_pitch;
897                         param.WidthInBytes = param.srcPitch;
898                         param.Height = mem.data_height;
899                         param.Depth = mem.data_depth;
900
901                         cuda_assert(cuMemcpy3D(&param));
902
903                         mem.device_pointer = (device_ptr)array_3d;
904                         mem.device_size = size;
905                         stats.mem_alloc(size);
906
907                         cmem = &cuda_mem_map[&mem];
908                         cmem->texobject = 0;
909                         cmem->array = array_3d;
910                 }
911                 else if(mem.data_height > 0) {
912                         /* 2D texture, using pitch aligned linear memory. */
913                         int alignment = 0;
914                         cuda_assert(cuDeviceGetAttribute(&alignment, CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT, cuDevice));
915                         dst_pitch = align_up(src_pitch, alignment);
916                         size_t dst_size = dst_pitch * mem.data_height;
917
918                         cmem = generic_alloc(mem, dst_size - mem.memory_size());
919                         if(!cmem) {
920                                 return;
921                         }
922
923                         CUDA_MEMCPY2D param;
924                         memset(&param, 0, sizeof(param));
925                         param.dstMemoryType = CU_MEMORYTYPE_DEVICE;
926                         param.dstDevice = mem.device_pointer;
927                         param.dstPitch = dst_pitch;
928                         param.srcMemoryType = CU_MEMORYTYPE_HOST;
929                         param.srcHost = mem.host_pointer;
930                         param.srcPitch = src_pitch;
931                         param.WidthInBytes = param.srcPitch;
932                         param.Height = mem.data_height;
933
934                         cuda_assert(cuMemcpy2DUnaligned(&param));
935                 }
936                 else {
937                         /* 1D texture, using linear memory. */
938                         cmem = generic_alloc(mem);
939                         if(!cmem) {
940                                 return;
941                         }
942
943                         cuda_assert(cuMemcpyHtoD(mem.device_pointer, mem.host_pointer, size));
944                 }
945
946                 if(!has_fermi_limits) {
947                         /* Kepler+, bindless textures. */
948                         int flat_slot = 0;
949                         if(string_startswith(mem.name, "__tex_image")) {
950                                 int pos =  string(mem.name).rfind("_");
951                                 flat_slot = atoi(mem.name + pos + 1);
952                         }
953                         else {
954                                 assert(0);
955                         }
956
957                         CUDA_RESOURCE_DESC resDesc;
958                         memset(&resDesc, 0, sizeof(resDesc));
959
960                         if(array_3d) {
961                                 resDesc.resType = CU_RESOURCE_TYPE_ARRAY;
962                                 resDesc.res.array.hArray = array_3d;
963                                 resDesc.flags = 0;
964                         }
965                         else if(mem.data_height > 0) {
966                                 resDesc.resType = CU_RESOURCE_TYPE_PITCH2D;
967                                 resDesc.res.pitch2D.devPtr = mem.device_pointer;
968                                 resDesc.res.pitch2D.format = format;
969                                 resDesc.res.pitch2D.numChannels = mem.data_elements;
970                                 resDesc.res.pitch2D.height = mem.data_height;
971                                 resDesc.res.pitch2D.width = mem.data_width;
972                                 resDesc.res.pitch2D.pitchInBytes = dst_pitch;
973                         }
974                         else {
975                                 resDesc.resType = CU_RESOURCE_TYPE_LINEAR;
976                                 resDesc.res.linear.devPtr = mem.device_pointer;
977                                 resDesc.res.linear.format = format;
978                                 resDesc.res.linear.numChannels = mem.data_elements;
979                                 resDesc.res.linear.sizeInBytes = mem.device_size;
980                         }
981
982                         CUDA_TEXTURE_DESC texDesc;
983                         memset(&texDesc, 0, sizeof(texDesc));
984                         texDesc.addressMode[0] = address_mode;
985                         texDesc.addressMode[1] = address_mode;
986                         texDesc.addressMode[2] = address_mode;
987                         texDesc.filterMode = filter_mode;
988                         texDesc.flags = CU_TRSF_NORMALIZED_COORDINATES;
989
990                         cuda_assert(cuTexObjectCreate(&cmem->texobject, &resDesc, &texDesc, NULL));
991
992                         /* Resize once */
993                         if(flat_slot >= texture_info.size()) {
994                                 /* Allocate some slots in advance, to reduce the number
995                                  * of re-allocations. */
996                                 texture_info.resize(flat_slot + 128);
997                         }
998
999                         /* Set mapping and tag that we need to (re-)upload to the device. */
1000                         TextureInfo& info = texture_info[flat_slot];
1001                         info.data = (uint64_t)cmem->texobject;
1002                         info.cl_buffer = 0;
1003                         info.interpolation = mem.interpolation;
1004                         info.extension = mem.extension;
1005                         info.width = mem.data_width;
1006                         info.height = mem.data_height;
1007                         info.depth = mem.data_depth;
1008                         need_texture_info = true;
1009                 }
1010                 else {
1011                         /* Fermi, fixed texture slots. */
1012                         if(array_3d) {
1013                                 cuda_assert(cuTexRefSetArray(texref, array_3d, CU_TRSA_OVERRIDE_FORMAT));
1014                         }
1015                         else if(mem.data_height > 0) {
1016                                 CUDA_ARRAY_DESCRIPTOR array_desc;
1017                                 array_desc.Format = format;
1018                                 array_desc.Height = mem.data_height;
1019                                 array_desc.Width = mem.data_width;
1020                                 array_desc.NumChannels = mem.data_elements;
1021                                 cuda_assert(cuTexRefSetAddress2D_v3(texref, &array_desc, mem.device_pointer, dst_pitch));
1022                         }
1023                         else {
1024                                 cuda_assert(cuTexRefSetAddress(NULL, texref, cuda_device_ptr(mem.device_pointer), size));
1025                         }
1026
1027                         /* Attach to texture reference. */
1028                         cuda_assert(cuTexRefSetFilterMode(texref, filter_mode));
1029                         cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_NORMALIZED_COORDINATES));
1030                         cuda_assert(cuTexRefSetFormat(texref, format, mem.data_elements));
1031                         cuda_assert(cuTexRefSetAddressMode(texref, 0, address_mode));
1032                         cuda_assert(cuTexRefSetAddressMode(texref, 1, address_mode));
1033                         if(mem.data_depth > 1) {
1034                                 cuda_assert(cuTexRefSetAddressMode(texref, 2, address_mode));
1035                         }
1036                 }
1037         }
1038
1039         void tex_free(device_memory& mem)
1040         {
1041                 if(mem.device_pointer) {
1042                         CUDAContextScope scope(this);
1043                         const CUDAMem& cmem = cuda_mem_map[&mem];
1044
1045                         if(cmem.texobject) {
1046                                 /* Free bindless texture. */
1047                                 cuTexObjectDestroy(cmem.texobject);
1048                         }
1049
1050                         if(cmem.array) {
1051                                 /* Free array. */
1052                                 cuArrayDestroy(cmem.array);
1053                                 stats.mem_free(mem.device_size);
1054                                 mem.device_pointer = 0;
1055                                 mem.device_size = 0;
1056
1057                                 cuda_mem_map.erase(cuda_mem_map.find(&mem));
1058                         }
1059                         else {
1060                                 generic_free(mem);
1061                         }
1062                 }
1063         }
1064
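            /* Copy the device pointers of the 3x3 neighborhood of tile buffers into the
             * tiles info and upload it, so filter kernels can read across tile borders. */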
1065         bool denoising_set_tiles(device_ptr *buffers, DenoisingTask *task)
1066         {
1067                 TilesInfo *tiles = (TilesInfo*) task->tiles_mem.host_pointer;
1068                 for(int i = 0; i < 9; i++) {
1069                         tiles->buffers[i] = buffers[i];
1070                 }
1071
1072                 task->tiles_mem.copy_to_device();
1073
1074                 return !have_error();
1075         }
1076
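/* Pick a roughly square 2D block size from the kernel's maximum threads per block,
 * and compute how many blocks are needed to cover a w by h region. */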
1077 #define CUDA_GET_BLOCKSIZE(func, w, h)                                                                          \
1078                         int threads_per_block;                                                                              \
1079                         cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, func)); \
1080                         int threads = (int)sqrt((float)threads_per_block);                                                  \
1081                         int xblocks = ((w) + threads - 1)/threads;                                                          \
1082                         int yblocks = ((h) + threads - 1)/threads;
1083
1084 #define CUDA_LAUNCH_KERNEL(func, args)                      \
1085                         cuda_assert(cuLaunchKernel(func,                \
1086                                                    xblocks, yblocks, 1, \
1087                                                    threads, threads, 1, \
1088                                                    0, 0, args, 0));
1089
1090 /* Similar to the above, but for 1-dimensional blocks. */
1091 #define CUDA_GET_BLOCKSIZE_1D(func, w, h)                                                                       \
1092                         int threads_per_block;                                                                              \
1093                         cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, func)); \
1094                         int xblocks = ((w) + threads_per_block - 1)/threads_per_block;                                      \
1095                         int yblocks = h;
1096
1097 #define CUDA_LAUNCH_KERNEL_1D(func, args)                       \
1098                         cuda_assert(cuLaunchKernel(func,                    \
1099                                                    xblocks, yblocks, 1,     \
1100                                                    threads_per_block, 1, 1, \
1101                                                    0, 0, args, 0));
1102
1103         bool denoising_non_local_means(device_ptr image_ptr, device_ptr guide_ptr, device_ptr variance_ptr, device_ptr out_ptr,
1104                                        DenoisingTask *task)
1105         {
1106                 if(have_error())
1107                         return false;
1108
1109                 CUDAContextScope scope(this);
1110
1111                 int stride = task->buffer.stride;
1112                 int w = task->buffer.width;
1113                 int h = task->buffer.h;
1114                 int r = task->nlm_state.r;
1115                 int f = task->nlm_state.f;
1116                 float a = task->nlm_state.a;
1117                 float k_2 = task->nlm_state.k_2;
1118
1119                 int shift_stride = stride*h;
1120                 int num_shifts = (2*r+1)*(2*r+1);
1121                 int mem_size = sizeof(float)*shift_stride*2*num_shifts;
1122                 int channel_offset = 0;
1123
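                    /* Temporary memory holds one difference value and one blurred difference
                     * value per pixel and per shift, hence 2*shift_stride*num_shifts floats. */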
1124                 CUdeviceptr temporary_mem;
1125                 cuda_assert(cuMemAlloc(&temporary_mem, mem_size));
1126                 CUdeviceptr difference     = temporary_mem;
1127                 CUdeviceptr blurDifference = temporary_mem + sizeof(float)*shift_stride * num_shifts;
1128
1129                 CUdeviceptr weightAccum = task->nlm_state.temporary_3_ptr;
1130                 cuda_assert(cuMemsetD8(weightAccum, 0, sizeof(float)*shift_stride));
1131                 cuda_assert(cuMemsetD8(out_ptr, 0, sizeof(float)*shift_stride));
1132
1133                 {
1134                         CUfunction cuNLMCalcDifference, cuNLMBlur, cuNLMCalcWeight, cuNLMUpdateOutput;
1135                         cuda_assert(cuModuleGetFunction(&cuNLMCalcDifference, cuFilterModule, "kernel_cuda_filter_nlm_calc_difference"));
1136                         cuda_assert(cuModuleGetFunction(&cuNLMBlur,           cuFilterModule, "kernel_cuda_filter_nlm_blur"));
1137                         cuda_assert(cuModuleGetFunction(&cuNLMCalcWeight,     cuFilterModule, "kernel_cuda_filter_nlm_calc_weight"));
1138                         cuda_assert(cuModuleGetFunction(&cuNLMUpdateOutput,   cuFilterModule, "kernel_cuda_filter_nlm_update_output"));
1139
1140                         cuda_assert(cuFuncSetCacheConfig(cuNLMCalcDifference, CU_FUNC_CACHE_PREFER_L1));
1141                         cuda_assert(cuFuncSetCacheConfig(cuNLMBlur,           CU_FUNC_CACHE_PREFER_L1));
1142                         cuda_assert(cuFuncSetCacheConfig(cuNLMCalcWeight,     CU_FUNC_CACHE_PREFER_L1));
1143                         cuda_assert(cuFuncSetCacheConfig(cuNLMUpdateOutput,   CU_FUNC_CACHE_PREFER_L1));
1144
1145                         CUDA_GET_BLOCKSIZE_1D(cuNLMCalcDifference, w*h, num_shifts);
1146
1147                         void *calc_difference_args[] = {&guide_ptr, &variance_ptr, &difference, &w, &h, &stride, &shift_stride, &r, &channel_offset, &a, &k_2};
1148                         void *blur_args[]            = {&difference, &blurDifference, &w, &h, &stride, &shift_stride, &r, &f};
1149                         void *calc_weight_args[]     = {&blurDifference, &difference, &w, &h, &stride, &shift_stride, &r, &f};
1150                         void *update_output_args[]   = {&blurDifference, &image_ptr, &out_ptr, &weightAccum, &w, &h, &stride, &shift_stride, &r, &f};
1151
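                            /* Non-local means pipeline: per-shift differences -> blur -> weights
                             * -> blur -> accumulate weighted pixels and weights (normalized below). */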
1152                         CUDA_LAUNCH_KERNEL_1D(cuNLMCalcDifference, calc_difference_args);
1153                         CUDA_LAUNCH_KERNEL_1D(cuNLMBlur, blur_args);
1154                         CUDA_LAUNCH_KERNEL_1D(cuNLMCalcWeight, calc_weight_args);
1155                         CUDA_LAUNCH_KERNEL_1D(cuNLMBlur, blur_args);
1156                         CUDA_LAUNCH_KERNEL_1D(cuNLMUpdateOutput, update_output_args);
1157                 }
1158
1159                 cuMemFree(temporary_mem);
1160
1161                 {
1162                         CUfunction cuNLMNormalize;
1163                         cuda_assert(cuModuleGetFunction(&cuNLMNormalize, cuFilterModule, "kernel_cuda_filter_nlm_normalize"));
1164                         cuda_assert(cuFuncSetCacheConfig(cuNLMNormalize, CU_FUNC_CACHE_PREFER_L1));
1165                         void *normalize_args[] = {&out_ptr, &weightAccum, &w, &h, &stride};
1166                         CUDA_GET_BLOCKSIZE(cuNLMNormalize, w, h);
1167                         CUDA_LAUNCH_KERNEL(cuNLMNormalize, normalize_args);
1168                         cuda_assert(cuCtxSynchronize());
1169                 }
1170
1171                 return !have_error();
1172         }
1173
1174         bool denoising_construct_transform(DenoisingTask *task)
1175         {
1176                 if(have_error())
1177                         return false;
1178
1179                 CUDAContextScope scope(this);
1180
1181                 CUfunction cuFilterConstructTransform;
1182                 cuda_assert(cuModuleGetFunction(&cuFilterConstructTransform, cuFilterModule, "kernel_cuda_filter_construct_transform"));
1183                 cuda_assert(cuFuncSetCacheConfig(cuFilterConstructTransform, CU_FUNC_CACHE_PREFER_SHARED));
1184                 CUDA_GET_BLOCKSIZE(cuFilterConstructTransform,
1185                                    task->storage.w,
1186                                    task->storage.h);
1187
1188                 void *args[] = {&task->buffer.mem.device_pointer,
1189                                 &task->storage.transform.device_pointer,
1190                                 &task->storage.rank.device_pointer,
1191                                 &task->filter_area,
1192                                 &task->rect,
1193                                 &task->radius,
1194                                 &task->pca_threshold,
1195                                 &task->buffer.pass_stride};
1196                 CUDA_LAUNCH_KERNEL(cuFilterConstructTransform, args);
1197                 cuda_assert(cuCtxSynchronize());
1198
1199                 return !have_error();
1200         }
1201
1202         bool denoising_reconstruct(device_ptr color_ptr,
1203                                    device_ptr color_variance_ptr,
1204                                    device_ptr output_ptr,
1205                                    DenoisingTask *task)
1206         {
1207                 if(have_error())
1208                         return false;
1209
1210                 CUDAContextScope scope(this);
1211
1212                 mem_zero(task->storage.XtWX);
1213                 mem_zero(task->storage.XtWY);
1214
1215                 int r = task->radius;
1216                 int f = 4;
1217                 float a = 1.0f;
1218                 float k_2 = task->nlm_k_2;
1219
1220                 int w = task->reconstruction_state.source_w;
1221                 int h = task->reconstruction_state.source_h;
1222                 int stride = task->buffer.stride;
1223
1224                 int shift_stride = stride*h;
1225                 int num_shifts = (2*r+1)*(2*r+1);
1226                 int mem_size = sizeof(float)*shift_stride*num_shifts;
1227
1228                 CUdeviceptr temporary_mem;
1229                 cuda_assert(cuMemAlloc(&temporary_mem, 2*mem_size));
1230                 CUdeviceptr difference     = temporary_mem;
1231                 CUdeviceptr blurDifference = temporary_mem + mem_size;
1232
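                     /* Compute NLM weights between shifted pixels and accumulate the weighted
                      * least-squares system (XtWX, XtWY) used for the final reconstruction. */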
1233                 {
1234                         CUfunction cuNLMCalcDifference, cuNLMBlur, cuNLMCalcWeight, cuNLMConstructGramian;
1235                         cuda_assert(cuModuleGetFunction(&cuNLMCalcDifference,   cuFilterModule, "kernel_cuda_filter_nlm_calc_difference"));
1236                         cuda_assert(cuModuleGetFunction(&cuNLMBlur,             cuFilterModule, "kernel_cuda_filter_nlm_blur"));
1237                         cuda_assert(cuModuleGetFunction(&cuNLMCalcWeight,       cuFilterModule, "kernel_cuda_filter_nlm_calc_weight"));
1238                         cuda_assert(cuModuleGetFunction(&cuNLMConstructGramian, cuFilterModule, "kernel_cuda_filter_nlm_construct_gramian"));
1239
1240                         cuda_assert(cuFuncSetCacheConfig(cuNLMCalcDifference,   CU_FUNC_CACHE_PREFER_L1));
1241                         cuda_assert(cuFuncSetCacheConfig(cuNLMBlur,             CU_FUNC_CACHE_PREFER_L1));
1242                         cuda_assert(cuFuncSetCacheConfig(cuNLMCalcWeight,       CU_FUNC_CACHE_PREFER_L1));
1243                         cuda_assert(cuFuncSetCacheConfig(cuNLMConstructGramian, CU_FUNC_CACHE_PREFER_SHARED));
1244
1245                         CUDA_GET_BLOCKSIZE_1D(cuNLMCalcDifference,
1246                                              task->reconstruction_state.source_w * task->reconstruction_state.source_h,
1247                                              num_shifts);
1248
1249                         void *calc_difference_args[] = {&color_ptr, &color_variance_ptr, &difference, &w, &h, &stride, &shift_stride, &r, &task->buffer.pass_stride, &a, &k_2};
1250                         void *blur_args[]            = {&difference, &blurDifference, &w, &h, &stride, &shift_stride, &r, &f};
1251                         void *calc_weight_args[]     = {&blurDifference, &difference, &w, &h, &stride, &shift_stride, &r, &f};
1252                         void *construct_gramian_args[] = {&blurDifference,
1253                                                           &task->buffer.mem.device_pointer,
1254                                                           &task->storage.transform.device_pointer,
1255                                                           &task->storage.rank.device_pointer,
1256                                                           &task->storage.XtWX.device_pointer,
1257                                                           &task->storage.XtWY.device_pointer,
1258                                                           &task->reconstruction_state.filter_window,
1259                                                           &w, &h, &stride,
1260                                                           &shift_stride, &r,
1261                                                           &f,
1262                                                           &task->buffer.pass_stride};
1263
1264                         CUDA_LAUNCH_KERNEL_1D(cuNLMCalcDifference, calc_difference_args);
1265                         CUDA_LAUNCH_KERNEL_1D(cuNLMBlur, blur_args);
1266                         CUDA_LAUNCH_KERNEL_1D(cuNLMCalcWeight, calc_weight_args);
1267                         CUDA_LAUNCH_KERNEL_1D(cuNLMBlur, blur_args);
1268                         CUDA_LAUNCH_KERNEL_1D(cuNLMConstructGramian, construct_gramian_args);
1269                 }
1270
1271                 cuMemFree(temporary_mem);
1272
1273                 {
1274                         CUfunction cuFinalize;
1275                         cuda_assert(cuModuleGetFunction(&cuFinalize, cuFilterModule, "kernel_cuda_filter_finalize"));
1276                         cuda_assert(cuFuncSetCacheConfig(cuFinalize, CU_FUNC_CACHE_PREFER_L1));
1277                         void *finalize_args[] = {&output_ptr,
1278                                                  &task->storage.rank.device_pointer,
1279                                                  &task->storage.XtWX.device_pointer,
1280                                                  &task->storage.XtWY.device_pointer,
1281                                                  &task->filter_area,
1282                                                  &task->reconstruction_state.buffer_params.x,
1283                                                  &task->render_buffer.samples};
1284                         CUDA_GET_BLOCKSIZE(cuFinalize,
1285                                            task->reconstruction_state.source_w,
1286                                            task->reconstruction_state.source_h);
1287                         CUDA_LAUNCH_KERNEL(cuFinalize, finalize_args);
1288                 }
1289
1290                 cuda_assert(cuCtxSynchronize());
1291
1292                 return !have_error();
1293         }
1294
1295         bool denoising_combine_halves(device_ptr a_ptr, device_ptr b_ptr,
1296                                       device_ptr mean_ptr, device_ptr variance_ptr,
1297                                       int r, int4 rect, DenoisingTask *task)
1298         {
1299                 if(have_error())
1300                         return false;
1301
1302                 CUDAContextScope scope(this);
1303
1304                 CUfunction cuFilterCombineHalves;
1305                 cuda_assert(cuModuleGetFunction(&cuFilterCombineHalves, cuFilterModule, "kernel_cuda_filter_combine_halves"));
1306                 cuda_assert(cuFuncSetCacheConfig(cuFilterCombineHalves, CU_FUNC_CACHE_PREFER_L1));
1307                 CUDA_GET_BLOCKSIZE(cuFilterCombineHalves,
1308                                    task->rect.z-task->rect.x,
1309                                    task->rect.w-task->rect.y);
1310
1311                 void *args[] = {&mean_ptr,
1312                                 &variance_ptr,
1313                                 &a_ptr,
1314                                 &b_ptr,
1315                                 &rect,
1316                                 &r};
1317                 CUDA_LAUNCH_KERNEL(cuFilterCombineHalves, args);
1318                 cuda_assert(cuCtxSynchronize());
1319
1320                 return !have_error();
1321         }
1322
1323         bool denoising_divide_shadow(device_ptr a_ptr, device_ptr b_ptr,
1324                                      device_ptr sample_variance_ptr, device_ptr sv_variance_ptr,
1325                                      device_ptr buffer_variance_ptr, DenoisingTask *task)
1326         {
1327                 if(have_error())
1328                         return false;
1329
1330                 CUDAContextScope scope(this);
1331
1332                 CUfunction cuFilterDivideShadow;
1333                 cuda_assert(cuModuleGetFunction(&cuFilterDivideShadow, cuFilterModule, "kernel_cuda_filter_divide_shadow"));
1334                 cuda_assert(cuFuncSetCacheConfig(cuFilterDivideShadow, CU_FUNC_CACHE_PREFER_L1));
1335                 CUDA_GET_BLOCKSIZE(cuFilterDivideShadow,
1336                                    task->rect.z-task->rect.x,
1337                                    task->rect.w-task->rect.y);
1338
1339                 void *args[] = {&task->render_buffer.samples,
1340                                 &task->tiles_mem.device_pointer,
1341                                 &a_ptr,
1342                                 &b_ptr,
1343                                 &sample_variance_ptr,
1344                                 &sv_variance_ptr,
1345                                 &buffer_variance_ptr,
1346                                 &task->rect,
1347                                 &task->render_buffer.pass_stride,
1348                                 &task->render_buffer.denoising_data_offset};
1349                 CUDA_LAUNCH_KERNEL(cuFilterDivideShadow, args);
1350                 cuda_assert(cuCtxSynchronize());
1351
1352                 return !have_error();
1353         }
1354
1355         bool denoising_get_feature(int mean_offset,
1356                                    int variance_offset,
1357                                    device_ptr mean_ptr,
1358                                    device_ptr variance_ptr,
1359                                    DenoisingTask *task)
1360         {
1361                 if(have_error())
1362                         return false;
1363
1364                 CUDAContextScope scope(this);
1365
1366                 CUfunction cuFilterGetFeature;
1367                 cuda_assert(cuModuleGetFunction(&cuFilterGetFeature, cuFilterModule, "kernel_cuda_filter_get_feature"));
1368                 cuda_assert(cuFuncSetCacheConfig(cuFilterGetFeature, CU_FUNC_CACHE_PREFER_L1));
1369                 CUDA_GET_BLOCKSIZE(cuFilterGetFeature,
1370                                    task->rect.z-task->rect.x,
1371                                    task->rect.w-task->rect.y);
1372
1373                 void *args[] = {&task->render_buffer.samples,
1374                                 &task->tiles_mem.device_pointer,
1375                                 &mean_offset,
1376                                 &variance_offset,
1377                                 &mean_ptr,
1378                                 &variance_ptr,
1379                                 &task->rect,
1380                                 &task->render_buffer.pass_stride,
1381                                 &task->render_buffer.denoising_data_offset};
1382                 CUDA_LAUNCH_KERNEL(cuFilterGetFeature, args);
1383                 cuda_assert(cuCtxSynchronize());
1384
1385                 return !have_error();
1386         }
1387
1388         bool denoising_detect_outliers(device_ptr image_ptr,
1389                                        device_ptr variance_ptr,
1390                                        device_ptr depth_ptr,
1391                                        device_ptr output_ptr,
1392                                        DenoisingTask *task)
1393         {
1394                 if(have_error())
1395                         return false;
1396
1397                 CUDAContextScope scope(this);
1398
1399                 CUfunction cuFilterDetectOutliers;
1400                 cuda_assert(cuModuleGetFunction(&cuFilterDetectOutliers, cuFilterModule, "kernel_cuda_filter_detect_outliers"));
1401                 cuda_assert(cuFuncSetCacheConfig(cuFilterDetectOutliers, CU_FUNC_CACHE_PREFER_L1));
1402                 CUDA_GET_BLOCKSIZE(cuFilterDetectOutliers,
1403                                    task->rect.z-task->rect.x,
1404                                    task->rect.w-task->rect.y);
1405
1406                 void *args[] = {&image_ptr,
1407                                 &variance_ptr,
1408                                 &depth_ptr,
1409                                 &output_ptr,
1410                                 &task->rect,
1411                                 &task->buffer.pass_stride};
1412
1413                 CUDA_LAUNCH_KERNEL(cuFilterDetectOutliers, args);
1414                 cuda_assert(cuCtxSynchronize());
1415
1416                 return !have_error();
1417         }
1418
1419         void denoise(RenderTile &rtile, DenoisingTask& denoising, const DeviceTask &task)
1420         {
1421                 denoising.functions.construct_transform = function_bind(&CUDADevice::denoising_construct_transform, this, &denoising);
1422                 denoising.functions.reconstruct = function_bind(&CUDADevice::denoising_reconstruct, this, _1, _2, _3, &denoising);
1423                 denoising.functions.divide_shadow = function_bind(&CUDADevice::denoising_divide_shadow, this, _1, _2, _3, _4, _5, &denoising);
1424                 denoising.functions.non_local_means = function_bind(&CUDADevice::denoising_non_local_means, this, _1, _2, _3, _4, &denoising);
1425                 denoising.functions.combine_halves = function_bind(&CUDADevice::denoising_combine_halves, this, _1, _2, _3, _4, _5, _6, &denoising);
1426                 denoising.functions.get_feature = function_bind(&CUDADevice::denoising_get_feature, this, _1, _2, _3, _4, &denoising);
1427                 denoising.functions.detect_outliers = function_bind(&CUDADevice::denoising_detect_outliers, this, _1, _2, _3, _4, &denoising);
1428                 denoising.functions.set_tiles = function_bind(&CUDADevice::denoising_set_tiles, this, _1, &denoising);
1429
1430                 denoising.filter_area = make_int4(rtile.x, rtile.y, rtile.w, rtile.h);
1431                 denoising.render_buffer.samples = rtile.sample;
1432
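                     /* The tile is denoised together with its 3x3 neighbourhood, so that filter
                      * windows near the tile border can read pixels from adjacent tiles. */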
1433                 RenderTile rtiles[9];
1434                 rtiles[4] = rtile;
1435                 task.map_neighbor_tiles(rtiles, this);
1436                 denoising.tiles_from_rendertiles(rtiles);
1437
1438                 denoising.init_from_devicetask(task);
1439
1440                 denoising.run_denoising();
1441
1442                 task.unmap_neighbor_tiles(rtiles, this);
1443         }
1444
1445         void path_trace(DeviceTask& task, RenderTile& rtile, device_vector<WorkTile>& work_tiles)
1446         {
1447                 scoped_timer timer(&rtile.buffers->render_time);
1448
1449                 if(have_error())
1450                         return;
1451
1452                 CUDAContextScope scope(this);
1453                 CUfunction cuPathTrace;
1454
1455                 /* Get kernel function. */
1456                 if(task.integrator_branched) {
1457                         cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_branched_path_trace"));
1458                 }
1459                 else {
1460                         cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_path_trace"));
1461                 }
1462
1463                 if(have_error()) {
1464                         return;
1465                 }
1466
1467                 cuda_assert(cuFuncSetCacheConfig(cuPathTrace, CU_FUNC_CACHE_PREFER_L1));
1468
1469                 /* Allocate work tile. */
1470                 work_tiles.alloc(1);
1471
1472                 WorkTile *wtile = work_tiles.data();
1473                 wtile->x = rtile.x;
1474                 wtile->y = rtile.y;
1475                 wtile->w = rtile.w;
1476                 wtile->h = rtile.h;
1477                 wtile->offset = rtile.offset;
1478                 wtile->stride = rtile.stride;
1479                 wtile->buffer = (float*)cuda_device_ptr(rtile.buffer);
1480
1481                 /* Prepare work size. More step samples render faster, but for now we
1482                  * remain conservative for GPUs connected to a display to avoid driver
1483                  * timeouts and display freezing. */
1484                 int min_blocks, num_threads_per_block;
1485                 cuda_assert(cuOccupancyMaxPotentialBlockSize(&min_blocks, &num_threads_per_block, cuPathTrace, NULL, 0, 0));
1486                 if(!info.display_device) {
1487                         min_blocks *= 8;
1488                 }
1489
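                     /* Render enough samples per launch to fill at least min_blocks blocks. */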
1490                 uint step_samples = divide_up(min_blocks * num_threads_per_block, wtile->w * wtile->h);
1491
1492                 /* Render all samples. */
1493                 int start_sample = rtile.start_sample;
1494                 int end_sample = rtile.start_sample + rtile.num_samples;
1495
1496                 for(int sample = start_sample; sample < end_sample; sample += step_samples) {
1497                         /* Setup and copy work tile to device. */
1498                         wtile->start_sample = sample;
1499                         wtile->num_samples = min(step_samples, end_sample - sample);
1500                         work_tiles.copy_to_device();
1501
1502                         CUdeviceptr d_work_tiles = cuda_device_ptr(work_tiles.device_pointer);
1503                         uint total_work_size = wtile->w * wtile->h * wtile->num_samples;
1504                         uint num_blocks = divide_up(total_work_size, num_threads_per_block);
1505
1506                         /* Launch kernel. */
1507                         void *args[] = {&d_work_tiles,
1508                                         &total_work_size};
1509
1510                         cuda_assert(cuLaunchKernel(cuPathTrace,
1511                                                    num_blocks, 1, 1,
1512                                                    num_threads_per_block, 1, 1,
1513                                                    0, 0, args, 0));
1514
1515                         cuda_assert(cuCtxSynchronize());
1516
1517                         /* Update progress. */
1518                         rtile.sample = sample + wtile->num_samples;
1519                         task.update_progress(&rtile, rtile.w*rtile.h*wtile->num_samples);
1520
1521                         if(task.get_cancel()) {
1522                                 if(task.need_finish_queue == false)
1523                                         break;
1524                         }
1525                 }
1526         }
1527
1528         void film_convert(DeviceTask& task, device_ptr buffer, device_ptr rgba_byte, device_ptr rgba_half)
1529         {
1530                 if(have_error())
1531                         return;
1532
1533                 CUDAContextScope scope(this);
1534
1535                 CUfunction cuFilmConvert;
1536                 CUdeviceptr d_rgba = map_pixels((rgba_byte)? rgba_byte: rgba_half);
1537                 CUdeviceptr d_buffer = cuda_device_ptr(buffer);
1538
1539                 /* get kernel function */
1540                 if(rgba_half) {
1541                         cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_half_float"));
1542                 }
1543                 else {
1544                         cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_byte"));
1545                 }
1546
1547
1548                 float sample_scale = 1.0f/(task.sample + 1);
1549
1550                 /* pass in parameters */
1551                 void *args[] = {&d_rgba,
1552                                 &d_buffer,
1553                                 &sample_scale,
1554                                 &task.x,
1555                                 &task.y,
1556                                 &task.w,
1557                                 &task.h,
1558                                 &task.offset,
1559                                 &task.stride};
1560
1561                 /* launch kernel */
1562                 int threads_per_block;
1563                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuFilmConvert));
1564
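                     /* Use a square thread block no larger than the kernel maximum, with enough
                      * blocks to cover the task rectangle. */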
1565                 int xthreads = (int)sqrt(threads_per_block);
1566                 int ythreads = (int)sqrt(threads_per_block);
1567                 int xblocks = (task.w + xthreads - 1)/xthreads;
1568                 int yblocks = (task.h + ythreads - 1)/ythreads;
1569
1570                 cuda_assert(cuFuncSetCacheConfig(cuFilmConvert, CU_FUNC_CACHE_PREFER_L1));
1571
1572                 cuda_assert(cuLaunchKernel(cuFilmConvert,
1573                                            xblocks, yblocks, 1, /* blocks */
1574                                            xthreads, ythreads, 1, /* threads */
1575                                            0, 0, args, 0));
1576
1577                 unmap_pixels((rgba_byte)? rgba_byte: rgba_half);
1578
1579                 cuda_assert(cuCtxSynchronize());
1580         }
1581
1582         void shader(DeviceTask& task)
1583         {
1584                 if(have_error())
1585                         return;
1586
1587                 CUDAContextScope scope(this);
1588
1589                 CUfunction cuShader;
1590                 CUdeviceptr d_input = cuda_device_ptr(task.shader_input);
1591                 CUdeviceptr d_output = cuda_device_ptr(task.shader_output);
1592
1593                 /* get kernel function */
1594                 if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
1595                         cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_bake"));
1596                 }
1597                 else if(task.shader_eval_type == SHADER_EVAL_DISPLACE) {
1598                         cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_displace"));
1599                 }
1600                 else {
1601                         cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_background"));
1602                 }
1603
1604                 /* do tasks in smaller chunks, so we can cancel it */
1605                 const int shader_chunk_size = 65536;
1606                 const int start = task.shader_x;
1607                 const int end = task.shader_x + task.shader_w;
1608                 int offset = task.offset;
1609
1610                 bool canceled = false;
1611                 for(int sample = 0; sample < task.num_samples && !canceled; sample++) {
1612                         for(int shader_x = start; shader_x < end; shader_x += shader_chunk_size) {
1613                                 int shader_w = min(shader_chunk_size, end - shader_x);
1614
1615                                 /* pass in parameters */
1616                                 void *args[8];
1617                                 int arg = 0;
1618                                 args[arg++] = &d_input;
1619                                 args[arg++] = &d_output;
1620                                 args[arg++] = &task.shader_eval_type;
1621                                 if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
1622                                         args[arg++] = &task.shader_filter;
1623                                 }
1624                                 args[arg++] = &shader_x;
1625                                 args[arg++] = &shader_w;
1626                                 args[arg++] = &offset;
1627                                 args[arg++] = &sample;
1628
1629                                 /* launch kernel */
1630                                 int threads_per_block;
1631                                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuShader));
1632
1633                                 int xblocks = (shader_w + threads_per_block - 1)/threads_per_block;
1634
1635                                 cuda_assert(cuFuncSetCacheConfig(cuShader, CU_FUNC_CACHE_PREFER_L1));
1636                                 cuda_assert(cuLaunchKernel(cuShader,
1637                                                            xblocks, 1, 1, /* blocks */
1638                                                            threads_per_block, 1, 1, /* threads */
1639                                                            0, 0, args, 0));
1640
1641                                 cuda_assert(cuCtxSynchronize());
1642
1643                                 if(task.get_cancel()) {
1644                                         canceled = true;
1645                                         break;
1646                                 }
1647                         }
1648
1649                         task.update_progress(NULL);
1650                 }
1651         }
1652
1653         CUdeviceptr map_pixels(device_ptr mem)
1654         {
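                     /* With OpenGL interop the pixels live in a PBO that has to be mapped for
                      * CUDA access; in background mode they are regular device memory. */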
1655                 if(!background) {
1656                         PixelMem pmem = pixel_mem_map[mem];
1657                         CUdeviceptr buffer;
1658
1659                         size_t bytes;
1660                         cuda_assert(cuGraphicsMapResources(1, &pmem.cuPBOresource, 0));
1661                         cuda_assert(cuGraphicsResourceGetMappedPointer(&buffer, &bytes, pmem.cuPBOresource));
1662
1663                         return buffer;
1664                 }
1665
1666                 return cuda_device_ptr(mem);
1667         }
1668
1669         void unmap_pixels(device_ptr mem)
1670         {
1671                 if(!background) {
1672                         PixelMem pmem = pixel_mem_map[mem];
1673
1674                         cuda_assert(cuGraphicsUnmapResources(1, &pmem.cuPBOresource, 0));
1675                 }
1676         }
1677
1678         void pixels_alloc(device_memory& mem)
1679         {
1680                 PixelMem pmem;
1681
1682                 pmem.w = mem.data_width;
1683                 pmem.h = mem.data_height;
1684
1685                 CUDAContextScope scope(this);
1686
1687                 glGenBuffers(1, &pmem.cuPBO);
1688                 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
1689                 if(mem.data_type == TYPE_HALF)
1690                         glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(GLhalf)*4, NULL, GL_DYNAMIC_DRAW);
1691                 else
1692                         glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(uint8_t)*4, NULL, GL_DYNAMIC_DRAW);
1693
1694                 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1695
1696                 glGenTextures(1, &pmem.cuTexId);
1697                 glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
1698                 if(mem.data_type == TYPE_HALF)
1699                         glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F_ARB, pmem.w, pmem.h, 0, GL_RGBA, GL_HALF_FLOAT, NULL);
1700                 else
1701                         glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, pmem.w, pmem.h, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
1702                 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
1703                 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
1704                 glBindTexture(GL_TEXTURE_2D, 0);
1705
1706                 CUresult result = cuGraphicsGLRegisterBuffer(&pmem.cuPBOresource, pmem.cuPBO, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);
1707
1708                 if(result == CUDA_SUCCESS) {
1709                         mem.device_pointer = pmem.cuTexId;
1710                         pixel_mem_map[mem.device_pointer] = pmem;
1711
1712                         mem.device_size = mem.memory_size();
1713                         stats.mem_alloc(mem.device_size);
1714
1715                         return;
1716                 }
1717                 else {
1718                         /* failed to register buffer, fallback to no interop */
1719                         glDeleteBuffers(1, &pmem.cuPBO);
1720                         glDeleteTextures(1, &pmem.cuTexId);
1721
1722                         background = true;
1723                 }
1724         }
1725
1726         void pixels_copy_from(device_memory& mem, int y, int w, int h)
1727         {
1728                 PixelMem pmem = pixel_mem_map[mem.device_pointer];
1729
1730                 CUDAContextScope scope(this);
1731
1732                 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
1733                 uchar *pixels = (uchar*)glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_READ_ONLY);
1734                 size_t offset = sizeof(uchar)*4*y*w;
1735                 memcpy((uchar*)mem.host_pointer + offset, pixels + offset, sizeof(uchar)*4*w*h);
1736                 glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
1737                 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1738         }
1739
1740         void pixels_free(device_memory& mem)
1741         {
1742                 if(mem.device_pointer) {
1743                         PixelMem pmem = pixel_mem_map[mem.device_pointer];
1744
1745                         CUDAContextScope scope(this);
1746
1747                         cuda_assert(cuGraphicsUnregisterResource(pmem.cuPBOresource));
1748                         glDeleteBuffers(1, &pmem.cuPBO);
1749                         glDeleteTextures(1, &pmem.cuTexId);
1750
1751                         pixel_mem_map.erase(pixel_mem_map.find(mem.device_pointer));
1752                         mem.device_pointer = 0;
1753
1754                         stats.mem_free(mem.device_size);
1755                         mem.device_size = 0;
1756                 }
1757         }
1758
1759         void draw_pixels(device_memory& mem, int y, int w, int h, int dx, int dy, int width, int height, bool transparent,
1760                 const DeviceDrawParams &draw_params)
1761         {
1762                 assert(mem.type == MEM_PIXELS);
1763
1764                 if(!background) {
1765                         PixelMem pmem = pixel_mem_map[mem.device_pointer];
1766                         float *vpointer;
1767
1768                         CUDAContextScope scope(this);
1769
1770                         /* For multi devices, this uses the inefficient approach of allocating
1771                          * all pixels on the device even though we only render to a subset. */
1772                         size_t offset = 4*y*w;
1773
1774                         if(mem.data_type == TYPE_HALF)
1775                                 offset *= sizeof(GLhalf);
1776                         else
1777                                 offset *= sizeof(uint8_t);
1778
1779                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
1780                         glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
1781                         if(mem.data_type == TYPE_HALF)
1782                                 glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_HALF_FLOAT, (void*)offset);
1783                         else
1784                                 glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, (void*)offset);
1785                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1786
1787                         glEnable(GL_TEXTURE_2D);
1788
1789                         if(transparent) {
1790                                 glEnable(GL_BLEND);
1791                                 glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
1792                         }
1793
1794                         glColor3f(1.0f, 1.0f, 1.0f);
1795
1796                         if(draw_params.bind_display_space_shader_cb) {
1797                                 draw_params.bind_display_space_shader_cb();
1798                         }
1799
1800                         if(!vertex_buffer)
1801                                 glGenBuffers(1, &vertex_buffer);
1802
1803                         glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
1804                         /* invalidate old contents - avoids stalling if buffer is still waiting in queue to be rendered */
1805                         glBufferData(GL_ARRAY_BUFFER, 16 * sizeof(float), NULL, GL_STREAM_DRAW);
1806
1807                         vpointer = (float *)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
1808
1809                         if(vpointer) {
1810                                 /* texture coordinate - vertex pair */
1811                                 vpointer[0] = 0.0f;
1812                                 vpointer[1] = 0.0f;
1813                                 vpointer[2] = dx;
1814                                 vpointer[3] = dy;
1815
1816                                 vpointer[4] = (float)w/(float)pmem.w;
1817                                 vpointer[5] = 0.0f;
1818                                 vpointer[6] = (float)width + dx;
1819                                 vpointer[7] = dy;
1820
1821                                 vpointer[8] = (float)w/(float)pmem.w;
1822                                 vpointer[9] = (float)h/(float)pmem.h;
1823                                 vpointer[10] = (float)width + dx;
1824                                 vpointer[11] = (float)height + dy;
1825
1826                                 vpointer[12] = 0.0f;
1827                                 vpointer[13] = (float)h/(float)pmem.h;
1828                                 vpointer[14] = dx;
1829                                 vpointer[15] = (float)height + dy;
1830
1831                                 glUnmapBuffer(GL_ARRAY_BUFFER);
1832                         }
1833
1834                         glTexCoordPointer(2, GL_FLOAT, 4 * sizeof(float), 0);
1835                         glVertexPointer(2, GL_FLOAT, 4 * sizeof(float), (char *)NULL + 2 * sizeof(float));
1836
1837                         glEnableClientState(GL_VERTEX_ARRAY);
1838                         glEnableClientState(GL_TEXTURE_COORD_ARRAY);
1839
1840                         glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
1841
1842                         glDisableClientState(GL_TEXTURE_COORD_ARRAY);
1843                         glDisableClientState(GL_VERTEX_ARRAY);
1844
1845                         glBindBuffer(GL_ARRAY_BUFFER, 0);
1846
1847                         if(draw_params.unbind_display_space_shader_cb) {
1848                                 draw_params.unbind_display_space_shader_cb();
1849                         }
1850
1851                         if(transparent)
1852                                 glDisable(GL_BLEND);
1853
1854                         glBindTexture(GL_TEXTURE_2D, 0);
1855                         glDisable(GL_TEXTURE_2D);
1856
1857                         return;
1858                 }
1859
1860                 Device::draw_pixels(mem, y, w, h, dx, dy, width, height, transparent, draw_params);
1861         }
1862
1863         void thread_run(DeviceTask *task)
1864         {
1865                 CUDAContextScope scope(this);
1866
1867                 if(task->type == DeviceTask::RENDER) {
1868                         DeviceRequestedFeatures requested_features;
1869                         if(use_split_kernel()) {
1870                                 if(split_kernel == NULL) {
1871                                         split_kernel = new CUDASplitKernel(this);
1872                                         split_kernel->load_kernels(requested_features);
1873                                 }
1874                         }
1875
1876                         device_vector<WorkTile> work_tiles(this, "work_tiles", MEM_READ_ONLY);
1877
1878                         /* keep rendering tiles until done */
1879                         RenderTile tile;
1880                         DenoisingTask denoising(this);
1881
1882                         while(task->acquire_tile(this, tile)) {
1883                                 if(tile.task == RenderTile::PATH_TRACE) {
1884                                         if(use_split_kernel()) {
1885                                                 device_only_memory<uchar> void_buffer(this, "void_buffer");
1886                                                 split_kernel->path_trace(task, tile, void_buffer, void_buffer);
1887                                         }
1888                                         else {
1889                                                 path_trace(*task, tile, work_tiles);
1890                                         }
1891                                 }
1892                                 else if(tile.task == RenderTile::DENOISE) {
1893                                         tile.sample = tile.start_sample + tile.num_samples;
1894
1895                                         denoise(tile, denoising, *task);
1896
1897                                         task->update_progress(&tile, tile.w*tile.h);
1898                                 }
1899
1900                                 task->release_tile(tile);
1901
1902                                 if(task->get_cancel()) {
1903                                         if(task->need_finish_queue == false)
1904                                                 break;
1905                                 }
1906                         }
1907
1908                         work_tiles.free();
1909                 }
1910                 else if(task->type == DeviceTask::SHADER) {
1911                         shader(*task);
1912
1913                         cuda_assert(cuCtxSynchronize());
1914                 }
1915         }
1916
1917         class CUDADeviceTask : public DeviceTask {
1918         public:
1919                 CUDADeviceTask(CUDADevice *device, DeviceTask& task)
1920                 : DeviceTask(task)
1921                 {
1922                         run = function_bind(&CUDADevice::thread_run, device, this);
1923                 }
1924         };
1925
1926         int get_split_task_count(DeviceTask& /*task*/)
1927         {
1928                 return 1;
1929         }
1930
1931         void task_add(DeviceTask& task)
1932         {
1933                 CUDAContextScope scope(this);
1934
1935                 /* Load texture info. */
1936                 load_texture_info();
1937
1938                 /* Synchronize all memory copies before executing task. */
1939                 cuda_assert(cuCtxSynchronize());
1940
1941                 if(task.type == DeviceTask::FILM_CONVERT) {
1942                         /* must be done in main thread due to opengl access */
1943                         film_convert(task, task.buffer, task.rgba_byte, task.rgba_half);
1944                 }
1945                 else {
1946                         task_pool.push(new CUDADeviceTask(this, task));
1947                 }
1948         }
1949
1950         void task_wait()
1951         {
1952                 task_pool.wait();
1953         }
1954
1955         void task_cancel()
1956         {
1957                 task_pool.cancel();
1958         }
1959
1960         friend class CUDASplitKernelFunction;
1961         friend class CUDASplitKernel;
1962         friend class CUDAContextScope;
1963 };
1964
1965 /* Redefine the cuda_assert macro so it can be used outside of the CUDADevice class,
1966  * now that the definition of that class is complete.
1967  */
1968 #undef cuda_assert
1969 #define cuda_assert(stmt) \
1970         { \
1971                 CUresult result = stmt; \
1972                 \
1973                 if(result != CUDA_SUCCESS) { \
1974                         string message = string_printf("CUDA error: %s in %s", cuewErrorString(result), #stmt); \
1975                         if(device->error_msg == "") \
1976                                 device->error_msg = message; \
1977                         fprintf(stderr, "%s\n", message.c_str()); \
1978                         /*cuda_abort();*/ \
1979                         device->cuda_error_documentation(); \
1980                 } \
1981         } (void)0
1982
1983
1984 /* CUDA context scope. */
1985
1986 CUDAContextScope::CUDAContextScope(CUDADevice *device)
1987 : device(device)
1988 {
1989         cuda_assert(cuCtxPushCurrent(device->cuContext));
1990 }
1991
1992 CUDAContextScope::~CUDAContextScope()
1993 {
1994         cuda_assert(cuCtxPopCurrent(NULL));
1995 }
1996
1997 /* split kernel */
1998
1999 class CUDASplitKernelFunction : public SplitKernelFunction {
2000         CUDADevice* device;
2001         CUfunction func;
2002 public:
2003         CUDASplitKernelFunction(CUDADevice *device, CUfunction func) : device(device), func(func) {}
2004
2005         /* enqueue the kernel, returns false if there is an error */
2006         bool enqueue(const KernelDimensions &dim, device_memory &/*kg*/, device_memory &/*data*/)
2007         {
2008                 return enqueue(dim, NULL);
2009         }
2010
2011         /* enqueue the kernel, returns false if there is an error */
2012         bool enqueue(const KernelDimensions &dim, void *args[])
2013         {
2014                 if(device->have_error())
2015                         return false;
2016
2017                 CUDAContextScope scope(device);
2018
2019                 /* we ignore dim.local_size for now, as this is faster */
2020                 int threads_per_block;
2021                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, func));
2022
2023                 int xblocks = (dim.global_size[0]*dim.global_size[1] + threads_per_block - 1)/threads_per_block;
2024
2025                 cuda_assert(cuFuncSetCacheConfig(func, CU_FUNC_CACHE_PREFER_L1));
2026
2027                 cuda_assert(cuLaunchKernel(func,
2028                                            xblocks, 1, 1, /* blocks */
2029                                            threads_per_block, 1, 1, /* threads */
2030                                            0, 0, args, 0));
2031
2032                 return !device->have_error();
2033         }
2034 };
2035
2036 CUDASplitKernel::CUDASplitKernel(CUDADevice *device) : DeviceSplitKernel(device), device(device)
2037 {
2038 }
2039
2040 uint64_t CUDASplitKernel::state_buffer_size(device_memory& /*kg*/, device_memory& /*data*/, size_t num_threads)
2041 {
2042         CUDAContextScope scope(device);
2043
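             /* The state size depends on the compiled kernel, so query it by launching a
              * single-thread kernel that writes the size into a device buffer. */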
2044         device_vector<uint64_t> size_buffer(device, "size_buffer", MEM_READ_WRITE);
2045         size_buffer.alloc(1);
2046         size_buffer.zero_to_device();
2047
2048         uint threads = num_threads;
2049         CUdeviceptr d_size = device->cuda_device_ptr(size_buffer.device_pointer);
2050
2051         struct args_t {
2052                 uint* num_threads;
2053                 CUdeviceptr* size;
2054         };
2055
2056         args_t args = {
2057                 &threads,
2058                 &d_size
2059         };
2060
2061         CUfunction state_buffer_size;
2062         cuda_assert(cuModuleGetFunction(&state_buffer_size, device->cuModule, "kernel_cuda_state_buffer_size"));
2063
2064         cuda_assert(cuLaunchKernel(state_buffer_size,
2065                                    1, 1, 1,
2066                                    1, 1, 1,
2067                                    0, 0, (void**)&args, 0));
2068
2069         size_buffer.copy_from_device(0, 1, 1);
2070         size_t size = size_buffer[0];
2071         size_buffer.free();
2072
2073         return size;
2074 }
2075
2076 bool CUDASplitKernel::enqueue_split_kernel_data_init(const KernelDimensions& dim,
2077                                     RenderTile& rtile,
2078                                     int num_global_elements,
2079                                     device_memory& /*kernel_globals*/,
2080                                     device_memory& /*kernel_data*/,
2081                                     device_memory& split_data,
2082                                     device_memory& ray_state,
2083                                     device_memory& queue_index,
2084                                     device_memory& use_queues_flag,
2085                                     device_memory& work_pool_wgs)
2086 {
2087         CUDAContextScope scope(device);
2088
2089         CUdeviceptr d_split_data = device->cuda_device_ptr(split_data.device_pointer);
2090         CUdeviceptr d_ray_state = device->cuda_device_ptr(ray_state.device_pointer);
2091         CUdeviceptr d_queue_index = device->cuda_device_ptr(queue_index.device_pointer);
2092         CUdeviceptr d_use_queues_flag = device->cuda_device_ptr(use_queues_flag.device_pointer);
2093         CUdeviceptr d_work_pool_wgs = device->cuda_device_ptr(work_pool_wgs.device_pointer);
2094
2095         CUdeviceptr d_buffer = device->cuda_device_ptr(rtile.buffer);
2096
2097         int end_sample = rtile.start_sample + rtile.num_samples;
2098         int queue_size = dim.global_size[0] * dim.global_size[1];
2099
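             /* Argument order must match the parameter list of kernel_cuda_path_trace_data_init. */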
2100         struct args_t {
2101                 CUdeviceptr* split_data_buffer;
2102                 int* num_elements;
2103                 CUdeviceptr* ray_state;
2104                 int* start_sample;
2105                 int* end_sample;
2106                 int* sx;
2107                 int* sy;
2108                 int* sw;
2109                 int* sh;
2110                 int* offset;
2111                 int* stride;
2112                 CUdeviceptr* queue_index;
2113                 int* queuesize;
2114                 CUdeviceptr* use_queues_flag;
2115                 CUdeviceptr* work_pool_wgs;
2116                 int* num_samples;
2117                 CUdeviceptr* buffer;
2118         };
2119
2120         args_t args = {
2121                 &d_split_data,
2122                 &num_global_elements,
2123                 &d_ray_state,
2124                 &rtile.start_sample,
2125                 &end_sample,
2126                 &rtile.x,
2127                 &rtile.y,
2128                 &rtile.w,
2129                 &rtile.h,
2130                 &rtile.offset,
2131                 &rtile.stride,
2132                 &d_queue_index,
2133                 &queue_size,
2134                 &d_use_queues_flag,
2135                 &d_work_pool_wgs,
2136                 &rtile.num_samples,
2137                 &d_buffer
2138         };
2139
2140         CUfunction data_init;
2141         cuda_assert(cuModuleGetFunction(&data_init, device->cuModule, "kernel_cuda_path_trace_data_init"));
2142         if(device->have_error()) {
2143                 return false;
2144         }
2145
2146         CUDASplitKernelFunction(device, data_init).enqueue(dim, (void**)&args);
2147
2148         return !device->have_error();
2149 }
2150
2151 SplitKernelFunction* CUDASplitKernel::get_split_kernel_function(const string& kernel_name,
2152                                                                 const DeviceRequestedFeatures&)
2153 {
2154         CUDAContextScope scope(device);
2155         CUfunction func;
2156
2157         cuda_assert(cuModuleGetFunction(&func, device->cuModule, (string("kernel_cuda_") + kernel_name).data()));
2158         if(device->have_error()) {
2159                 device->cuda_error_message(string_printf("kernel \"kernel_cuda_%s\" not found in module", kernel_name.data()));
2160                 return NULL;
2161         }
2162
2163         return new CUDASplitKernelFunction(device, func);
2164 }
2165
2166 int2 CUDASplitKernel::split_kernel_local_size()
2167 {
2168         return make_int2(32, 1);
2169 }
2170
2171 int2 CUDASplitKernel::split_kernel_global_size(device_memory& kg, device_memory& data, DeviceTask * /*task*/)
2172 {
2173         CUDAContextScope scope(device);
2174         size_t free;
2175         size_t total;
2176
2177         cuda_assert(cuMemGetInfo(&free, &total));
2178
2179         VLOG(1) << "Maximum device allocation size: "
2180                 << string_human_readable_number(free) << " bytes. ("
2181                 << string_human_readable_size(free) << ").";
2182
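             /* Size the global work pool so that the split kernel state fits in about
              * half of the currently free device memory. */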
2183         size_t num_elements = max_elements_for_max_buffer_size(kg, data, free / 2);
2184         size_t side = round_down((int)sqrt(num_elements), 32);
2185         int2 global_size = make_int2(side, round_down(num_elements / side, 16));
2186         VLOG(1) << "Global size: " << global_size << ".";
2187         return global_size;
2188 }
2189
2190 bool device_cuda_init(void)
2191 {
2192 #ifdef WITH_CUDA_DYNLOAD
2193         static bool initialized = false;
2194         static bool result = false;
2195
2196         if(initialized)
2197                 return result;
2198
2199         initialized = true;
2200         int cuew_result = cuewInit();
2201         if(cuew_result == CUEW_SUCCESS) {
2202                 VLOG(1) << "CUEW initialization succeeded";
2203                 if(CUDADevice::have_precompiled_kernels()) {
2204                         VLOG(1) << "Found precompiled kernels";
2205                         result = true;
2206                 }
2207 #ifndef _WIN32
2208                 else if(cuewCompilerPath() != NULL) {
2209                         VLOG(1) << "Found CUDA compiler " << cuewCompilerPath();
2210                         result = true;
2211                 }
2212                 else {
2213                         VLOG(1) << "Neither precompiled kernels nor CUDA compiler was found,"
2214                                 << " unable to use CUDA";
2215                 }
2216 #endif
2217         }
2218         else {
2219                 VLOG(1) << "CUEW initialization failed: "
2220                         << ((cuew_result == CUEW_ERROR_ATEXIT_FAILED)
2221                             ? "Error setting up atexit() handler"
2222                             : "Error opening the library");
2223         }
2224
2225         return result;
2226 #else  /* WITH_CUDA_DYNLOAD */
2227         return true;
2228 #endif /* WITH_CUDA_DYNLOAD */
2229 }
2230
2231 Device *device_cuda_create(DeviceInfo& info, Stats &stats, bool background)
2232 {
2233         return new CUDADevice(info, stats, background);
2234 }
2235
2236 static CUresult device_cuda_safe_init()
2237 {
2238 #ifdef _WIN32
2239         __try {
2240                 return cuInit(0);
2241         }
2242         __except(EXCEPTION_EXECUTE_HANDLER) {
2243                 /* Ignore crashes inside the CUDA driver and hope we can
2244                  * survive even with corrupted CUDA installs. */
2245                 fprintf(stderr, "Cycles CUDA: driver crashed, continuing without CUDA.\n");
2246         }
2247
2248         return CUDA_ERROR_NO_DEVICE;
2249 #else
2250         return cuInit(0);
2251 #endif
2252 }
2253
2254 void device_cuda_info(vector<DeviceInfo>& devices)
2255 {
2256         CUresult result = device_cuda_safe_init();
2257         if(result != CUDA_SUCCESS) {
2258                 if(result != CUDA_ERROR_NO_DEVICE)
2259                         fprintf(stderr, "CUDA cuInit: %s\n", cuewErrorString(result));
2260                 return;
2261         }
2262
2263         int count = 0;
2264         result = cuDeviceGetCount(&count);
2265         if(result != CUDA_SUCCESS) {
2266                 fprintf(stderr, "CUDA cuDeviceGetCount: %s\n", cuewErrorString(result));
2267                 return;
2268         }
2269
2270         vector<DeviceInfo> display_devices;
2271
2272         for(int num = 0; num < count; num++) {
2273                 char name[256];
2274
2275                 result = cuDeviceGetName(name, 256, num);
2276                 if(result != CUDA_SUCCESS) {
2277                         fprintf(stderr, "CUDA cuDeviceGetName: %s\n", cuewErrorString(result));
2278                         continue;
2279                 }
2280
2281                 int major;
2282                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, num);
2283                 if(major < 2) {
2284                         VLOG(1) << "Ignoring device \"" << name
2285                                 << "\", compute capability is too low.";
2286                         continue;
2287                 }
2288
2289                 DeviceInfo info;
2290
2291                 info.type = DEVICE_CUDA;
2292                 info.description = string(name);
2293                 info.num = num;
2294
2295                 info.advanced_shading = (major >= 2);
2296                 info.has_fermi_limits = !(major >= 3);
2297                 info.has_half_images = (major >= 3);
2298                 info.has_volume_decoupled = false;
2299                 info.has_qbvh = false;
2300
2301                 int pci_location[3] = {0, 0, 0};
2302                 cuDeviceGetAttribute(&pci_location[0], CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID, num);
2303                 cuDeviceGetAttribute(&pci_location[1], CU_DEVICE_ATTRIBUTE_PCI_BUS_ID, num);
2304                 cuDeviceGetAttribute(&pci_location[2], CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID, num);
2305                 info.id = string_printf("CUDA_%s_%04x:%02x:%02x",
2306                                         name,
2307                                         (unsigned int)pci_location[0],
2308                                         (unsigned int)pci_location[1],
2309                                         (unsigned int)pci_location[2]);
2310
2311                 /* If device has a kernel timeout and no compute preemption, we assume
2312                  * it is connected to a display and will freeze the display while doing
2313                  * computations. */
2314                 int timeout_attr = 0, preempt_attr = 0;
2315                 cuDeviceGetAttribute(&timeout_attr, CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT, num);
2316                 cuDeviceGetAttribute(&preempt_attr, CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED, num);
2317
2318                 if(timeout_attr && !preempt_attr) {
2319                         VLOG(1) << "Device is recognized as display.";
2320                         info.description += " (Display)";
2321                         info.display_device = true;
2322                         display_devices.push_back(info);
2323                 }
2324                 else {
2325                         devices.push_back(info);
2326                 }
2327                 VLOG(1) << "Added device \"" << name << "\" with id \"" << info.id << "\".";
2328         }
2329
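             /* Display devices are listed last, after all non-display GPUs. */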
2330         if(!display_devices.empty())
2331                 devices.insert(devices.end(), display_devices.begin(), display_devices.end());
2332 }
2333
2334 string device_cuda_capabilities(void)
2335 {
2336         CUresult result = device_cuda_safe_init();
2337         if(result != CUDA_SUCCESS) {
2338                 if(result != CUDA_ERROR_NO_DEVICE) {
2339                         return string("Error initializing CUDA: ") + cuewErrorString(result);
2340                 }
2341                 return "No CUDA device found\n";
2342         }
2343
2344         int count;
2345         result = cuDeviceGetCount(&count);
2346         if(result != CUDA_SUCCESS) {
2347                 return string("Error getting devices: ") + cuewErrorString(result);
2348         }
2349
2350         string capabilities = "";
2351         for(int num = 0; num < count; num++) {
2352                 char name[256];
2353                 if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS) {
2354                         continue;
2355                 }
2356                 capabilities += string("\t") + name + "\n";
2357                 int value;
2358 #define GET_ATTR(attr) \
2359                 { \
2360                         if(cuDeviceGetAttribute(&value, \
2361                                                 CU_DEVICE_ATTRIBUTE_##attr, \
2362                                                 num) == CUDA_SUCCESS) \
2363                         { \
2364                                 capabilities += string_printf("\t\tCU_DEVICE_ATTRIBUTE_" #attr "\t\t\t%d\n", \
2365                                                               value); \
2366                         } \
2367                 } (void)0
2368                 /* TODO(sergey): Strip all attributes which are not useful for us
2369                  * or do not depend on the driver.
2370                  */
2371                 GET_ATTR(MAX_THREADS_PER_BLOCK);
2372                 GET_ATTR(MAX_BLOCK_DIM_X);
2373                 GET_ATTR(MAX_BLOCK_DIM_Y);
2374                 GET_ATTR(MAX_BLOCK_DIM_Z);
2375                 GET_ATTR(MAX_GRID_DIM_X);
2376                 GET_ATTR(MAX_GRID_DIM_Y);
2377                 GET_ATTR(MAX_GRID_DIM_Z);
2378                 GET_ATTR(MAX_SHARED_MEMORY_PER_BLOCK);
2379                 GET_ATTR(SHARED_MEMORY_PER_BLOCK);
2380                 GET_ATTR(TOTAL_CONSTANT_MEMORY);
2381                 GET_ATTR(WARP_SIZE);
2382                 GET_ATTR(MAX_PITCH);
2383                 GET_ATTR(MAX_REGISTERS_PER_BLOCK);
2384                 GET_ATTR(REGISTERS_PER_BLOCK);
2385                 GET_ATTR(CLOCK_RATE);
2386                 GET_ATTR(TEXTURE_ALIGNMENT);
2387                 GET_ATTR(GPU_OVERLAP);
2388                 GET_ATTR(MULTIPROCESSOR_COUNT);
2389                 GET_ATTR(KERNEL_EXEC_TIMEOUT);
2390                 GET_ATTR(INTEGRATED);
2391                 GET_ATTR(CAN_MAP_HOST_MEMORY);
2392                 GET_ATTR(COMPUTE_MODE);
2393                 GET_ATTR(MAXIMUM_TEXTURE1D_WIDTH);
2394                 GET_ATTR(MAXIMUM_TEXTURE2D_WIDTH);
2395                 GET_ATTR(MAXIMUM_TEXTURE2D_HEIGHT);
2396                 GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH);
2397                 GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT);
2398                 GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH);
2399                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_WIDTH);
2400                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_HEIGHT);
2401                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_LAYERS);
2402                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_WIDTH);
2403                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_HEIGHT);
2404                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES);
2405                 GET_ATTR(SURFACE_ALIGNMENT);
2406                 GET_ATTR(CONCURRENT_KERNELS);
2407                 GET_ATTR(ECC_ENABLED);
2408                 GET_ATTR(TCC_DRIVER);
2409                 GET_ATTR(MEMORY_CLOCK_RATE);
2410                 GET_ATTR(GLOBAL_MEMORY_BUS_WIDTH);
2411                 GET_ATTR(L2_CACHE_SIZE);
2412                 GET_ATTR(MAX_THREADS_PER_MULTIPROCESSOR);
2413                 GET_ATTR(ASYNC_ENGINE_COUNT);
2414                 GET_ATTR(UNIFIED_ADDRESSING);
2415                 GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_WIDTH);
2416                 GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_LAYERS);
2417                 GET_ATTR(CAN_TEX2D_GATHER);
2418                 GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_WIDTH);
2419                 GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_HEIGHT);
2420                 GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE);
2421                 GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE);
2422                 GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE);
2423                 GET_ATTR(TEXTURE_PITCH_ALIGNMENT);
2424                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_WIDTH);
2425                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH);
2426                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS);
2427                 GET_ATTR(MAXIMUM_SURFACE1D_WIDTH);
2428                 GET_ATTR(MAXIMUM_SURFACE2D_WIDTH);
2429                 GET_ATTR(MAXIMUM_SURFACE2D_HEIGHT);
2430                 GET_ATTR(MAXIMUM_SURFACE3D_WIDTH);
2431                 GET_ATTR(MAXIMUM_SURFACE3D_HEIGHT);
2432                 GET_ATTR(MAXIMUM_SURFACE3D_DEPTH);
2433                 GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_WIDTH);
2434                 GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_LAYERS);
2435                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_WIDTH);
2436                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_HEIGHT);
2437                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_LAYERS);
2438                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_WIDTH);
2439                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH);
2440                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS);
2441                 GET_ATTR(MAXIMUM_TEXTURE1D_LINEAR_WIDTH);
2442                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_WIDTH);
2443                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_HEIGHT);
2444                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_PITCH);
2445                 GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH);
2446                 GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT);
2447                 GET_ATTR(COMPUTE_CAPABILITY_MAJOR);
2448                 GET_ATTR(COMPUTE_CAPABILITY_MINOR);
2449                 GET_ATTR(MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH);
2450                 GET_ATTR(STREAM_PRIORITIES_SUPPORTED);
2451                 GET_ATTR(GLOBAL_L1_CACHE_SUPPORTED);
2452                 GET_ATTR(LOCAL_L1_CACHE_SUPPORTED);
2453                 GET_ATTR(MAX_SHARED_MEMORY_PER_MULTIPROCESSOR);
2454                 GET_ATTR(MAX_REGISTERS_PER_MULTIPROCESSOR);
2455                 GET_ATTR(MANAGED_MEMORY);
2456                 GET_ATTR(MULTI_GPU_BOARD);
2457                 GET_ATTR(MULTI_GPU_BOARD_GROUP_ID);
2458 #undef GET_ATTR
2459                 capabilities += "\n";
2460         }
2461
2462         return capabilities;
2463 }
2464
2465 CCL_NAMESPACE_END