intern/cycles/device/device_cuda.cpp
1 /*
2  * Copyright 2011-2013 Blender Foundation
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16
17 #include <climits>
18 #include <limits.h>
19 #include <stdio.h>
20 #include <stdlib.h>
21 #include <string.h>
22
23 #include "device/device.h"
24 #include "device/device_denoising.h"
25 #include "device/device_intern.h"
26 #include "device/device_split_kernel.h"
27
28 #include "render/buffers.h"
29
30 #include "kernel/filter/filter_defines.h"
31
32 #ifdef WITH_CUDA_DYNLOAD
33 #  include "cuew.h"
34 #else
35 #  include "util/util_opengl.h"
36 #  include <cuda.h>
37 #  include <cudaGL.h>
38 #endif
39 #include "util/util_debug.h"
40 #include "util/util_foreach.h"
41 #include "util/util_logging.h"
42 #include "util/util_map.h"
43 #include "util/util_md5.h"
44 #include "util/util_opengl.h"
45 #include "util/util_path.h"
46 #include "util/util_string.h"
47 #include "util/util_system.h"
48 #include "util/util_types.h"
49 #include "util/util_time.h"
50
51 #include "kernel/split/kernel_split_data_types.h"
52
53 CCL_NAMESPACE_BEGIN
54
55 #ifndef WITH_CUDA_DYNLOAD
56
57 /* Transparently implement some functions, so the majority of the file does not need
58  * to worry about the difference between dynamically loaded and linked CUDA at all.
59  */
60
61 namespace {
62
63 const char *cuewErrorString(CUresult result)
64 {
65         /* We can only give the error code here without major code duplication.
66          * That should be enough, since dynamic loading is only disabled by folks
67          * who know what they're doing anyway.
68          *
69          * NOTE: Avoid calling this from several threads.
70          */
71         static string error;
72         error = string_printf("%d", result);
73         return error.c_str();
74 }
75
76 const char *cuewCompilerPath()
77 {
78         return CYCLES_CUDA_NVCC_EXECUTABLE;
79 }
80
81 int cuewCompilerVersion()
82 {
83         return (CUDA_VERSION / 100) + (CUDA_VERSION % 100 / 10);
84 }
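/* For example, CUDA_VERSION 8000 (CUDA 8.0) yields 80 above, and 9010 (CUDA 9.1)
 * yields 91. */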
85
86 }  /* namespace */
87 #endif  /* WITH_CUDA_DYNLOAD */
88
89 class CUDADevice;
90
91 class CUDASplitKernel : public DeviceSplitKernel {
92         CUDADevice *device;
93 public:
94         explicit CUDASplitKernel(CUDADevice *device);
95
96         virtual uint64_t state_buffer_size(device_memory& kg, device_memory& data, size_t num_threads);
97
98         virtual bool enqueue_split_kernel_data_init(const KernelDimensions& dim,
99                                                     RenderTile& rtile,
100                                                     int num_global_elements,
101                                                     device_memory& kernel_globals,
102                                                     device_memory& kernel_data_,
103                                                     device_memory& split_data,
104                                                     device_memory& ray_state,
105                                                     device_memory& queue_index,
106                                                     device_memory& use_queues_flag,
107                                                     device_memory& work_pool_wgs);
108
109         virtual SplitKernelFunction* get_split_kernel_function(const string& kernel_name,
110                                                                const DeviceRequestedFeatures&);
111         virtual int2 split_kernel_local_size();
112         virtual int2 split_kernel_global_size(device_memory& kg, device_memory& data, DeviceTask *task);
113 };
114
115 /* Utility to push/pop CUDA context. */
116 class CUDAContextScope {
117 public:
118         CUDAContextScope(CUDADevice *device);
119         ~CUDAContextScope();
120
121 private:
122         CUDADevice *device;
123 };
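/* Illustrative sketch only, not the authoritative definitions: a scope guard like
 * this typically pushes the device's CUDA context on construction and pops it on
 * destruction, roughly as below (error checking omitted). Definitions of this kind
 * have to appear after CUDADevice is fully defined, which is why only the
 * declaration lives here. */
#if 0
CUDAContextScope::CUDAContextScope(CUDADevice *device)
: device(device)
{
	cuCtxPushCurrent(device->cuContext);
}

CUDAContextScope::~CUDAContextScope()
{
	cuCtxPopCurrent(NULL);
}
#endif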
124
125 class CUDADevice : public Device
126 {
127 public:
128         DedicatedTaskPool task_pool;
129         CUdevice cuDevice;
130         CUcontext cuContext;
131         CUmodule cuModule, cuFilterModule;
132         size_t device_texture_headroom;
133         size_t device_working_headroom;
134         bool move_texture_to_host;
135         size_t map_host_used;
136         size_t map_host_limit;
137         int can_map_host;
138         int cuDevId;
139         int cuDevArchitecture;
140         bool first_error;
141         CUDASplitKernel *split_kernel;
142
143         struct CUDAMem {
144                 CUDAMem()
145                 : texobject(0), array(0), map_host_pointer(0), free_map_host(false) {}
146
147                 CUtexObject texobject;
148                 CUarray array;
149                 void *map_host_pointer;
150                 bool free_map_host;
151         };
152         typedef map<device_memory*, CUDAMem> CUDAMemMap;
153         CUDAMemMap cuda_mem_map;
154
155         struct PixelMem {
156                 GLuint cuPBO;
157                 CUgraphicsResource cuPBOresource;
158                 GLuint cuTexId;
159                 int w, h;
160         };
161         map<device_ptr, PixelMem> pixel_mem_map;
162
163         /* Bindless Textures */
164         device_vector<TextureInfo> texture_info;
165         bool need_texture_info;
166
167         CUdeviceptr cuda_device_ptr(device_ptr mem)
168         {
169                 return (CUdeviceptr)mem;
170         }
171
172         static bool have_precompiled_kernels()
173         {
174                 string cubins_path = path_get("lib");
175                 return path_exists(cubins_path);
176         }
177
178         virtual bool show_samples() const
179         {
180                 /* The CUDADevice only processes one tile at a time, so showing samples is fine. */
181                 return true;
182         }
183
184         virtual BVHLayoutMask get_bvh_layout_mask() const {
185                 return BVH_LAYOUT_BVH2;
186         }
187
188 /*#ifdef NDEBUG
189 #define cuda_abort()
190 #else
191 #define cuda_abort() abort()
192 #endif*/
193         void cuda_error_documentation()
194         {
195                 if(first_error) {
196                         fprintf(stderr, "\nRefer to the Cycles GPU rendering documentation for possible solutions:\n");
197                         fprintf(stderr, "https://docs.blender.org/manual/en/dev/render/cycles/gpu_rendering.html\n\n");
198                         first_error = false;
199                 }
200         }
201
202 #define cuda_assert(stmt) \
203         { \
204                 CUresult result = stmt; \
205                 \
206                 if(result != CUDA_SUCCESS) { \
207                         string message = string_printf("CUDA error: %s in %s, line %d", cuewErrorString(result), #stmt, __LINE__); \
208                         if(error_msg == "") \
209                                 error_msg = message; \
210                         fprintf(stderr, "%s\n", message.c_str()); \
211                         /*cuda_abort();*/ \
212                         cuda_error_documentation(); \
213                 } \
214         } (void) 0
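        /* Example usage (illustrative): the macro evaluates a driver API call once,
         * records the first error message and logs it, but does not abort:
         *
         *   cuda_assert(cuCtxSynchronize());
         */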
215
216         bool cuda_error_(CUresult result, const string& stmt)
217         {
218                 if(result == CUDA_SUCCESS)
219                         return false;
220
221                 string message = string_printf("CUDA error at %s: %s", stmt.c_str(), cuewErrorString(result));
222                 if(error_msg == "")
223                         error_msg = message;
224                 fprintf(stderr, "%s\n", message.c_str());
225                 cuda_error_documentation();
226                 return true;
227         }
228
229 #define cuda_error(stmt) cuda_error_(stmt, #stmt)
230
231         void cuda_error_message(const string& message)
232         {
233                 if(error_msg == "")
234                         error_msg = message;
235                 fprintf(stderr, "%s\n", message.c_str());
236                 cuda_error_documentation();
237         }
238
239         CUDADevice(DeviceInfo& info, Stats &stats, Profiler &profiler, bool background_)
240         : Device(info, stats, profiler, background_),
241           texture_info(this, "__texture_info", MEM_TEXTURE)
242         {
243                 first_error = true;
244                 background = background_;
245
246                 cuDevId = info.num;
247                 cuDevice = 0;
248                 cuContext = 0;
249
250                 cuModule = 0;
251                 cuFilterModule = 0;
252
253                 split_kernel = NULL;
254
255                 need_texture_info = false;
256
257                 device_texture_headroom = 0;
258                 device_working_headroom = 0;
259                 move_texture_to_host = false;
260                 map_host_limit = 0;
261                 map_host_used = 0;
262                 can_map_host = 0;
263
264                 /* Initialize CUDA. */
265                 if(cuda_error(cuInit(0)))
266                         return;
267
268                 /* Setup device and context. */
269                 if(cuda_error(cuDeviceGet(&cuDevice, cuDevId)))
270                         return;
271
272                 /* CU_CTX_MAP_HOST for mapping host memory when out of device memory.
273                  * CU_CTX_LMEM_RESIZE_TO_MAX for reserving local memory ahead of render,
274                  * so we can predict which memory to map to host. */
275                 cuda_assert(cuDeviceGetAttribute(&can_map_host, CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY, cuDevice));
276
277                 unsigned int ctx_flags = CU_CTX_LMEM_RESIZE_TO_MAX;
278                 if(can_map_host) {
279                         ctx_flags |= CU_CTX_MAP_HOST;
280                         init_host_memory();
281                 }
282
283                 /* Create context. */
284                 CUresult result;
285
286                 if(background) {
287                         result = cuCtxCreate(&cuContext, ctx_flags, cuDevice);
288                 }
289                 else {
290                         result = cuGLCtxCreate(&cuContext, ctx_flags, cuDevice);
291
292                         if(result != CUDA_SUCCESS) {
293                                 result = cuCtxCreate(&cuContext, ctx_flags, cuDevice);
294                                 background = true;
295                         }
296                 }
297
298                 if(cuda_error_(result, "cuCtxCreate"))
299                         return;
300
301                 int major, minor;
302                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
303                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
304                 cuDevArchitecture = major*100 + minor*10;
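                /* For example, a compute capability 6.1 (sm_61) device yields
                 * cuDevArchitecture == 610. */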
305
306                 /* Pop context set by cuCtxCreate. */
307                 cuCtxPopCurrent(NULL);
308         }
309
310         ~CUDADevice()
311         {
312                 task_pool.stop();
313
314                 delete split_kernel;
315
316                 texture_info.free();
317
318                 cuda_assert(cuCtxDestroy(cuContext));
319         }
320
321         bool support_device(const DeviceRequestedFeatures& /*requested_features*/)
322         {
323                 int major, minor;
324                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
325                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
326
327                 /* We only support sm_30 and above */
328                 if(major < 3) {
329                         cuda_error_message(string_printf("CUDA device supported only with compute capability 3.0 or up, found %d.%d.", major, minor));
330                         return false;
331                 }
332
333                 return true;
334         }
335
336         bool use_adaptive_compilation()
337         {
338                 return DebugFlags().cuda.adaptive_compile;
339         }
340
341         bool use_split_kernel()
342         {
343                 return DebugFlags().cuda.split_kernel;
344         }
345
346         /* Common NVCC flags which stay the same regardless of shading model or
347          * kernel sources md5, and only depend on the compiler or compilation settings.
348          */
349         string compile_kernel_get_common_cflags(
350                 const DeviceRequestedFeatures& requested_features,
351                 bool filter=false, bool split=false)
352         {
353                 const int machine = system_cpu_bits();
354                 const string source_path = path_get("source");
355                 const string include_path = source_path;
356                 string cflags = string_printf("-m%d "
357                                               "--ptxas-options=\"-v\" "
358                                               "--use_fast_math "
359                                               "-DNVCC "
360                                                "-I\"%s\"",
361                                               machine,
362                                               include_path.c_str());
363                 if(!filter && use_adaptive_compilation()) {
364                         cflags += " " + requested_features.get_build_options();
365                 }
366                 const char *extra_cflags = getenv("CYCLES_CUDA_EXTRA_CFLAGS");
367                 if(extra_cflags) {
368                         cflags += string(" ") + string(extra_cflags);
369                 }
370 #ifdef WITH_CYCLES_DEBUG
371                 cflags += " -D__KERNEL_DEBUG__";
372 #endif
373
374                 if(split) {
375                         cflags += " -D__SPLIT__";
376                 }
377
378                 return cflags;
379         }
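        /* For illustration only: on a 64-bit machine, with adaptive compilation
         * disabled and no extra environment flags, the string built above looks
         * roughly like (include path shortened, hypothetical):
         *
         *   -m64 --ptxas-options="-v" --use_fast_math -DNVCC -I"<cycles source dir>"
         *
         * with requested-feature defines, CYCLES_CUDA_EXTRA_CFLAGS and -D__SPLIT__
         * appended when applicable. */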
380
381         bool compile_check_compiler() {
382                 const char *nvcc = cuewCompilerPath();
383                 if(nvcc == NULL) {
384                         cuda_error_message("CUDA nvcc compiler not found. "
385                                            "Install CUDA toolkit in default location.");
386                         return false;
387                 }
388                 const int cuda_version = cuewCompilerVersion();
389                 VLOG(1) << "Found nvcc " << nvcc
390                         << ", CUDA version " << cuda_version
391                         << ".";
392                 const int major = cuda_version / 10, minor = cuda_version % 10;
393                 if(cuda_version == 0) {
394                         cuda_error_message("CUDA nvcc compiler version could not be parsed.");
395                         return false;
396                 }
397                 if(cuda_version < 80) {
398                         printf("Unsupported CUDA version %d.%d detected, "
399                                "you need CUDA 8.0 or newer.\n",
400                                major, minor);
401                         return false;
402                 }
403                 else if(cuda_version != 80) {
404                         printf("CUDA version %d.%d detected, build may succeed but only "
405                                "CUDA 8.0 is officially supported.\n",
406                                major, minor);
407                 }
408                 return true;
409         }
410
411         string compile_kernel(const DeviceRequestedFeatures& requested_features,
412                               bool filter=false, bool split=false)
413         {
414                 const char *name, *source;
415                 if(filter) {
416                         name = "filter";
417                         source = "filter.cu";
418                 }
419                 else if(split) {
420                         name = "kernel_split";
421                         source = "kernel_split.cu";
422                 }
423                 else {
424                         name = "kernel";
425                         source = "kernel.cu";
426                 }
427                 /* Compute cubin name. */
428                 int major, minor;
429                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
430                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
431
432                 /* Attempt to use kernel provided with Blender. */
433                 if(!use_adaptive_compilation()) {
434                         const string cubin = path_get(string_printf("lib/%s_sm_%d%d.cubin",
435                                                                     name, major, minor));
436                         VLOG(1) << "Testing for pre-compiled kernel " << cubin << ".";
437                         if(path_exists(cubin)) {
438                                 VLOG(1) << "Using precompiled kernel.";
439                                 return cubin;
440                         }
441                 }
442
443                 const string common_cflags =
444                         compile_kernel_get_common_cflags(requested_features, filter, split);
445
446                 /* Try to use locally compiled kernel. */
447                 const string source_path = path_get("source");
448                 const string kernel_md5 = path_files_md5_hash(source_path);
449
450                 /* We include cflags into the md5, so that changing the CUDA toolkit or
451                  * other compiler command line arguments causes the cubin to be re-built.
452                  */
453                 const string cubin_md5 = util_md5_string(kernel_md5 + common_cflags);
454
455                 const string cubin_file = string_printf("cycles_%s_sm%d%d_%s.cubin",
456                                                         name, major, minor,
457                                                         cubin_md5.c_str());
458                 const string cubin = path_cache_get(path_join("kernels", cubin_file));
459                 VLOG(1) << "Testing for locally compiled kernel " << cubin << ".";
460                 if(path_exists(cubin)) {
461                         VLOG(1) << "Using locally compiled kernel.";
462                         return cubin;
463                 }
464
465 #ifdef _WIN32
466                 if(have_precompiled_kernels()) {
467                         if(major < 3) {
468                                 cuda_error_message(string_printf(
469                                         "CUDA device requires compute capability 3.0 or up, "
470                                         "found %d.%d. Your GPU is not supported.",
471                                         major, minor));
472                         }
473                         else {
474                                 cuda_error_message(string_printf(
475                                         "CUDA binary kernel for this graphics card compute "
476                                         "capability (%d.%d) not found.",
477                                         major, minor));
478                         }
479                         return "";
480                 }
481 #endif
482
483                 /* Compile. */
484                 if(!compile_check_compiler()) {
485                         return "";
486                 }
487                 const char *nvcc = cuewCompilerPath();
488                 const string kernel = path_join(
489                         path_join(source_path, "kernel"),
490                         path_join("kernels",
491                                   path_join("cuda", source)));
492                 double starttime = time_dt();
493                 printf("Compiling CUDA kernel ...\n");
494
495                 path_create_directories(cubin);
496
497                 string command = string_printf("\"%s\" "
498                                                "-arch=sm_%d%d "
499                                                "--cubin \"%s\" "
500                                                "-o \"%s\" "
501                                                "%s ",
502                                                nvcc,
503                                                major, minor,
504                                                kernel.c_str(),
505                                                cubin.c_str(),
506                                                common_cflags.c_str());
507
508                 printf("%s\n", command.c_str());
509
510                 if(system(command.c_str()) == -1) {
511                         cuda_error_message("Failed to execute compilation command, "
512                                            "see console for details.");
513                         return "";
514                 }
515
516                 /* Verify that compilation succeeded. */
517                 if(!path_exists(cubin)) {
518                         cuda_error_message("CUDA kernel compilation failed, "
519                                            "see console for details.");
520                         return "";
521                 }
522
523                 printf("Kernel compilation finished in %.2lfs.\n", time_dt() - starttime);
524
525                 return cubin;
526         }
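        /* As an illustration (hypothetical paths, sm_61 device), the nvcc command
         * assembled above expands to something like:
         *
         *   "<nvcc>" -arch=sm_61 --cubin "<source>/kernel/kernels/cuda/kernel.cu"
         *       -o "<cache>/kernels/cycles_kernel_sm61_<md5>.cubin" -m64 ...
         */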
527
528         bool load_kernels(const DeviceRequestedFeatures& requested_features)
529         {
530                 /* TODO(sergey): Support kernel re-loading for CUDA devices.
531                  *
532                  * Currently re-loading the kernel will invalidate memory pointers,
533                  * causing problems in cuCtxSynchronize.
534                  */
535                 if(cuFilterModule && cuModule) {
536                         VLOG(1) << "Skipping kernel reload, not currently supported.";
537                         return true;
538                 }
539
540                 /* check if cuda init succeeded */
541                 if(cuContext == 0)
542                         return false;
543
544                 /* check if GPU is supported */
545                 if(!support_device(requested_features))
546                         return false;
547
548                 /* get kernel */
549                 string cubin = compile_kernel(requested_features, false, use_split_kernel());
550                 if(cubin == "")
551                         return false;
552
553                 string filter_cubin = compile_kernel(requested_features, true, false);
554                 if(filter_cubin == "")
555                         return false;
556
557                 /* open module */
558                 CUDAContextScope scope(this);
559
560                 string cubin_data;
561                 CUresult result;
562
563                 if(path_read_text(cubin, cubin_data))
564                         result = cuModuleLoadData(&cuModule, cubin_data.c_str());
565                 else
566                         result = CUDA_ERROR_FILE_NOT_FOUND;
567
568                 if(cuda_error_(result, "cuModuleLoad"))
569                         cuda_error_message(string_printf("Failed loading CUDA kernel %s.", cubin.c_str()));
570
571                 if(path_read_text(filter_cubin, cubin_data))
572                         result = cuModuleLoadData(&cuFilterModule, cubin_data.c_str());
573                 else
574                         result = CUDA_ERROR_FILE_NOT_FOUND;
575
576                 if(cuda_error_(result, "cuModuleLoad"))
577                         cuda_error_message(string_printf("Failed loading CUDA kernel %s.", filter_cubin.c_str()));
578
579                 if(result == CUDA_SUCCESS) {
580                         reserve_local_memory(requested_features);
581                 }
582
583                 return (result == CUDA_SUCCESS);
584         }
585
586         void reserve_local_memory(const DeviceRequestedFeatures& requested_features)
587         {
588                 if(use_split_kernel()) {
589                         /* The split kernel mostly uses global memory and adaptive compilation,
590                          * so it is currently difficult to predict how much is needed. */
591                         return;
592                 }
593
594                 /* Together with CU_CTX_LMEM_RESIZE_TO_MAX, this reserves local memory
595                  * needed for kernel launches, so that we can reliably figure out when
596                  * to allocate scene data in mapped host memory. */
597                 CUDAContextScope scope(this);
598
599                 size_t total = 0, free_before = 0, free_after = 0;
600                 cuMemGetInfo(&free_before, &total);
601
602                 /* Get kernel function. */
603                 CUfunction cuPathTrace;
604
605                 if(requested_features.use_integrator_branched) {
606                         cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_branched_path_trace"));
607                 }
608                 else {
609                         cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_path_trace"));
610                 }
611
612                 cuda_assert(cuFuncSetCacheConfig(cuPathTrace, CU_FUNC_CACHE_PREFER_L1));
613
614                 int min_blocks, num_threads_per_block;
615                 cuda_assert(cuOccupancyMaxPotentialBlockSize(&min_blocks, &num_threads_per_block, cuPathTrace, NULL, 0, 0));
616
617                 /* Launch the kernel; using just 1 block appears sufficient to reserve
618                  * memory for all multiprocessors. It would still be good to do this in
619                  * parallel for the multi-GPU case to make it faster. */
620                 CUdeviceptr d_work_tiles = 0;
621                 uint total_work_size = 0;
622
623                 void *args[] = {&d_work_tiles,
624                                 &total_work_size};
625
626                 cuda_assert(cuLaunchKernel(cuPathTrace,
627                                            1, 1, 1,
628                                            num_threads_per_block, 1, 1,
629                                            0, 0, args, 0));
630
631                 cuda_assert(cuCtxSynchronize());
632
633                 cuMemGetInfo(&free_after, &total);
634                 VLOG(1) << "Local memory reserved "
635                         << string_human_readable_number(free_before - free_after) << " bytes. ("
636                         << string_human_readable_size(free_before - free_after) << ")";
637
638 #if 0
639                 /* For testing mapped host memory, fill up device memory. */
640                 const size_t keep_mb = 1024;
641
642                 while(free_after > keep_mb * 1024 * 1024LL) {
643                         CUdeviceptr tmp;
644                         cuda_assert(cuMemAlloc(&tmp, 10 * 1024 * 1024LL));
645                         cuMemGetInfo(&free_after, &total);
646                 }
647 #endif
648         }
649
650         void init_host_memory()
651         {
652                 /* Limit amount of host mapped memory, because allocating too much can
653                  * cause system instability. Leave at least half or 4 GB of system
654                  * memory free, whichever is smaller. */
655                 size_t default_limit = 4 * 1024 * 1024 * 1024LL;
656                 size_t system_ram = system_physical_ram();
657
658                 if(system_ram > 0) {
659                         if(system_ram / 2 > default_limit) {
660                                 map_host_limit = system_ram - default_limit;
661                         }
662                         else {
663                                 map_host_limit = system_ram / 2;
664                         }
665                 }
666                 else {
667                         VLOG(1) << "Mapped host memory disabled, failed to get system RAM";
668                         map_host_limit = 0;
669                 }
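                /* Worked example: with 16 GiB of system RAM, half (8 GiB) exceeds the
                 * 4 GiB default limit, so map_host_limit = 16 GiB - 4 GiB = 12 GiB.
                 * With 6 GiB of RAM, half (3 GiB) is below 4 GiB, so the limit is 3 GiB. */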
670
671                 /* Amount of device memory to keep free after texture memory
672                  * and working memory allocations, respectively. We set the working
673                  * memory headroom lower so that some space is left after all
674                  * texture memory allocations. */
675                 device_working_headroom = 32 * 1024 * 1024LL; // 32MB
676                 device_texture_headroom = 128 * 1024 * 1024LL; // 128MB
677
678                 VLOG(1) << "Mapped host memory limit set to "
679                         << string_human_readable_number(map_host_limit) << " bytes. ("
680                         << string_human_readable_size(map_host_limit) << ")";
681         }
682
683         void load_texture_info()
684         {
685                 if(need_texture_info) {
686                         texture_info.copy_to_device();
687                         need_texture_info = false;
688                 }
689         }
690
691         void move_textures_to_host(size_t size, bool for_texture)
692         {
693                 /* Signal to reallocate textures in host memory only. */
694                 move_texture_to_host = true;
695
696                 while(size > 0) {
697                         /* Find suitable memory allocation to move. */
698                         device_memory *max_mem = NULL;
699                         size_t max_size = 0;
700                         bool max_is_image = false;
701
702                         foreach(CUDAMemMap::value_type& pair, cuda_mem_map) {
703                                 device_memory& mem = *pair.first;
704                                 CUDAMem *cmem = &pair.second;
705
706                                 bool is_texture = (mem.type == MEM_TEXTURE) && (&mem != &texture_info);
707                                 bool is_image = is_texture && (mem.data_height > 1);
708
709                                 /* Can't move this type of memory. */
710                                 if(!is_texture || cmem->array) {
711                                         continue;
712                                 }
713
714                                 /* Already in host memory. */
715                                 if(cmem->map_host_pointer) {
716                                         continue;
717                                 }
718
719                                 /* When making room for a texture allocation, only move image textures. */
720                                 if(for_texture && !is_image) {
721                                         continue;
722                                 }
723
724                                 /* Try to move largest allocation, prefer moving images. */
725                                 if(is_image > max_is_image ||
726                                    (is_image == max_is_image && mem.device_size > max_size)) {
727                                         max_is_image = is_image;
728                                         max_size = mem.device_size;
729                                         max_mem = &mem;
730                                 }
731                         }
732
733                         /* Move to host memory. This part is mutex protected since
734                          * multiple CUDA devices could be moving the memory. The
735                          * first one will do it, and the rest will adopt the pointer. */
736                         if(max_mem) {
737                                 VLOG(1) << "Move memory from device to host: " << max_mem->name;
738
739                                 static thread_mutex move_mutex;
740                                 thread_scoped_lock lock(move_mutex);
741
742                                 /* Preserve the original device pointer; in the multi-device case
743                                  * we can't change it because the pointer mapping would break. */
744                                 device_ptr prev_pointer = max_mem->device_pointer;
745                                 size_t prev_size = max_mem->device_size;
746
747                                 tex_free(*max_mem);
748                                 tex_alloc(*max_mem);
749                                 size = (max_size >= size)? 0: size - max_size;
750
751                                 max_mem->device_pointer = prev_pointer;
752                                 max_mem->device_size = prev_size;
753                         }
754                         else {
755                                 break;
756                         }
757                 }
758
759                 /* Update texture info array with new pointers. */
760                 load_texture_info();
761
762                 move_texture_to_host = false;
763         }
764
765         CUDAMem *generic_alloc(device_memory& mem, size_t pitch_padding = 0)
766         {
767                 CUDAContextScope scope(this);
768
769                 CUdeviceptr device_pointer = 0;
770                 size_t size = mem.memory_size() + pitch_padding;
771
772                 CUresult mem_alloc_result = CUDA_ERROR_OUT_OF_MEMORY;
773                 const char *status = "";
774
775                 /* First try allocating in device memory, respecting headroom. We make
776                  * an exception for texture info. It is small and frequently accessed,
777                  * so treat it as working memory.
778                  *
779                  * If there is not enough room for working memory, we will try to move
780                  * textures to host memory, assuming the performance impact would have
781                  * been worse for working memory. */
782                 bool is_texture = (mem.type == MEM_TEXTURE) && (&mem != &texture_info);
783                 bool is_image = is_texture && (mem.data_height > 1);
784
785                 size_t headroom = (is_texture)? device_texture_headroom:
786                                                 device_working_headroom;
787
788                 size_t total = 0, free = 0;
789                 cuMemGetInfo(&free, &total);
790
791                 /* Move textures to host memory if needed. */
792                 if(!move_texture_to_host && !is_image && (size + headroom) >= free) {
793                         move_textures_to_host(size + headroom - free, is_texture);
794                         cuMemGetInfo(&free, &total);
795                 }
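                /* Worked example: with 1 GiB of device memory free, a texture allocation
                 * of 960 MiB plus the 128 MiB texture headroom exceeds the free amount,
                 * so roughly 64 MiB worth of already resident textures is asked to move
                 * to host memory first. */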
796
797                 /* Allocate in device memory. */
798                 if(!move_texture_to_host && (size + headroom) < free) {
799                         mem_alloc_result = cuMemAlloc(&device_pointer, size);
800                         if(mem_alloc_result == CUDA_SUCCESS) {
801                                 status = " in device memory";
802                         }
803                 }
804
805                 /* Fall back to mapped host memory if needed and possible. */
806                 void *map_host_pointer = 0;
807                 bool free_map_host = false;
808
809                 if(mem_alloc_result != CUDA_SUCCESS && can_map_host &&
810                    map_host_used + size < map_host_limit) {
811                         if(mem.shared_pointer) {
812                                 /* Another device already allocated host memory. */
813                                 mem_alloc_result = CUDA_SUCCESS;
814                                 map_host_pointer = mem.shared_pointer;
815                         }
816                         else {
817                                 /* Allocate host memory ourselves. */
818                                 mem_alloc_result = cuMemHostAlloc(&map_host_pointer, size,
819                                                                   CU_MEMHOSTALLOC_DEVICEMAP |
820                                                                   CU_MEMHOSTALLOC_WRITECOMBINED);
821                                 mem.shared_pointer = map_host_pointer;
822                                 free_map_host = true;
823                         }
824
825                         if(mem_alloc_result == CUDA_SUCCESS) {
826                                 cuda_assert(cuMemHostGetDevicePointer_v2(&device_pointer, mem.shared_pointer, 0));
827                                 map_host_used += size;
828                                 status = " in host memory";
829
830                                 /* Replace host pointer with our host allocation. Only works if
831                                  * CUDA memory layout is the same and has no pitch padding. Also
832                                  * does not work if we move textures to host during a render,
833                                  * since other devices might be using the memory. */
834                                 if(!move_texture_to_host && pitch_padding == 0 &&
835                                    mem.host_pointer && mem.host_pointer != mem.shared_pointer) {
836                                         memcpy(mem.shared_pointer, mem.host_pointer, size);
837                                         mem.host_free();
838                                         mem.host_pointer = mem.shared_pointer;
839                                 }
840                         }
841                         else {
842                                 status = " failed, out of host memory";
843                         }
844                 }
845                 else if(mem_alloc_result != CUDA_SUCCESS) {
846                         status = " failed, out of device and host memory";
847                 }
848
849                 if(mem_alloc_result != CUDA_SUCCESS) {
850                         cuda_assert(mem_alloc_result);
851                 }
852
853                 if(mem.name) {
854                         VLOG(1) << "Buffer allocate: " << mem.name << ", "
855                                         << string_human_readable_number(mem.memory_size()) << " bytes. ("
856                                         << string_human_readable_size(mem.memory_size()) << ")"
857                                         << status;
858                 }
859
860                 mem.device_pointer = (device_ptr)device_pointer;
861                 mem.device_size = size;
862                 stats.mem_alloc(size);
863
864                 if(!mem.device_pointer) {
865                         return NULL;
866                 }
867
868                 /* Insert into map of allocations. */
869                 CUDAMem *cmem = &cuda_mem_map[&mem];
870                 cmem->map_host_pointer = map_host_pointer;
871                 cmem->free_map_host = free_map_host;
872                 return cmem;
873         }
874
875         void generic_copy_to(device_memory& mem)
876         {
877                 if(mem.host_pointer && mem.device_pointer) {
878                         CUDAContextScope scope(this);
879
880                         if(mem.host_pointer != mem.shared_pointer) {
881                                 cuda_assert(cuMemcpyHtoD(cuda_device_ptr(mem.device_pointer),
882                                                          mem.host_pointer,
883                                                          mem.memory_size()));
884                         }
885                 }
886         }
887
888         void generic_free(device_memory& mem)
889         {
890                 if(mem.device_pointer) {
891                         CUDAContextScope scope(this);
892                         const CUDAMem& cmem = cuda_mem_map[&mem];
893
894                         if(cmem.map_host_pointer) {
895                                 /* Free host memory. */
896                                 if(cmem.free_map_host) {
897                                         cuMemFreeHost(cmem.map_host_pointer);
898                                         if(mem.host_pointer == mem.shared_pointer) {
899                                                 mem.host_pointer = 0;
900                                         }
901                                         mem.shared_pointer = 0;
902                                 }
903
904                                 map_host_used -= mem.device_size;
905                         }
906                         else {
907                                 /* Free device memory. */
908                                 cuMemFree(mem.device_pointer);
909                         }
910
911                         stats.mem_free(mem.device_size);
912                         mem.device_pointer = 0;
913                         mem.device_size = 0;
914
915                         cuda_mem_map.erase(cuda_mem_map.find(&mem));
916                 }
917         }
918
919         void mem_alloc(device_memory& mem)
920         {
921                 if(mem.type == MEM_PIXELS && !background) {
922                         pixels_alloc(mem);
923                 }
924                 else if(mem.type == MEM_TEXTURE) {
925                         assert(!"mem_alloc not supported for textures.");
926                 }
927                 else {
928                         generic_alloc(mem);
929                 }
930         }
931
932         void mem_copy_to(device_memory& mem)
933         {
934                 if(mem.type == MEM_PIXELS) {
935                         assert(!"mem_copy_to not supported for pixels.");
936                 }
937                 else if(mem.type == MEM_TEXTURE) {
938                         tex_free(mem);
939                         tex_alloc(mem);
940                 }
941                 else {
942                         if(!mem.device_pointer) {
943                                 generic_alloc(mem);
944                         }
945
946                         generic_copy_to(mem);
947                 }
948         }
949
950         void mem_copy_from(device_memory& mem, int y, int w, int h, int elem)
951         {
952                 if(mem.type == MEM_PIXELS && !background) {
953                         pixels_copy_from(mem, y, w, h);
954                 }
955                 else if(mem.type == MEM_TEXTURE) {
956                         assert(!"mem_copy_from not supported for textures.");
957                 }
958                 else {
959                         CUDAContextScope scope(this);
960                         size_t offset = elem*y*w;
961                         size_t size = elem*w*h;
962
963                         if(mem.host_pointer && mem.device_pointer) {
964                                 cuda_assert(cuMemcpyDtoH((uchar*)mem.host_pointer + offset,
965                                                                                  (CUdeviceptr)(mem.device_pointer + offset), size));
966                         }
967                         else if(mem.host_pointer) {
968                                 memset((char*)mem.host_pointer + offset, 0, size);
969                         }
970                 }
971         }
972
973         void mem_zero(device_memory& mem)
974         {
975                 if(!mem.device_pointer) {
976                         mem_alloc(mem);
977                 }
978
979                 if(mem.host_pointer) {
980                         memset(mem.host_pointer, 0, mem.memory_size());
981                 }
982
983                 if(mem.device_pointer &&
984                    (!mem.host_pointer || mem.host_pointer != mem.shared_pointer)) {
985                         CUDAContextScope scope(this);
986                         cuda_assert(cuMemsetD8(cuda_device_ptr(mem.device_pointer), 0, mem.memory_size()));
987                 }
988         }
989
990         void mem_free(device_memory& mem)
991         {
992                 if(mem.type == MEM_PIXELS && !background) {
993                         pixels_free(mem);
994                 }
995                 else if(mem.type == MEM_TEXTURE) {
996                         tex_free(mem);
997                 }
998                 else {
999                         generic_free(mem);
1000                 }
1001         }
1002
1003         virtual device_ptr mem_alloc_sub_ptr(device_memory& mem, int offset, int /*size*/)
1004         {
1005                 return (device_ptr) (((char*) mem.device_pointer) + mem.memory_elements_size(offset));
1006         }
1007
1008         void const_copy_to(const char *name, void *host, size_t size)
1009         {
1010                 CUDAContextScope scope(this);
1011                 CUdeviceptr mem;
1012                 size_t bytes;
1013
1014                 cuda_assert(cuModuleGetGlobal(&mem, &bytes, cuModule, name));
1015                 //assert(bytes == size);
1016                 cuda_assert(cuMemcpyHtoD(mem, host, size));
1017         }
1018
1019         void tex_alloc(device_memory& mem)
1020         {
1021                 CUDAContextScope scope(this);
1022
1023                 /* General variables for both architectures */
1024                 string bind_name = mem.name;
1025                 size_t dsize = datatype_size(mem.data_type);
1026                 size_t size = mem.memory_size();
1027
1028                 CUaddress_mode address_mode = CU_TR_ADDRESS_MODE_WRAP;
1029                 switch(mem.extension) {
1030                         case EXTENSION_REPEAT:
1031                                 address_mode = CU_TR_ADDRESS_MODE_WRAP;
1032                                 break;
1033                         case EXTENSION_EXTEND:
1034                                 address_mode = CU_TR_ADDRESS_MODE_CLAMP;
1035                                 break;
1036                         case EXTENSION_CLIP:
1037                                 address_mode = CU_TR_ADDRESS_MODE_BORDER;
1038                                 break;
1039                         default:
1040                                 assert(0);
1041                                 break;
1042                 }
1043
1044                 CUfilter_mode filter_mode;
1045                 if(mem.interpolation == INTERPOLATION_CLOSEST) {
1046                         filter_mode = CU_TR_FILTER_MODE_POINT;
1047                 }
1048                 else {
1049                         filter_mode = CU_TR_FILTER_MODE_LINEAR;
1050                 }
1051
1052                 /* Data Storage */
1053                 if(mem.interpolation == INTERPOLATION_NONE) {
1054                         generic_alloc(mem);
1055                         generic_copy_to(mem);
1056
1057                         CUdeviceptr cumem;
1058                         size_t cubytes;
1059
1060                         cuda_assert(cuModuleGetGlobal(&cumem, &cubytes, cuModule, bind_name.c_str()));
1061
1062                         if(cubytes == 8) {
1063                                 /* 64 bit device pointer */
1064                                 uint64_t ptr = mem.device_pointer;
1065                                 cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
1066                         }
1067                         else {
1068                                 /* 32 bit device pointer */
1069                                 uint32_t ptr = (uint32_t)mem.device_pointer;
1070                                 cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
1071                         }
1072                         return;
1073                 }
1074
1075                 /* Image Texture Storage */
1076                 CUarray_format_enum format;
1077                 switch(mem.data_type) {
1078                         case TYPE_UCHAR: format = CU_AD_FORMAT_UNSIGNED_INT8; break;
1079                         case TYPE_UINT16: format = CU_AD_FORMAT_UNSIGNED_INT16; break;
1080                         case TYPE_UINT: format = CU_AD_FORMAT_UNSIGNED_INT32; break;
1081                         case TYPE_INT: format = CU_AD_FORMAT_SIGNED_INT32; break;
1082                         case TYPE_FLOAT: format = CU_AD_FORMAT_FLOAT; break;
1083                         case TYPE_HALF: format = CU_AD_FORMAT_HALF; break;
1084                         default: assert(0); return;
1085                 }
1086
1087                 CUDAMem *cmem = NULL;
1088                 CUarray array_3d = NULL;
1089                 size_t src_pitch = mem.data_width * dsize * mem.data_elements;
1090                 size_t dst_pitch = src_pitch;
1091
1092                 if(mem.data_depth > 1) {
1093                         /* 3D texture using an array; there is no API for linear 3D memory. */
1094                         CUDA_ARRAY3D_DESCRIPTOR desc;
1095
1096                         desc.Width = mem.data_width;
1097                         desc.Height = mem.data_height;
1098                         desc.Depth = mem.data_depth;
1099                         desc.Format = format;
1100                         desc.NumChannels = mem.data_elements;
1101                         desc.Flags = 0;
1102
1103                         VLOG(1) << "Array 3D allocate: " << mem.name << ", "
1104                                 << string_human_readable_number(mem.memory_size()) << " bytes. ("
1105                                 << string_human_readable_size(mem.memory_size()) << ")";
1106
1107                         cuda_assert(cuArray3DCreate(&array_3d, &desc));
1108
1109                         if(!array_3d) {
1110                                 return;
1111                         }
1112
1113                         CUDA_MEMCPY3D param;
1114                         memset(&param, 0, sizeof(param));
1115                         param.dstMemoryType = CU_MEMORYTYPE_ARRAY;
1116                         param.dstArray = array_3d;
1117                         param.srcMemoryType = CU_MEMORYTYPE_HOST;
1118                         param.srcHost = mem.host_pointer;
1119                         param.srcPitch = src_pitch;
1120                         param.WidthInBytes = param.srcPitch;
1121                         param.Height = mem.data_height;
1122                         param.Depth = mem.data_depth;
1123
1124                         cuda_assert(cuMemcpy3D(&param));
1125
1126                         mem.device_pointer = (device_ptr)array_3d;
1127                         mem.device_size = size;
1128                         stats.mem_alloc(size);
1129
1130                         cmem = &cuda_mem_map[&mem];
1131                         cmem->texobject = 0;
1132                         cmem->array = array_3d;
1133                 }
1134                 else if(mem.data_height > 0) {
1135                         /* 2D texture, using pitch aligned linear memory. */
1136                         int alignment = 0;
1137                         cuda_assert(cuDeviceGetAttribute(&alignment, CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT, cuDevice));
1138                         dst_pitch = align_up(src_pitch, alignment);
1139                         size_t dst_size = dst_pitch * mem.data_height;
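                        /* Worked example: a 1021 pixel wide uchar4 texture has
                         * src_pitch = 1021*1*4 = 4084 bytes; with a pitch alignment of 32
                         * this rounds up to dst_pitch = 4096 bytes, i.e. 12 bytes of
                         * padding per row. */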
1140
1141                         cmem = generic_alloc(mem, dst_size - mem.memory_size());
1142                         if(!cmem) {
1143                                 return;
1144                         }
1145
1146                         CUDA_MEMCPY2D param;
1147                         memset(&param, 0, sizeof(param));
1148                         param.dstMemoryType = CU_MEMORYTYPE_DEVICE;
1149                         param.dstDevice = mem.device_pointer;
1150                         param.dstPitch = dst_pitch;
1151                         param.srcMemoryType = CU_MEMORYTYPE_HOST;
1152                         param.srcHost = mem.host_pointer;
1153                         param.srcPitch = src_pitch;
1154                         param.WidthInBytes = param.srcPitch;
1155                         param.Height = mem.data_height;
1156
1157                         cuda_assert(cuMemcpy2DUnaligned(&param));
1158                 }
1159                 else {
1160                         /* 1D texture, using linear memory. */
1161                         cmem = generic_alloc(mem);
1162                         if(!cmem) {
1163                                 return;
1164                         }
1165
1166                         cuda_assert(cuMemcpyHtoD(mem.device_pointer, mem.host_pointer, size));
1167                 }
1168
1169                 /* Kepler+, bindless textures. */
1170                 int flat_slot = 0;
1171                 if(string_startswith(mem.name, "__tex_image")) {
1172                         int pos = string(mem.name).rfind("_");
1173                         flat_slot = atoi(mem.name + pos + 1);
1174                 }
1175                 else {
1176                         assert(0);
1177                 }
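                /* For example, a texture named "__tex_image_float4_012" (hypothetical name
                 * following the Cycles convention) would be assigned flat_slot = 12. */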
1178
1179                 CUDA_RESOURCE_DESC resDesc;
1180                 memset(&resDesc, 0, sizeof(resDesc));
1181
1182                 if(array_3d) {
1183                         resDesc.resType = CU_RESOURCE_TYPE_ARRAY;
1184                         resDesc.res.array.hArray = array_3d;
1185                         resDesc.flags = 0;
1186                 }
1187                 else if(mem.data_height > 0) {
1188                         resDesc.resType = CU_RESOURCE_TYPE_PITCH2D;
1189                         resDesc.res.pitch2D.devPtr = mem.device_pointer;
1190                         resDesc.res.pitch2D.format = format;
1191                         resDesc.res.pitch2D.numChannels = mem.data_elements;
1192                         resDesc.res.pitch2D.height = mem.data_height;
1193                         resDesc.res.pitch2D.width = mem.data_width;
1194                         resDesc.res.pitch2D.pitchInBytes = dst_pitch;
1195                 }
1196                 else {
1197                         resDesc.resType = CU_RESOURCE_TYPE_LINEAR;
1198                         resDesc.res.linear.devPtr = mem.device_pointer;
1199                         resDesc.res.linear.format = format;
1200                         resDesc.res.linear.numChannels = mem.data_elements;
1201                         resDesc.res.linear.sizeInBytes = mem.device_size;
1202                 }
1203
1204                 CUDA_TEXTURE_DESC texDesc;
1205                 memset(&texDesc, 0, sizeof(texDesc));
1206                 texDesc.addressMode[0] = address_mode;
1207                 texDesc.addressMode[1] = address_mode;
1208                 texDesc.addressMode[2] = address_mode;
1209                 texDesc.filterMode = filter_mode;
1210                 texDesc.flags = CU_TRSF_NORMALIZED_COORDINATES;
1211
1212                 cuda_assert(cuTexObjectCreate(&cmem->texobject, &resDesc, &texDesc, NULL));
1213
1214                 /* Resize once */
1215                 if(flat_slot >= texture_info.size()) {
1216                         /* Allocate some slots in advance, to reduce the number
1217                          * of re-allocations. */
1218                         texture_info.resize(flat_slot + 128);
1219                 }
1220
1221                 /* Set Mapping and tag that we need to (re-)upload to device */
1222                 TextureInfo& info = texture_info[flat_slot];
1223                 info.data = (uint64_t)cmem->texobject;
1224                 info.cl_buffer = 0;
1225                 info.interpolation = mem.interpolation;
1226                 info.extension = mem.extension;
1227                 info.width = mem.data_width;
1228                 info.height = mem.data_height;
1229                 info.depth = mem.data_depth;
1230                 need_texture_info = true;
1231         }
1232
1233         void tex_free(device_memory& mem)
1234         {
1235                 if(mem.device_pointer) {
1236                         CUDAContextScope scope(this);
1237                         const CUDAMem& cmem = cuda_mem_map[&mem];
1238
1239                         if(cmem.texobject) {
1240                                 /* Free bindless texture. */
1241                                 cuTexObjectDestroy(cmem.texobject);
1242                         }
1243
1244                         if(cmem.array) {
1245                                 /* Free array. */
1246                                 cuArrayDestroy(cmem.array);
1247                                 stats.mem_free(mem.device_size);
1248                                 mem.device_pointer = 0;
1249                                 mem.device_size = 0;
1250
1251                                 cuda_mem_map.erase(cuda_mem_map.find(&mem));
1252                         }
1253                         else {
1254                                 generic_free(mem);
1255                         }
1256                 }
1257         }
1258
1259 #define CUDA_GET_BLOCKSIZE(func, w, h)                                                                          \
1260                         int threads_per_block;                                                                              \
1261                         cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, func)); \
1262                         int threads = (int)sqrt((float)threads_per_block);                                                  \
1263                         int xblocks = ((w) + threads - 1)/threads;                                                          \
1264                         int yblocks = ((h) + threads - 1)/threads;
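/* Illustrative example: with a limit of 1024 threads per block this resolves to
 * 32x32 thread blocks, so e.g. a 1920x1080 area is covered by 60x34 blocks. */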
1265
1266 #define CUDA_LAUNCH_KERNEL(func, args)                      \
1267                         cuda_assert(cuLaunchKernel(func,                \
1268                                                    xblocks, yblocks, 1, \
1269                                                    threads, threads, 1, \
1270                                                    0, 0, args, 0));
1271
1272 /* Similar to the above, but for 1-dimensional blocks. */
1273 #define CUDA_GET_BLOCKSIZE_1D(func, w, h)                                                                       \
1274                         int threads_per_block;                                                                              \
1275                         cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, func)); \
1276                         int xblocks = ((w) + threads_per_block - 1)/threads_per_block;                                      \
1277                         int yblocks = h;
1278
1279 #define CUDA_LAUNCH_KERNEL_1D(func, args)                       \
1280                         cuda_assert(cuLaunchKernel(func,                    \
1281                                                    xblocks, yblocks, 1,     \
1282                                                    threads_per_block, 1, 1, \
1283                                                    0, 0, args, 0));
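/* Illustrative: the 1D variants spread w work items over ceil(w / threads_per_block)
 * blocks in x and use h directly as the y block count; the NLM kernels below pass
 * w*h pixels for x and num_shifts for y. */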
1284
1285         bool denoising_non_local_means(device_ptr image_ptr, device_ptr guide_ptr, device_ptr variance_ptr, device_ptr out_ptr,
1286                                        DenoisingTask *task)
1287         {
1288                 if(have_error())
1289                         return false;
1290
1291                 CUDAContextScope scope(this);
1292
1293                 int stride = task->buffer.stride;
1294                 int w = task->buffer.width;
1295                 int h = task->buffer.h;
1296                 int r = task->nlm_state.r;
1297                 int f = task->nlm_state.f;
1298                 float a = task->nlm_state.a;
1299                 float k_2 = task->nlm_state.k_2;
1300
1301                 int pass_stride = task->buffer.pass_stride;
1302                 int num_shifts = (2*r+1)*(2*r+1);
1303                 int channel_offset = task->nlm_state.is_color? task->buffer.pass_stride : 0;
1304                 int frame_offset = 0;
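                     /* Illustrative: a denoising radius of r = 8 gives
                      * num_shifts = (2*8+1)*(2*8+1) = 289 window shifts. */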
1305
1306                 if(have_error())
1307                         return false;
1308
1309                 CUdeviceptr difference     = cuda_device_ptr(task->buffer.temporary_mem.device_pointer);
1310                 CUdeviceptr blurDifference = difference + sizeof(float)*pass_stride*num_shifts;
1311                 CUdeviceptr weightAccum = difference + 2*sizeof(float)*pass_stride*num_shifts;
1312                 CUdeviceptr scale_ptr = 0;
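                     /* The single temporary allocation is split into three consecutive
                      * regions: raw per-shift differences, their blurred copy and the
                      * weight accumulator. scale_ptr is left NULL here (denoising_accumulate
                      * below passes a real scale buffer). */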
1313
1314                 cuda_assert(cuMemsetD8(weightAccum, 0, sizeof(float)*pass_stride));
1315                 cuda_assert(cuMemsetD8(out_ptr, 0, sizeof(float)*pass_stride));
1316
1317                 {
1318                         CUfunction cuNLMCalcDifference, cuNLMBlur, cuNLMCalcWeight, cuNLMUpdateOutput;
1319                         cuda_assert(cuModuleGetFunction(&cuNLMCalcDifference, cuFilterModule, "kernel_cuda_filter_nlm_calc_difference"));
1320                         cuda_assert(cuModuleGetFunction(&cuNLMBlur,           cuFilterModule, "kernel_cuda_filter_nlm_blur"));
1321                         cuda_assert(cuModuleGetFunction(&cuNLMCalcWeight,     cuFilterModule, "kernel_cuda_filter_nlm_calc_weight"));
1322                         cuda_assert(cuModuleGetFunction(&cuNLMUpdateOutput,   cuFilterModule, "kernel_cuda_filter_nlm_update_output"));
1323
1324                         cuda_assert(cuFuncSetCacheConfig(cuNLMCalcDifference, CU_FUNC_CACHE_PREFER_L1));
1325                         cuda_assert(cuFuncSetCacheConfig(cuNLMBlur,           CU_FUNC_CACHE_PREFER_L1));
1326                         cuda_assert(cuFuncSetCacheConfig(cuNLMCalcWeight,     CU_FUNC_CACHE_PREFER_L1));
1327                         cuda_assert(cuFuncSetCacheConfig(cuNLMUpdateOutput,   CU_FUNC_CACHE_PREFER_L1));
1328
1329                         CUDA_GET_BLOCKSIZE_1D(cuNLMCalcDifference, w*h, num_shifts);
1330
1331                         void *calc_difference_args[] = {&guide_ptr, &variance_ptr, &scale_ptr, &difference, &w, &h, &stride, &pass_stride, &r, &channel_offset, &frame_offset, &a, &k_2};
1332                         void *blur_args[]            = {&difference, &blurDifference, &w, &h, &stride, &pass_stride, &r, &f};
1333                         void *calc_weight_args[]     = {&blurDifference, &difference, &w, &h, &stride, &pass_stride, &r, &f};
1334                         void *update_output_args[]   = {&blurDifference, &image_ptr, &out_ptr, &weightAccum, &w, &h, &stride, &pass_stride, &channel_offset, &r, &f};
1335
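                             /* The NLM filter is a fixed five-kernel pipeline: per-shift
                              * differences, a blur, weight computation, another blur and
                              * weighted accumulation; the separate normalization pass
                              * below divides by the accumulated weights. */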
1336                         CUDA_LAUNCH_KERNEL_1D(cuNLMCalcDifference, calc_difference_args);
1337                         CUDA_LAUNCH_KERNEL_1D(cuNLMBlur, blur_args);
1338                         CUDA_LAUNCH_KERNEL_1D(cuNLMCalcWeight, calc_weight_args);
1339                         CUDA_LAUNCH_KERNEL_1D(cuNLMBlur, blur_args);
1340                         CUDA_LAUNCH_KERNEL_1D(cuNLMUpdateOutput, update_output_args);
1341                 }
1342
1343                 {
1344                         CUfunction cuNLMNormalize;
1345                         cuda_assert(cuModuleGetFunction(&cuNLMNormalize, cuFilterModule, "kernel_cuda_filter_nlm_normalize"));
1346                         cuda_assert(cuFuncSetCacheConfig(cuNLMNormalize, CU_FUNC_CACHE_PREFER_L1));
1347                         void *normalize_args[] = {&out_ptr, &weightAccum, &w, &h, &stride};
1348                         CUDA_GET_BLOCKSIZE(cuNLMNormalize, w, h);
1349                         CUDA_LAUNCH_KERNEL(cuNLMNormalize, normalize_args);
1350                         cuda_assert(cuCtxSynchronize());
1351                 }
1352
1353                 return !have_error();
1354         }
1355
1356         bool denoising_construct_transform(DenoisingTask *task)
1357         {
1358                 if(have_error())
1359                         return false;
1360
1361                 CUDAContextScope scope(this);
1362
1363                 CUfunction cuFilterConstructTransform;
1364                 cuda_assert(cuModuleGetFunction(&cuFilterConstructTransform, cuFilterModule, "kernel_cuda_filter_construct_transform"));
1365                 cuda_assert(cuFuncSetCacheConfig(cuFilterConstructTransform, CU_FUNC_CACHE_PREFER_SHARED));
1366                 CUDA_GET_BLOCKSIZE(cuFilterConstructTransform,
1367                                    task->storage.w,
1368                                    task->storage.h);
1369
1370                 void *args[] = {&task->buffer.mem.device_pointer,
1371                                 &task->tile_info_mem.device_pointer,
1372                                 &task->storage.transform.device_pointer,
1373                                 &task->storage.rank.device_pointer,
1374                                 &task->filter_area,
1375                                 &task->rect,
1376                                 &task->radius,
1377                                 &task->pca_threshold,
1378                                 &task->buffer.pass_stride,
1379                                 &task->buffer.frame_stride,
1380                                 &task->buffer.use_time};
1381                 CUDA_LAUNCH_KERNEL(cuFilterConstructTransform, args);
1382                 cuda_assert(cuCtxSynchronize());
1383
1384                 return !have_error();
1385         }
1386
1387         bool denoising_accumulate(device_ptr color_ptr,
1388                                   device_ptr color_variance_ptr,
1389                                   device_ptr scale_ptr,
1390                                   int frame,
1391                                   DenoisingTask *task)
1392         {
1393                 if(have_error())
1394                         return false;
1395
1396                 CUDAContextScope scope(this);
1397
1398                 int r = task->radius;
1399                 int f = 4;
1400                 float a = 1.0f;
1401                 float k_2 = task->nlm_k_2;
1402
1403                 int w = task->reconstruction_state.source_w;
1404                 int h = task->reconstruction_state.source_h;
1405                 int stride = task->buffer.stride;
1406                 int frame_offset = frame * task->buffer.frame_stride;
1407                 int t = task->tile_info->frames[frame];
1408
1409                 int pass_stride = task->buffer.pass_stride;
1410                 int num_shifts = (2*r+1)*(2*r+1);
1411
1412                 if(have_error())
1413                         return false;
1414
1415                 CUdeviceptr difference     = cuda_device_ptr(task->buffer.temporary_mem.device_pointer);
1416                 CUdeviceptr blurDifference = difference + sizeof(float)*pass_stride*num_shifts;
1417
1418                 CUfunction cuNLMCalcDifference, cuNLMBlur, cuNLMCalcWeight, cuNLMConstructGramian;
1419                 cuda_assert(cuModuleGetFunction(&cuNLMCalcDifference,   cuFilterModule, "kernel_cuda_filter_nlm_calc_difference"));
1420                 cuda_assert(cuModuleGetFunction(&cuNLMBlur,             cuFilterModule, "kernel_cuda_filter_nlm_blur"));
1421                 cuda_assert(cuModuleGetFunction(&cuNLMCalcWeight,       cuFilterModule, "kernel_cuda_filter_nlm_calc_weight"));
1422                 cuda_assert(cuModuleGetFunction(&cuNLMConstructGramian, cuFilterModule, "kernel_cuda_filter_nlm_construct_gramian"));
1423
1424                 cuda_assert(cuFuncSetCacheConfig(cuNLMCalcDifference,   CU_FUNC_CACHE_PREFER_L1));
1425                 cuda_assert(cuFuncSetCacheConfig(cuNLMBlur,             CU_FUNC_CACHE_PREFER_L1));
1426                 cuda_assert(cuFuncSetCacheConfig(cuNLMCalcWeight,       CU_FUNC_CACHE_PREFER_L1));
1427                 cuda_assert(cuFuncSetCacheConfig(cuNLMConstructGramian, CU_FUNC_CACHE_PREFER_SHARED));
1428
1429                 CUDA_GET_BLOCKSIZE_1D(cuNLMCalcDifference,
1430                                      task->reconstruction_state.source_w * task->reconstruction_state.source_h,
1431                                      num_shifts);
1432
1433                 void *calc_difference_args[] = {&color_ptr,
1434                                                 &color_variance_ptr,
1435                                                 &scale_ptr,
1436                                                 &difference,
1437                                                 &w, &h,
1438                                                 &stride, &pass_stride,
1439                                                 &r, &pass_stride,
1440                                                 &frame_offset,
1441                                                 &a, &k_2};
1442                 void *blur_args[]            = {&difference, &blurDifference, &w, &h, &stride, &pass_stride, &r, &f};
1443                 void *calc_weight_args[]     = {&blurDifference, &difference, &w, &h, &stride, &pass_stride, &r, &f};
1444                 void *construct_gramian_args[] = {&t,
1445                                                   &blurDifference,
1446                                                   &task->buffer.mem.device_pointer,
1447                                                   &task->storage.transform.device_pointer,
1448                                                   &task->storage.rank.device_pointer,
1449                                                   &task->storage.XtWX.device_pointer,
1450                                                   &task->storage.XtWY.device_pointer,
1451                                                   &task->reconstruction_state.filter_window,
1452                                                   &w, &h, &stride,
1453                                                   &pass_stride, &r,
1454                                                   &f,
1455                                                   &frame_offset,
1456                                                   &task->buffer.use_time};
1457
1458                 CUDA_LAUNCH_KERNEL_1D(cuNLMCalcDifference, calc_difference_args);
1459                 CUDA_LAUNCH_KERNEL_1D(cuNLMBlur, blur_args);
1460                 CUDA_LAUNCH_KERNEL_1D(cuNLMCalcWeight, calc_weight_args);
1461                 CUDA_LAUNCH_KERNEL_1D(cuNLMBlur, blur_args);
1462                 CUDA_LAUNCH_KERNEL_1D(cuNLMConstructGramian, construct_gramian_args);
1463                 cuda_assert(cuCtxSynchronize());
1464
1465                 return !have_error();
1466         }
1467
1468         bool denoising_solve(device_ptr output_ptr,
1469                              DenoisingTask *task)
1470         {
1471                 CUfunction cuFinalize;
1472                 cuda_assert(cuModuleGetFunction(&cuFinalize, cuFilterModule, "kernel_cuda_filter_finalize"));
1473                 cuda_assert(cuFuncSetCacheConfig(cuFinalize, CU_FUNC_CACHE_PREFER_L1));
1474                 void *finalize_args[] = {&output_ptr,
1475                                          &task->storage.rank.device_pointer,
1476                                          &task->storage.XtWX.device_pointer,
1477                                          &task->storage.XtWY.device_pointer,
1478                                          &task->filter_area,
1479                                          &task->reconstruction_state.buffer_params.x,
1480                                          &task->render_buffer.samples};
1481                 CUDA_GET_BLOCKSIZE(cuFinalize,
1482                                    task->reconstruction_state.source_w,
1483                                    task->reconstruction_state.source_h);
1484                 CUDA_LAUNCH_KERNEL(cuFinalize, finalize_args);
1485                 cuda_assert(cuCtxSynchronize());
1486
1487                 return !have_error();
1488         }
1489
1490         bool denoising_combine_halves(device_ptr a_ptr, device_ptr b_ptr,
1491                                       device_ptr mean_ptr, device_ptr variance_ptr,
1492                                       int r, int4 rect, DenoisingTask *task)
1493         {
1494                 if(have_error())
1495                         return false;
1496
1497                 CUDAContextScope scope(this);
1498
1499                 CUfunction cuFilterCombineHalves;
1500                 cuda_assert(cuModuleGetFunction(&cuFilterCombineHalves, cuFilterModule, "kernel_cuda_filter_combine_halves"));
1501                 cuda_assert(cuFuncSetCacheConfig(cuFilterCombineHalves, CU_FUNC_CACHE_PREFER_L1));
1502                 CUDA_GET_BLOCKSIZE(cuFilterCombineHalves,
1503                                    task->rect.z-task->rect.x,
1504                                    task->rect.w-task->rect.y);
1505
1506                 void *args[] = {&mean_ptr,
1507                                 &variance_ptr,
1508                                 &a_ptr,
1509                                 &b_ptr,
1510                                 &rect,
1511                                 &r};
1512                 CUDA_LAUNCH_KERNEL(cuFilterCombineHalves, args);
1513                 cuda_assert(cuCtxSynchronize());
1514
1515                 return !have_error();
1516         }
1517
1518         bool denoising_divide_shadow(device_ptr a_ptr, device_ptr b_ptr,
1519                                      device_ptr sample_variance_ptr, device_ptr sv_variance_ptr,
1520                                      device_ptr buffer_variance_ptr, DenoisingTask *task)
1521         {
1522                 if(have_error())
1523                         return false;
1524
1525                 CUDAContextScope scope(this);
1526
1527                 CUfunction cuFilterDivideShadow;
1528                 cuda_assert(cuModuleGetFunction(&cuFilterDivideShadow, cuFilterModule, "kernel_cuda_filter_divide_shadow"));
1529                 cuda_assert(cuFuncSetCacheConfig(cuFilterDivideShadow, CU_FUNC_CACHE_PREFER_L1));
1530                 CUDA_GET_BLOCKSIZE(cuFilterDivideShadow,
1531                                    task->rect.z-task->rect.x,
1532                                    task->rect.w-task->rect.y);
1533
1534                 void *args[] = {&task->render_buffer.samples,
1535                                 &task->tile_info_mem.device_pointer,
1536                                 &a_ptr,
1537                                 &b_ptr,
1538                                 &sample_variance_ptr,
1539                                 &sv_variance_ptr,
1540                                 &buffer_variance_ptr,
1541                                 &task->rect,
1542                                 &task->render_buffer.pass_stride,
1543                                 &task->render_buffer.offset};
1544                 CUDA_LAUNCH_KERNEL(cuFilterDivideShadow, args);
1545                 cuda_assert(cuCtxSynchronize());
1546
1547                 return !have_error();
1548         }
1549
1550         bool denoising_get_feature(int mean_offset,
1551                                    int variance_offset,
1552                                    device_ptr mean_ptr,
1553                                    device_ptr variance_ptr,
1554                                    float scale,
1555                                    DenoisingTask *task)
1556         {
1557                 if(have_error())
1558                         return false;
1559
1560                 CUDAContextScope scope(this);
1561
1562                 CUfunction cuFilterGetFeature;
1563                 cuda_assert(cuModuleGetFunction(&cuFilterGetFeature, cuFilterModule, "kernel_cuda_filter_get_feature"));
1564                 cuda_assert(cuFuncSetCacheConfig(cuFilterGetFeature, CU_FUNC_CACHE_PREFER_L1));
1565                 CUDA_GET_BLOCKSIZE(cuFilterGetFeature,
1566                                    task->rect.z-task->rect.x,
1567                                    task->rect.w-task->rect.y);
1568
1569                 void *args[] = {&task->render_buffer.samples,
1570                                 &task->tile_info_mem.device_pointer,
1571                                 &mean_offset,
1572                                 &variance_offset,
1573                                 &mean_ptr,
1574                                 &variance_ptr,
1575                                 &scale,
1576                                 &task->rect,
1577                                 &task->render_buffer.pass_stride,
1578                                 &task->render_buffer.offset};
1579                 CUDA_LAUNCH_KERNEL(cuFilterGetFeature, args);
1580                 cuda_assert(cuCtxSynchronize());
1581
1582                 return !have_error();
1583         }
1584
1585         bool denoising_write_feature(int out_offset,
1586                                      device_ptr from_ptr,
1587                                      device_ptr buffer_ptr,
1588                                      DenoisingTask *task)
1589         {
1590                 if(have_error())
1591                         return false;
1592
1593                 CUDAContextScope scope(this);
1594
1595                 CUfunction cuFilterWriteFeature;
1596                 cuda_assert(cuModuleGetFunction(&cuFilterWriteFeature, cuFilterModule, "kernel_cuda_filter_write_feature"));
1597                 cuda_assert(cuFuncSetCacheConfig(cuFilterWriteFeature, CU_FUNC_CACHE_PREFER_L1));
1598                 CUDA_GET_BLOCKSIZE(cuFilterWriteFeature,
1599                                    task->filter_area.z,
1600                                    task->filter_area.w);
1601
1602                 void *args[] = {&task->render_buffer.samples,
1603                                 &task->reconstruction_state.buffer_params,
1604                                 &task->filter_area,
1605                                 &from_ptr,
1606                                 &buffer_ptr,
1607                                 &out_offset,
1608                                 &task->rect};
1609                 CUDA_LAUNCH_KERNEL(cuFilterWriteFeature, args);
1610                 cuda_assert(cuCtxSynchronize());
1611
1612                 return !have_error();
1613         }
1614
1615         bool denoising_detect_outliers(device_ptr image_ptr,
1616                                        device_ptr variance_ptr,
1617                                        device_ptr depth_ptr,
1618                                        device_ptr output_ptr,
1619                                        DenoisingTask *task)
1620         {
1621                 if(have_error())
1622                         return false;
1623
1624                 CUDAContextScope scope(this);
1625
1626                 CUfunction cuFilterDetectOutliers;
1627                 cuda_assert(cuModuleGetFunction(&cuFilterDetectOutliers, cuFilterModule, "kernel_cuda_filter_detect_outliers"));
1628                 cuda_assert(cuFuncSetCacheConfig(cuFilterDetectOutliers, CU_FUNC_CACHE_PREFER_L1));
1629                 CUDA_GET_BLOCKSIZE(cuFilterDetectOutliers,
1630                                    task->rect.z-task->rect.x,
1631                                    task->rect.w-task->rect.y);
1632
1633                 void *args[] = {&image_ptr,
1634                                 &variance_ptr,
1635                                 &depth_ptr,
1636                                 &output_ptr,
1637                                 &task->rect,
1638                                 &task->buffer.pass_stride};
1639
1640                 CUDA_LAUNCH_KERNEL(cuFilterDetectOutliers, args);
1641                 cuda_assert(cuCtxSynchronize());
1642
1643                 return !have_error();
1644         }
1645
1646         void denoise(RenderTile &rtile, DenoisingTask& denoising)
1647         {
1648                 denoising.functions.construct_transform = function_bind(&CUDADevice::denoising_construct_transform, this, &denoising);
1649                 denoising.functions.accumulate = function_bind(&CUDADevice::denoising_accumulate, this, _1, _2, _3, _4, &denoising);
1650                 denoising.functions.solve = function_bind(&CUDADevice::denoising_solve, this, _1, &denoising);
1651                 denoising.functions.divide_shadow = function_bind(&CUDADevice::denoising_divide_shadow, this, _1, _2, _3, _4, _5, &denoising);
1652                 denoising.functions.non_local_means = function_bind(&CUDADevice::denoising_non_local_means, this, _1, _2, _3, _4, &denoising);
1653                 denoising.functions.combine_halves = function_bind(&CUDADevice::denoising_combine_halves, this, _1, _2, _3, _4, _5, _6, &denoising);
1654                 denoising.functions.get_feature = function_bind(&CUDADevice::denoising_get_feature, this, _1, _2, _3, _4, _5, &denoising);
1655                 denoising.functions.write_feature = function_bind(&CUDADevice::denoising_write_feature, this, _1, _2, _3, &denoising);
1656                 denoising.functions.detect_outliers = function_bind(&CUDADevice::denoising_detect_outliers, this, _1, _2, _3, _4, &denoising);
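                     /* function_bind fixes this device and the DenoisingTask; the _1.._6
                      * placeholders forward each callback's remaining arguments at call time. */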
1657
1658                 denoising.filter_area = make_int4(rtile.x, rtile.y, rtile.w, rtile.h);
1659                 denoising.render_buffer.samples = rtile.sample;
1660                 denoising.buffer.gpu_temporary_mem = true;
1661
1662                 denoising.run_denoising(&rtile);
1663         }
1664
1665         void path_trace(DeviceTask& task, RenderTile& rtile, device_vector<WorkTile>& work_tiles)
1666         {
1667                 scoped_timer timer(&rtile.buffers->render_time);
1668
1669                 if(have_error())
1670                         return;
1671
1672                 CUDAContextScope scope(this);
1673                 CUfunction cuPathTrace;
1674
1675                 /* Get kernel function. */
1676                 if(task.integrator_branched) {
1677                         cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_branched_path_trace"));
1678                 }
1679                 else {
1680                         cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_path_trace"));
1681                 }
1682
1683                 if(have_error()) {
1684                         return;
1685                 }
1686
1687                 cuda_assert(cuFuncSetCacheConfig(cuPathTrace, CU_FUNC_CACHE_PREFER_L1));
1688
1689                 /* Allocate work tile. */
1690                 work_tiles.alloc(1);
1691
1692                 WorkTile *wtile = work_tiles.data();
1693                 wtile->x = rtile.x;
1694                 wtile->y = rtile.y;
1695                 wtile->w = rtile.w;
1696                 wtile->h = rtile.h;
1697                 wtile->offset = rtile.offset;
1698                 wtile->stride = rtile.stride;
1699                 wtile->buffer = (float*)cuda_device_ptr(rtile.buffer);
1700
1701                 /* Prepare work size. More step samples render faster, but for now we
1702                  * remain conservative for GPUs connected to a display to avoid driver
1703                  * timeouts and display freezing. */
1704                 int min_blocks, num_threads_per_block;
1705                 cuda_assert(cuOccupancyMaxPotentialBlockSize(&min_blocks, &num_threads_per_block, cuPathTrace, NULL, 0, 0));
1706                 if(!info.display_device) {
1707                         min_blocks *= 8;
1708                 }
1709
1710                 uint step_samples = divide_up(min_blocks * num_threads_per_block, wtile->w * wtile->h);
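                     /* Illustrative numbers only: with min_blocks = 80 scaled to 640 on a
                      * non-display device and 256 threads per block, a 256x256 tile gets
                      * step_samples = divide_up(640*256, 65536) = 3. */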
1711
1712                 /* Render all samples. */
1713                 int start_sample = rtile.start_sample;
1714                 int end_sample = rtile.start_sample + rtile.num_samples;
1715
1716                 for(int sample = start_sample; sample < end_sample; sample += step_samples) {
1717                         /* Setup and copy work tile to device. */
1718                         wtile->start_sample = sample;
1719                         wtile->num_samples = min(step_samples, end_sample - sample);
1720                         work_tiles.copy_to_device();
1721
1722                         CUdeviceptr d_work_tiles = cuda_device_ptr(work_tiles.device_pointer);
1723                         uint total_work_size = wtile->w * wtile->h * wtile->num_samples;
1724                         uint num_blocks = divide_up(total_work_size, num_threads_per_block);
1725
1726                         /* Launch kernel. */
1727                         void *args[] = {&d_work_tiles,
1728                                         &total_work_size};
1729
1730                         cuda_assert(cuLaunchKernel(cuPathTrace,
1731                                                    num_blocks, 1, 1,
1732                                                    num_threads_per_block, 1, 1,
1733                                                    0, 0, args, 0));
1734
1735                         cuda_assert(cuCtxSynchronize());
1736
1737                         /* Update progress. */
1738                         rtile.sample = sample + wtile->num_samples;
1739                         task.update_progress(&rtile, rtile.w*rtile.h*wtile->num_samples);
1740
1741                         if(task.get_cancel()) {
1742                                 if(task.need_finish_queue == false)
1743                                         break;
1744                         }
1745                 }
1746         }
1747
1748         void film_convert(DeviceTask& task, device_ptr buffer, device_ptr rgba_byte, device_ptr rgba_half)
1749         {
1750                 if(have_error())
1751                         return;
1752
1753                 CUDAContextScope scope(this);
1754
1755                 CUfunction cuFilmConvert;
1756                 CUdeviceptr d_rgba = map_pixels((rgba_byte)? rgba_byte: rgba_half);
1757                 CUdeviceptr d_buffer = cuda_device_ptr(buffer);
1758
1759                 /* get kernel function */
1760                 if(rgba_half) {
1761                         cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_half_float"));
1762                 }
1763                 else {
1764                         cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_byte"));
1765                 }
1766
1767
1768                 float sample_scale = 1.0f/(task.sample + 1);
1769
1770                 /* pass in parameters */
1771                 void *args[] = {&d_rgba,
1772                                 &d_buffer,
1773                                 &sample_scale,
1774                                 &task.x,
1775                                 &task.y,
1776                                 &task.w,
1777                                 &task.h,
1778                                 &task.offset,
1779                                 &task.stride};
1780
1781                 /* launch kernel */
1782                 int threads_per_block;
1783                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuFilmConvert));
1784
1785                 int xthreads = (int)sqrt(threads_per_block);
1786                 int ythreads = (int)sqrt(threads_per_block);
1787                 int xblocks = (task.w + xthreads - 1)/xthreads;
1788                 int yblocks = (task.h + ythreads - 1)/ythreads;
1789
1790                 cuda_assert(cuFuncSetCacheConfig(cuFilmConvert, CU_FUNC_CACHE_PREFER_L1));
1791
1792                 cuda_assert(cuLaunchKernel(cuFilmConvert,
1793                                            xblocks , yblocks, 1, /* blocks */
1794                                            xthreads, ythreads, 1, /* threads */
1795                                            0, 0, args, 0));
1796
1797                 unmap_pixels((rgba_byte)? rgba_byte: rgba_half);
1798
1799                 cuda_assert(cuCtxSynchronize());
1800         }
1801
1802         void shader(DeviceTask& task)
1803         {
1804                 if(have_error())
1805                         return;
1806
1807                 CUDAContextScope scope(this);
1808
1809                 CUfunction cuShader;
1810                 CUdeviceptr d_input = cuda_device_ptr(task.shader_input);
1811                 CUdeviceptr d_output = cuda_device_ptr(task.shader_output);
1812
1813                 /* get kernel function */
1814                 if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
1815                         cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_bake"));
1816                 }
1817                 else if(task.shader_eval_type == SHADER_EVAL_DISPLACE) {
1818                         cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_displace"));
1819                 }
1820                 else {
1821                         cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_background"));
1822                 }
1823
1824                 /* do tasks in smaller chunks, so we can cancel them */
1825                 const int shader_chunk_size = 65536;
1826                 const int start = task.shader_x;
1827                 const int end = task.shader_x + task.shader_w;
1828                 int offset = task.offset;
1829
1830                 bool canceled = false;
1831                 for(int sample = 0; sample < task.num_samples && !canceled; sample++) {
1832                         for(int shader_x = start; shader_x < end; shader_x += shader_chunk_size) {
1833                                 int shader_w = min(shader_chunk_size, end - shader_x);
1834
1835                                 /* pass in parameters */
1836                                 void *args[8];
1837                                 int arg = 0;
1838                                 args[arg++] = &d_input;
1839                                 args[arg++] = &d_output;
1840                                 args[arg++] = &task.shader_eval_type;
1841                                 if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
1842                                         args[arg++] = &task.shader_filter;
1843                                 }
1844                                 args[arg++] = &shader_x;
1845                                 args[arg++] = &shader_w;
1846                                 args[arg++] = &offset;
1847                                 args[arg++] = &sample;
1848
1849                                 /* launch kernel */
1850                                 int threads_per_block;
1851                                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuShader));
1852
1853                                 int xblocks = (shader_w + threads_per_block - 1)/threads_per_block;
1854
1855                                 cuda_assert(cuFuncSetCacheConfig(cuShader, CU_FUNC_CACHE_PREFER_L1));
1856                                 cuda_assert(cuLaunchKernel(cuShader,
1857                                                            xblocks , 1, 1, /* blocks */
1858                                                            threads_per_block, 1, 1, /* threads */
1859                                                            0, 0, args, 0));
1860
1861                                 cuda_assert(cuCtxSynchronize());
1862
1863                                 if(task.get_cancel()) {
1864                                         canceled = true;
1865                                         break;
1866                                 }
1867                         }
1868
1869                         task.update_progress(NULL);
1870                 }
1871         }
1872
1873         CUdeviceptr map_pixels(device_ptr mem)
1874         {
1875                 if(!background) {
1876                         PixelMem pmem = pixel_mem_map[mem];
1877                         CUdeviceptr buffer;
1878
1879                         size_t bytes;
1880                         cuda_assert(cuGraphicsMapResources(1, &pmem.cuPBOresource, 0));
1881                         cuda_assert(cuGraphicsResourceGetMappedPointer(&buffer, &bytes, pmem.cuPBOresource));
1882
1883                         return buffer;
1884                 }
1885
1886                 return cuda_device_ptr(mem);
1887         }
1888
1889         void unmap_pixels(device_ptr mem)
1890         {
1891                 if(!background) {
1892                         PixelMem pmem = pixel_mem_map[mem];
1893
1894                         cuda_assert(cuGraphicsUnmapResources(1, &pmem.cuPBOresource, 0));
1895                 }
1896         }
1897
1898         void pixels_alloc(device_memory& mem)
1899         {
1900                 PixelMem pmem;
1901
1902                 pmem.w = mem.data_width;
1903                 pmem.h = mem.data_height;
1904
1905                 CUDAContextScope scope(this);
1906
1907                 glGenBuffers(1, &pmem.cuPBO);
1908                 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
1909                 if(mem.data_type == TYPE_HALF)
1910                         glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(GLhalf)*4, NULL, GL_DYNAMIC_DRAW);
1911                 else
1912                         glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(uint8_t)*4, NULL, GL_DYNAMIC_DRAW);
1913
1914                 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1915
1916                 glActiveTexture(GL_TEXTURE0);
1917                 glGenTextures(1, &pmem.cuTexId);
1918                 glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
1919                 if(mem.data_type == TYPE_HALF)
1920                         glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F, pmem.w, pmem.h, 0, GL_RGBA, GL_HALF_FLOAT, NULL);
1921                 else
1922                         glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, pmem.w, pmem.h, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
1923                 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
1924                 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
1925                 glBindTexture(GL_TEXTURE_2D, 0);
1926
1927                 CUresult result = cuGraphicsGLRegisterBuffer(&pmem.cuPBOresource, pmem.cuPBO, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);
1928
1929                 if(result == CUDA_SUCCESS) {
1930                         mem.device_pointer = pmem.cuTexId;
1931                         pixel_mem_map[mem.device_pointer] = pmem;
1932
1933                         mem.device_size = mem.memory_size();
1934                         stats.mem_alloc(mem.device_size);
1935
1936                         return;
1937                 }
1938                 else {
1939                         /* failed to register buffer, fall back to no interop */
1940                         glDeleteBuffers(1, &pmem.cuPBO);
1941                         glDeleteTextures(1, &pmem.cuTexId);
1942
1943                         background = true;
1944                 }
1945         }
1946
1947         void pixels_copy_from(device_memory& mem, int y, int w, int h)
1948         {
1949                 PixelMem pmem = pixel_mem_map[mem.device_pointer];
1950
1951                 CUDAContextScope scope(this);
1952
1953                 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
1954                 uchar *pixels = (uchar*)glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_READ_ONLY);
1955                 size_t offset = sizeof(uchar)*4*y*w;
1956                 memcpy((uchar*)mem.host_pointer + offset, pixels + offset, sizeof(uchar)*4*w*h);
1957                 glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
1958                 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1959         }
1960
1961         void pixels_free(device_memory& mem)
1962         {
1963                 if(mem.device_pointer) {
1964                         PixelMem pmem = pixel_mem_map[mem.device_pointer];
1965
1966                         CUDAContextScope scope(this);
1967
1968                         cuda_assert(cuGraphicsUnregisterResource(pmem.cuPBOresource));
1969                         glDeleteBuffers(1, &pmem.cuPBO);
1970                         glDeleteTextures(1, &pmem.cuTexId);
1971
1972                         pixel_mem_map.erase(pixel_mem_map.find(mem.device_pointer));
1973                         mem.device_pointer = 0;
1974
1975                         stats.mem_free(mem.device_size);
1976                         mem.device_size = 0;
1977                 }
1978         }
1979
1980         void draw_pixels(
1981             device_memory& mem, int y,
1982             int w, int h, int width, int height,
1983             int dx, int dy, int dw, int dh, bool transparent,
1984             const DeviceDrawParams &draw_params)
1985         {
1986                 assert(mem.type == MEM_PIXELS);
1987
1988                 if(!background) {
1989                         const bool use_fallback_shader = (draw_params.bind_display_space_shader_cb == NULL);
1990                         PixelMem pmem = pixel_mem_map[mem.device_pointer];
1991                         float *vpointer;
1992
1993                         CUDAContextScope scope(this);
1994
1995                         /* For multi-device rendering, this assumes the inefficient method of
1996                          * allocating all pixels on the device even though we only render to a subset. */
1997                         size_t offset = 4*y*w;
1998
1999                         if(mem.data_type == TYPE_HALF)
2000                                 offset *= sizeof(GLhalf);
2001                         else
2002                                 offset *= sizeof(uint8_t);
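                             /* Illustrative: a half-float buffer at y = 128 with w = 1920 gives
                              * offset = 4*128*1920*sizeof(GLhalf) = 1966080 bytes. */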
2003
2004                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
2005                         glActiveTexture(GL_TEXTURE0);
2006                         glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
2007                         if(mem.data_type == TYPE_HALF) {
2008                                 glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_HALF_FLOAT, (void*)offset);
2009                         }
2010                         else {
2011                                 glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, (void*)offset);
2012                         }
2013                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
2014
2015                         if(transparent) {
2016                                 glEnable(GL_BLEND);
2017                                 glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
2018                         }
2019
2020                         GLint shader_program;
2021                         if(use_fallback_shader) {
2022                                 if(!bind_fallback_display_space_shader(dw, dh)) {
2023                                         return;
2024                                 }
2025                                 shader_program = fallback_shader_program;
2026                         }
2027                         else {
2028                                 draw_params.bind_display_space_shader_cb();
2029                                 glGetIntegerv(GL_CURRENT_PROGRAM, &shader_program);
2030                         }
2031
2032                         if(!vertex_buffer) {
2033                                 glGenBuffers(1, &vertex_buffer);
2034                         }
2035
2036                         glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
2037                         /* invalidate old contents - avoids stalling if buffer is still waiting in queue to be rendered */
2038                         glBufferData(GL_ARRAY_BUFFER, 16 * sizeof(float), NULL, GL_STREAM_DRAW);
2039
2040                         vpointer = (float *)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
2041
2042                         if(vpointer) {
2043                                 /* texture coordinate - vertex pair */
2044                                 vpointer[0] = 0.0f;
2045                                 vpointer[1] = 0.0f;
2046                                 vpointer[2] = dx;
2047                                 vpointer[3] = dy;
2048
2049                                 vpointer[4] = (float)w/(float)pmem.w;
2050                                 vpointer[5] = 0.0f;
2051                                 vpointer[6] = (float)width + dx;
2052                                 vpointer[7] = dy;
2053
2054                                 vpointer[8] = (float)w/(float)pmem.w;
2055                                 vpointer[9] = (float)h/(float)pmem.h;
2056                                 vpointer[10] = (float)width + dx;
2057                                 vpointer[11] = (float)height + dy;
2058
2059                                 vpointer[12] = 0.0f;
2060                                 vpointer[13] = (float)h/(float)pmem.h;
2061                                 vpointer[14] = dx;
2062                                 vpointer[15] = (float)height + dy;
2063
2064                                 glUnmapBuffer(GL_ARRAY_BUFFER);
2065                         }
2066
2067                         GLuint vertex_array_object;
2068                         GLuint position_attribute, texcoord_attribute;
2069
2070                         glGenVertexArrays(1, &vertex_array_object);
2071                         glBindVertexArray(vertex_array_object);
2072
2073                         texcoord_attribute = glGetAttribLocation(shader_program, "texCoord");
2074                         position_attribute = glGetAttribLocation(shader_program, "pos");
2075
2076                         glEnableVertexAttribArray(texcoord_attribute);
2077                         glEnableVertexAttribArray(position_attribute);
2078
2079                         glVertexAttribPointer(texcoord_attribute, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(float), (const GLvoid *)0);
2080                         glVertexAttribPointer(position_attribute, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(float), (const GLvoid *)(sizeof(float) * 2));
2081
2082                         glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
2083
2084                         if(use_fallback_shader) {
2085                                 glUseProgram(0);
2086                         }
2087                         else {
2088                                 draw_params.unbind_display_space_shader_cb();
2089                         }
2090
2091                         if(transparent) {
2092                                 glDisable(GL_BLEND);
2093                         }
2094
2095                         glBindTexture(GL_TEXTURE_2D, 0);
2096
2097                         return;
2098                 }
2099
2100                 Device::draw_pixels(mem, y, w, h, width, height, dx, dy, dw, dh, transparent, draw_params);
2101         }
2102
2103         void thread_run(DeviceTask *task)
2104         {
2105                 CUDAContextScope scope(this);
2106
2107                 if(task->type == DeviceTask::RENDER) {
2108                         DeviceRequestedFeatures requested_features;
2109                         if(use_split_kernel()) {
2110                                 if(split_kernel == NULL) {
2111                                         split_kernel = new CUDASplitKernel(this);
2112                                         split_kernel->load_kernels(requested_features);
2113                                 }
2114                         }
2115
2116                         device_vector<WorkTile> work_tiles(this, "work_tiles", MEM_READ_ONLY);
2117
2118                         /* keep rendering tiles until done */
2119                         RenderTile tile;
2120                         DenoisingTask denoising(this, *task);
2121
2122                         while(task->acquire_tile(this, tile)) {
2123                                 if(tile.task == RenderTile::PATH_TRACE) {
2124                                         if(use_split_kernel()) {
2125                                                 device_only_memory<uchar> void_buffer(this, "void_buffer");
2126                                                 split_kernel->path_trace(task, tile, void_buffer, void_buffer);
2127                                         }
2128                                         else {
2129                                                 path_trace(*task, tile, work_tiles);
2130                                         }
2131                                 }
2132                                 else if(tile.task == RenderTile::DENOISE) {
2133                                         tile.sample = tile.start_sample + tile.num_samples;
2134
2135                                         denoise(tile, denoising);
2136
2137                                         task->update_progress(&tile, tile.w*tile.h);
2138                                 }
2139
2140                                 task->release_tile(tile);
2141
2142                                 if(task->get_cancel()) {
2143                                         if(task->need_finish_queue == false)
2144                                                 break;
2145                                 }
2146                         }
2147
2148                         work_tiles.free();
2149                 }
2150                 else if(task->type == DeviceTask::SHADER) {
2151                         shader(*task);
2152
2153                         cuda_assert(cuCtxSynchronize());
2154                 }
2155         }
2156
2157         class CUDADeviceTask : public DeviceTask {
2158         public:
2159                 CUDADeviceTask(CUDADevice *device, DeviceTask& task)
2160                 : DeviceTask(task)
2161                 {
2162                         run = function_bind(&CUDADevice::thread_run, device, this);
2163                 }
2164         };
2165
2166         int get_split_task_count(DeviceTask& /*task*/)
2167         {
2168                 return 1;
2169         }
2170
2171         void task_add(DeviceTask& task)
2172         {
2173                 CUDAContextScope scope(this);
2174
2175                 /* Load texture info. */
2176                 load_texture_info();
2177
2178                 /* Synchronize all memory copies before executing task. */
2179                 cuda_assert(cuCtxSynchronize());
2180
2181                 if(task.type == DeviceTask::FILM_CONVERT) {
2182                         /* must be done in main thread due to OpenGL access */
2183                         film_convert(task, task.buffer, task.rgba_byte, task.rgba_half);
2184                 }
2185                 else {
2186                         task_pool.push(new CUDADeviceTask(this, task));
2187                 }
2188         }
2189
2190         void task_wait()
2191         {
2192                 task_pool.wait();
2193         }
2194
2195         void task_cancel()
2196         {
2197                 task_pool.cancel();
2198         }
2199
2200         friend class CUDASplitKernelFunction;
2201         friend class CUDASplitKernel;
2202         friend class CUDAContextScope;
2203 };
2204
2205 /* Redefine the cuda_assert macro so it can be used outside of the CUDADevice class,
2206  * now that the definition of that class is complete.
2207  */
2208 #undef cuda_assert
2209 #define cuda_assert(stmt) \
2210         { \
2211                 CUresult result = stmt; \
2212                 \
2213                 if(result != CUDA_SUCCESS) { \
2214                         string message = string_printf("CUDA error: %s in %s", cuewErrorString(result), #stmt); \
2215                         if(device->error_msg == "") \
2216                                 device->error_msg = message; \
2217                         fprintf(stderr, "%s\n", message.c_str()); \
2218                         /*cuda_abort();*/ \
2219                         device->cuda_error_documentation(); \
2220                 } \
2221         } (void) 0
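/* The trailing (void) 0 forces a semicolon at each cuda_assert() call site,
 * so the macro reads like a normal statement. */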
2222
2223
2224 /* CUDA context scope. */
2225
2226 CUDAContextScope::CUDAContextScope(CUDADevice *device)
2227 : device(device)
2228 {
2229         cuda_assert(cuCtxPushCurrent(device->cuContext));
2230 }
2231
2232 CUDAContextScope::~CUDAContextScope()
2233 {
2234         cuda_assert(cuCtxPopCurrent(NULL));
2235 }
2236
2237 /* split kernel */
2238
2239 class CUDASplitKernelFunction : public SplitKernelFunction{
2240         CUDADevice* device;
2241         CUfunction func;
2242 public:
2243         CUDASplitKernelFunction(CUDADevice *device, CUfunction func) : device(device), func(func) {}
2244
2245         /* enqueue the kernel, returns false if there is an error */
2246         bool enqueue(const KernelDimensions &dim, device_memory &/*kg*/, device_memory &/*data*/)
2247         {
2248                 return enqueue(dim, NULL);
2249         }
2250
2251         /* enqueue the kernel, returns false if there is an error */
2252         bool enqueue(const KernelDimensions &dim, void *args[])
2253         {
2254                 if(device->have_error())
2255                         return false;
2256
2257                 CUDAContextScope scope(device);
2258
2259                 /* we ignore dim.local_size for now, as this is faster */
2260                 int threads_per_block;
2261                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, func));
2262
2263                 int xblocks = (dim.global_size[0]*dim.global_size[1] + threads_per_block - 1)/threads_per_block;
2264
2265                 cuda_assert(cuFuncSetCacheConfig(func, CU_FUNC_CACHE_PREFER_L1));
2266
2267                 cuda_assert(cuLaunchKernel(func,
2268                                            xblocks, 1, 1, /* blocks */
2269                                            threads_per_block, 1, 1, /* threads */
2270                                            0, 0, args, 0));
2271
2272                 return !device->have_error();
2273         }
2274 };
2275
2276 CUDASplitKernel::CUDASplitKernel(CUDADevice *device) : DeviceSplitKernel(device), device(device)
2277 {
2278 }
2279
2280 uint64_t CUDASplitKernel::state_buffer_size(device_memory& /*kg*/, device_memory& /*data*/, size_t num_threads)
2281 {
2282         CUDAContextScope scope(device);
2283
2284         device_vector<uint64_t> size_buffer(device, "size_buffer", MEM_READ_WRITE);
2285         size_buffer.alloc(1);
2286         size_buffer.zero_to_device();
2287
2288         uint threads = num_threads;
2289         CUdeviceptr d_size = device->cuda_device_ptr(size_buffer.device_pointer);
2290
2291         struct args_t {
2292                 uint* num_threads;
2293                 CUdeviceptr* size;
2294         };
2295
2296         args_t args = {
2297                 &threads,
2298                 &d_size
2299         };
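             /* cuLaunchKernel takes kernelParams as an array of pointers to the individual
              * arguments; args_t is a struct of such pointers laid out contiguously, so
              * casting its address to void** below yields exactly that array. */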
2300
2301         CUfunction state_buffer_size;
2302         cuda_assert(cuModuleGetFunction(&state_buffer_size, device->cuModule, "kernel_cuda_state_buffer_size"));
2303
2304         cuda_assert(cuLaunchKernel(state_buffer_size,
2305                                    1, 1, 1,
2306                                    1, 1, 1,
2307                                    0, 0, (void**)&args, 0));
2308
2309         size_buffer.copy_from_device(0, 1, 1);
2310         size_t size = size_buffer[0];
2311         size_buffer.free();
2312
2313         return size;
2314 }
2315
2316 bool CUDASplitKernel::enqueue_split_kernel_data_init(const KernelDimensions& dim,
2317                                     RenderTile& rtile,
2318                                     int num_global_elements,
2319                                     device_memory& /*kernel_globals*/,
2320                                     device_memory& /*kernel_data*/,
2321                                     device_memory& split_data,
2322                                     device_memory& ray_state,
2323                                     device_memory& queue_index,
2324                                     device_memory& use_queues_flag,
2325                                     device_memory& work_pool_wgs)
2326 {
2327         CUDAContextScope scope(device);
2328
2329         CUdeviceptr d_split_data = device->cuda_device_ptr(split_data.device_pointer);
2330         CUdeviceptr d_ray_state = device->cuda_device_ptr(ray_state.device_pointer);
2331         CUdeviceptr d_queue_index = device->cuda_device_ptr(queue_index.device_pointer);
2332         CUdeviceptr d_use_queues_flag = device->cuda_device_ptr(use_queues_flag.device_pointer);
2333         CUdeviceptr d_work_pool_wgs = device->cuda_device_ptr(work_pool_wgs.device_pointer);
2334
2335         CUdeviceptr d_buffer = device->cuda_device_ptr(rtile.buffer);
2336
2337         int end_sample = rtile.start_sample + rtile.num_samples;
2338         int queue_size = dim.global_size[0] * dim.global_size[1];
2339
2340         struct args_t {
2341                 CUdeviceptr* split_data_buffer;
2342                 int* num_elements;
2343                 CUdeviceptr* ray_state;
2344                 int* start_sample;
2345                 int* end_sample;
2346                 int* sx;
2347                 int* sy;
2348                 int* sw;
2349                 int* sh;
2350                 int* offset;
2351                 int* stride;
2352                 CUdeviceptr* queue_index;
2353                 int* queuesize;
2354                 CUdeviceptr* use_queues_flag;
2355                 CUdeviceptr* work_pool_wgs;
2356                 int* num_samples;
2357                 CUdeviceptr* buffer;
2358         };
2359
2360         args_t args = {
2361                 &d_split_data,
2362                 &num_global_elements,
2363                 &d_ray_state,
2364                 &rtile.start_sample,
2365                 &end_sample,
2366                 &rtile.x,
2367                 &rtile.y,
2368                 &rtile.w,
2369                 &rtile.h,
2370                 &rtile.offset,
2371                 &rtile.stride,
2372                 &d_queue_index,
2373                 &queue_size,
2374                 &d_use_queues_flag,
2375                 &d_work_pool_wgs,
2376                 &rtile.num_samples,
2377                 &d_buffer
2378         };
2379
2380         CUfunction data_init;
2381         cuda_assert(cuModuleGetFunction(&data_init, device->cuModule, "kernel_cuda_path_trace_data_init"));
2382         if(device->have_error()) {
2383                 return false;
2384         }
2385
2386         CUDASplitKernelFunction(device, data_init).enqueue(dim, (void**)&args);
2387
2388         return !device->have_error();
2389 }
2390
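     /* Resolve a split kernel entry point by name. Device functions follow the
      * "kernel_cuda_<name>" convention, so e.g. requesting "queue_enqueue" looks
      * up "kernel_cuda_queue_enqueue" in the loaded module. */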
2391 SplitKernelFunction* CUDASplitKernel::get_split_kernel_function(const string& kernel_name,
2392                                                                 const DeviceRequestedFeatures&)
2393 {
2394         CUDAContextScope scope(device);
2395         CUfunction func;
2396
2397         cuda_assert(cuModuleGetFunction(&func, device->cuModule, (string("kernel_cuda_") + kernel_name).data()));
2398         if(device->have_error()) {
2399                 device->cuda_error_message(string_printf("kernel \"kernel_cuda_%s\" not found in module", kernel_name.data()));
2400                 return NULL;
2401         }
2402
2403         return new CUDASplitKernelFunction(device, func);
2404 }
2405
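     /* A local work size of 32x1 maps each work group onto a single CUDA warp. */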
2406 int2 CUDASplitKernel::split_kernel_local_size()
2407 {
2408         return make_int2(32, 1);
2409 }
2410
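     /* Choose a global size so the split kernel state fits in roughly half of
      * the currently free device memory, rounded down to warp and queue
      * friendly multiples. */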
2411 int2 CUDASplitKernel::split_kernel_global_size(device_memory& kg, device_memory& data, DeviceTask * /*task*/)
2412 {
2413         CUDAContextScope scope(device);
2414         size_t free;
2415         size_t total;
2416
2417         cuda_assert(cuMemGetInfo(&free, &total));
2418
2419         VLOG(1) << "Maximum device allocation size: "
2420                 << string_human_readable_number(free) << " bytes ("
2421                 << string_human_readable_size(free) << ").";
2422
2423         size_t num_elements = max_elements_for_max_buffer_size(kg, data, free / 2);
2424         size_t side = round_down((int)sqrt(num_elements), 32);
2425         int2 global_size = make_int2(side, round_down(num_elements / side, 16));
2426         VLOG(1) << "Global size: " << global_size << ".";
2427         return global_size;
2428 }
2429
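     /* One-time driver initialization. With dynamic loading, CUDA is only
      * reported as usable when the driver library loads and either precompiled
      * kernels or (outside Windows) a CUDA compiler for runtime compilation is
      * available. */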
2430 bool device_cuda_init()
2431 {
2432 #ifdef WITH_CUDA_DYNLOAD
2433         static bool initialized = false;
2434         static bool result = false;
2435
2436         if(initialized)
2437                 return result;
2438
2439         initialized = true;
2440         int cuew_result = cuewInit(CUEW_INIT_CUDA);
2441         if(cuew_result == CUEW_SUCCESS) {
2442                 VLOG(1) << "CUEW initialization succeeded";
2443                 if(CUDADevice::have_precompiled_kernels()) {
2444                         VLOG(1) << "Found precompiled kernels";
2445                         result = true;
2446                 }
2447 #ifndef _WIN32
2448                 else if(cuewCompilerPath() != NULL) {
2449                         VLOG(1) << "Found CUDA compiler " << cuewCompilerPath();
2450                         result = true;
2451                 }
2452                 else {
2453                         VLOG(1) << "Neither precompiled kernels nor a CUDA compiler was found;"
2454                                 << " unable to use CUDA";
2455                 }
2456 #endif
2457         }
2458         else {
2459                 VLOG(1) << "CUEW initialization failed: "
2460                         << ((cuew_result == CUEW_ERROR_ATEXIT_FAILED)
2461                             ? "Error setting up atexit() handler"
2462                             : "Error opening the library");
2463         }
2464
2465         return result;
2466 #else  /* WITH_CUDA_DYNLOAD */
2467         return true;
2468 #endif  /* WITH_CUDA_DYNLOAD */
2469 }
2470
2471 Device *device_cuda_create(DeviceInfo& info, Stats &stats, Profiler &profiler, bool background)
2472 {
2473         return new CUDADevice(info, stats, profiler, background);
2474 }
2475
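     /* Guarded cuInit() wrapper: on Windows, crashes inside the driver are
      * caught via structured exception handling and reported as
      * CUDA_ERROR_NO_DEVICE. */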
2476 static CUresult device_cuda_safe_init()
2477 {
2478 #ifdef _WIN32
2479         __try {
2480                 return cuInit(0);
2481         }
2482         __except(EXCEPTION_EXECUTE_HANDLER) {
2483                 /* Ignore crashes inside the CUDA driver and hope we can
2484                  * survive even with corrupted CUDA installs. */
2485                 fprintf(stderr, "Cycles CUDA: driver crashed, continuing without CUDA.\n");
2486         }
2487
2488         return CUDA_ERROR_NO_DEVICE;
2489 #else
2490         return cuInit(0);
2491 #endif
2492 }
2493
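     /* Enumerate usable CUDA devices. Cards below compute capability 3.0 are
      * skipped, and display-attached devices are appended at the end of the
      * list. */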
2494 void device_cuda_info(vector<DeviceInfo>& devices)
2495 {
2496         CUresult result = device_cuda_safe_init();
2497         if(result != CUDA_SUCCESS) {
2498                 if(result != CUDA_ERROR_NO_DEVICE)
2499                         fprintf(stderr, "CUDA cuInit: %s\n", cuewErrorString(result));
2500                 return;
2501         }
2502
2503         int count = 0;
2504         result = cuDeviceGetCount(&count);
2505         if(result != CUDA_SUCCESS) {
2506                 fprintf(stderr, "CUDA cuDeviceGetCount: %s\n", cuewErrorString(result));
2507                 return;
2508         }
2509
2510         vector<DeviceInfo> display_devices;
2511
2512         for(int num = 0; num < count; num++) {
2513                 char name[256];
2514
2515                 result = cuDeviceGetName(name, 256, num);
2516                 if(result != CUDA_SUCCESS) {
2517                         fprintf(stderr, "CUDA cuDeviceGetName: %s\n", cuewErrorString(result));
2518                         continue;
2519                 }
2520
2521                 int major;
2522                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, num);
2523                 if(major < 3) {
2524                         VLOG(1) << "Ignoring device \"" << name
2525                                 << "\"; this graphics card is no longer supported.";
2526                         continue;
2527                 }
2528
2529                 DeviceInfo info;
2530
2531                 info.type = DEVICE_CUDA;
2532                 info.description = string(name);
2533                 info.num = num;
2534
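                     /* All supported cards (sm_30+) handle advanced shading and
                      * half float textures; decoupled volume sampling is not
                      * available in the CUDA kernels. */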
2535                 info.advanced_shading = (major >= 3);
2536                 info.has_half_images = (major >= 3);
2537                 info.has_volume_decoupled = false;
2538
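                     /* Build the device id from the PCI location so it stays
                      * stable across changes in CUDA enumeration order. */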
2539                 int pci_location[3] = {0, 0, 0};
2540                 cuDeviceGetAttribute(&pci_location[0], CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID, num);
2541                 cuDeviceGetAttribute(&pci_location[1], CU_DEVICE_ATTRIBUTE_PCI_BUS_ID, num);
2542                 cuDeviceGetAttribute(&pci_location[2], CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID, num);
2543                 info.id = string_printf("CUDA_%s_%04x:%02x:%02x",
2544                                         name,
2545                                         (unsigned int)pci_location[0],
2546                                         (unsigned int)pci_location[1],
2547                                         (unsigned int)pci_location[2]);
2548
2549                 /* If the device has a kernel timeout and no compute preemption, we assume
2550                  * it is connected to a display and will freeze the display while doing
2551                  * computations. */
2552                 int timeout_attr = 0, preempt_attr = 0;
2553                 cuDeviceGetAttribute(&timeout_attr, CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT, num);
2554                 cuDeviceGetAttribute(&preempt_attr, CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED, num);
2555
2556                 if(timeout_attr && !preempt_attr) {
2557                         VLOG(1) << "Device is recognized as a display device.";
2558                         info.description += " (Display)";
2559                         info.display_device = true;
2560                         display_devices.push_back(info);
2561                 }
2562                 else {
2563                         devices.push_back(info);
2564                 }
2565                 VLOG(1) << "Added device \"" << name << "\" with id \"" << info.id << "\".";
2566         }
2567
2568         if(!display_devices.empty())
2569                 devices.insert(devices.end(), display_devices.begin(), display_devices.end());
2570 }
2571
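     /* Return a human-readable dump of per-device attributes queried through
      * cuDeviceGetAttribute(), one block per device. */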
2572 string device_cuda_capabilities()
2573 {
2574         CUresult result = device_cuda_safe_init();
2575         if(result != CUDA_SUCCESS) {
2576                 if(result != CUDA_ERROR_NO_DEVICE) {
2577                         return string("Error initializing CUDA: ") + cuewErrorString(result);
2578                 }
2579                 return "No CUDA device found\n";
2580         }
2581
2582         int count;
2583         result = cuDeviceGetCount(&count);
2584         if(result != CUDA_SUCCESS) {
2585                 return string("Error getting devices: ") + cuewErrorString(result);
2586         }
2587
2588         string capabilities = "";
2589         for(int num = 0; num < count; num++) {
2590                 char name[256];
2591                 if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS) {
2592                         continue;
2593                 }
2594                 capabilities += string("\t") + name + "\n";
2595                 int value;
2596 #define GET_ATTR(attr) \
2597                 { \
2598                         if(cuDeviceGetAttribute(&value, \
2599                                                 CU_DEVICE_ATTRIBUTE_##attr, \
2600                                                 num) == CUDA_SUCCESS) \
2601                         { \
2602                                 capabilities += string_printf("\t\tCU_DEVICE_ATTRIBUTE_" #attr "\t\t\t%d\n", \
2603                                                               value); \
2604                         } \
2605                 } (void) 0
2606                 /* TODO(sergey): Strip all attributes that are not useful for us
2607                  * or do not depend on the driver.
2608                  */
2609                 GET_ATTR(MAX_THREADS_PER_BLOCK);
2610                 GET_ATTR(MAX_BLOCK_DIM_X);
2611                 GET_ATTR(MAX_BLOCK_DIM_Y);
2612                 GET_ATTR(MAX_BLOCK_DIM_Z);
2613                 GET_ATTR(MAX_GRID_DIM_X);
2614                 GET_ATTR(MAX_GRID_DIM_Y);
2615                 GET_ATTR(MAX_GRID_DIM_Z);
2616                 GET_ATTR(MAX_SHARED_MEMORY_PER_BLOCK);
2617                 GET_ATTR(SHARED_MEMORY_PER_BLOCK);
2618                 GET_ATTR(TOTAL_CONSTANT_MEMORY);
2619                 GET_ATTR(WARP_SIZE);
2620                 GET_ATTR(MAX_PITCH);
2621                 GET_ATTR(MAX_REGISTERS_PER_BLOCK);
2622                 GET_ATTR(REGISTERS_PER_BLOCK);
2623                 GET_ATTR(CLOCK_RATE);
2624                 GET_ATTR(TEXTURE_ALIGNMENT);
2625                 GET_ATTR(GPU_OVERLAP);
2626                 GET_ATTR(MULTIPROCESSOR_COUNT);
2627                 GET_ATTR(KERNEL_EXEC_TIMEOUT);
2628                 GET_ATTR(INTEGRATED);
2629                 GET_ATTR(CAN_MAP_HOST_MEMORY);
2630                 GET_ATTR(COMPUTE_MODE);
2631                 GET_ATTR(MAXIMUM_TEXTURE1D_WIDTH);
2632                 GET_ATTR(MAXIMUM_TEXTURE2D_WIDTH);
2633                 GET_ATTR(MAXIMUM_TEXTURE2D_HEIGHT);
2634                 GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH);
2635                 GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT);
2636                 GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH);
2637                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_WIDTH);
2638                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_HEIGHT);
2639                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_LAYERS);
2640                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_WIDTH);
2641                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_HEIGHT);
2642                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES);
2643                 GET_ATTR(SURFACE_ALIGNMENT);
2644                 GET_ATTR(CONCURRENT_KERNELS);
2645                 GET_ATTR(ECC_ENABLED);
2646                 GET_ATTR(TCC_DRIVER);
2647                 GET_ATTR(MEMORY_CLOCK_RATE);
2648                 GET_ATTR(GLOBAL_MEMORY_BUS_WIDTH);
2649                 GET_ATTR(L2_CACHE_SIZE);
2650                 GET_ATTR(MAX_THREADS_PER_MULTIPROCESSOR);
2651                 GET_ATTR(ASYNC_ENGINE_COUNT);
2652                 GET_ATTR(UNIFIED_ADDRESSING);
2653                 GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_WIDTH);
2654                 GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_LAYERS);
2655                 GET_ATTR(CAN_TEX2D_GATHER);
2656                 GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_WIDTH);
2657                 GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_HEIGHT);
2658                 GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE);
2659                 GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE);
2660                 GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE);
2661                 GET_ATTR(TEXTURE_PITCH_ALIGNMENT);
2662                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_WIDTH);
2663                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH);
2664                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS);
2665                 GET_ATTR(MAXIMUM_SURFACE1D_WIDTH);
2666                 GET_ATTR(MAXIMUM_SURFACE2D_WIDTH);
2667                 GET_ATTR(MAXIMUM_SURFACE2D_HEIGHT);
2668                 GET_ATTR(MAXIMUM_SURFACE3D_WIDTH);
2669                 GET_ATTR(MAXIMUM_SURFACE3D_HEIGHT);
2670                 GET_ATTR(MAXIMUM_SURFACE3D_DEPTH);
2671                 GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_WIDTH);
2672                 GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_LAYERS);
2673                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_WIDTH);
2674                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_HEIGHT);
2675                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_LAYERS);
2676                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_WIDTH);
2677                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH);
2678                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS);
2679                 GET_ATTR(MAXIMUM_TEXTURE1D_LINEAR_WIDTH);
2680                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_WIDTH);
2681                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_HEIGHT);
2682                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_PITCH);
2683                 GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH);
2684                 GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT);
2685                 GET_ATTR(COMPUTE_CAPABILITY_MAJOR);
2686                 GET_ATTR(COMPUTE_CAPABILITY_MINOR);
2687                 GET_ATTR(MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH);
2688                 GET_ATTR(STREAM_PRIORITIES_SUPPORTED);
2689                 GET_ATTR(GLOBAL_L1_CACHE_SUPPORTED);
2690                 GET_ATTR(LOCAL_L1_CACHE_SUPPORTED);
2691                 GET_ATTR(MAX_SHARED_MEMORY_PER_MULTIPROCESSOR);
2692                 GET_ATTR(MAX_REGISTERS_PER_MULTIPROCESSOR);
2693                 GET_ATTR(MANAGED_MEMORY);
2694                 GET_ATTR(MULTI_GPU_BOARD);
2695                 GET_ATTR(MULTI_GPU_BOARD_GROUP_ID);
2696 #undef GET_ATTR
2697                 capabilities += "\n";
2698         }
2699
2700         return capabilities;
2701 }
2702
2703 CCL_NAMESPACE_END