intern/cycles/device/device_cuda.cpp
1 /*
2  * Copyright 2011-2013 Blender Foundation
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16
17 #include <climits>
18 #include <limits.h>
19 #include <stdio.h>
20 #include <stdlib.h>
21 #include <string.h>
22
23 #include "device/device.h"
24 #include "device/device_denoising.h"
25 #include "device/device_intern.h"
26 #include "device/device_split_kernel.h"
27
28 #include "render/buffers.h"
29
30 #include "kernel/filter/filter_defines.h"
31
32 #ifdef WITH_CUDA_DYNLOAD
33 #  include "cuew.h"
34 #else
35 #  include "util/util_opengl.h"
36 #  include <cuda.h>
37 #  include <cudaGL.h>
38 #endif
39 #include "util/util_debug.h"
40 #include "util/util_foreach.h"
41 #include "util/util_logging.h"
42 #include "util/util_map.h"
43 #include "util/util_md5.h"
44 #include "util/util_opengl.h"
45 #include "util/util_path.h"
46 #include "util/util_string.h"
47 #include "util/util_system.h"
48 #include "util/util_types.h"
49 #include "util/util_time.h"
50
51 #include "kernel/split/kernel_split_data_types.h"
52
53 CCL_NAMESPACE_BEGIN
54
55 #ifndef WITH_CUDA_DYNLOAD
56
57 /* Transparently implement some functions, so the majority of the file does not
58  * need to worry about the difference between dynamically loaded and linked CUDA.
59  */
60
61 namespace {
62
63 const char *cuewErrorString(CUresult result)
64 {
65         /* We can only give the error code here without major code duplication. That
66          * should be enough, since dynamic loading is only disabled by folks who
67          * know what they're doing anyway.
68          *
69          * NOTE: Avoid calling this from several threads.
70          */
71         static string error;
72         error = string_printf("%d", result);
73         return error.c_str();
74 }
75
76 const char *cuewCompilerPath(void)
77 {
78         return CYCLES_CUDA_NVCC_EXECUTABLE;
79 }
80
81 int cuewCompilerVersion(void)
82 {
83         return (CUDA_VERSION / 100) + (CUDA_VERSION % 100 / 10);
84 }
85
86 }  /* namespace */
87 #endif  /* WITH_CUDA_DYNLOAD */
88
89 class CUDADevice;
90
91 class CUDASplitKernel : public DeviceSplitKernel {
92         CUDADevice *device;
93 public:
94         explicit CUDASplitKernel(CUDADevice *device);
95
96         virtual uint64_t state_buffer_size(device_memory& kg, device_memory& data, size_t num_threads);
97
98         virtual bool enqueue_split_kernel_data_init(const KernelDimensions& dim,
99                                                     RenderTile& rtile,
100                                                     int num_global_elements,
101                                                     device_memory& kernel_globals,
102                                                     device_memory& kernel_data_,
103                                                     device_memory& split_data,
104                                                     device_memory& ray_state,
105                                                     device_memory& queue_index,
106                                                     device_memory& use_queues_flag,
107                                                     device_memory& work_pool_wgs);
108
109         virtual SplitKernelFunction* get_split_kernel_function(const string& kernel_name,
110                                                                const DeviceRequestedFeatures&);
111         virtual int2 split_kernel_local_size();
112         virtual int2 split_kernel_global_size(device_memory& kg, device_memory& data, DeviceTask *task);
113 };
114
115 /* Utility to push/pop CUDA context. */
116 class CUDAContextScope {
117 public:
118         CUDAContextScope(CUDADevice *device);
119         ~CUDAContextScope();
120
121 private:
122         CUDADevice *device;
123 };
124
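/* Usage sketch (illustrative, not part of the original sources): the scope object
 * pushes the device's CUDA context on construction and pops it on destruction, so
 * driver API calls are valid for the lifetime of the scope:
 *
 *   {
 *           CUDAContextScope scope(device);   // push device->cuContext
 *           cuda_assert(cuCtxSynchronize());  // safe to use the driver API here
 *   }                                         // context popped automatically
 */
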
125 class CUDADevice : public Device
126 {
127 public:
128         DedicatedTaskPool task_pool;
129         CUdevice cuDevice;
130         CUcontext cuContext;
131         CUmodule cuModule, cuFilterModule;
132         size_t device_texture_headroom;
133         size_t device_working_headroom;
134         bool move_texture_to_host;
135         size_t map_host_used;
136         size_t map_host_limit;
137         int can_map_host;
138         int cuDevId;
139         int cuDevArchitecture;
140         bool first_error;
141         CUDASplitKernel *split_kernel;
142
143         struct CUDAMem {
144                 CUDAMem()
145                 : texobject(0), array(0), map_host_pointer(0), free_map_host(false) {}
146
147                 CUtexObject texobject;
148                 CUarray array;
149                 void *map_host_pointer;
150                 bool free_map_host;
151         };
152         typedef map<device_memory*, CUDAMem> CUDAMemMap;
153         CUDAMemMap cuda_mem_map;
154
155         struct PixelMem {
156                 GLuint cuPBO;
157                 CUgraphicsResource cuPBOresource;
158                 GLuint cuTexId;
159                 int w, h;
160         };
161         map<device_ptr, PixelMem> pixel_mem_map;
162
163         /* Bindless Textures */
164         device_vector<TextureInfo> texture_info;
165         bool need_texture_info;
166
167         CUdeviceptr cuda_device_ptr(device_ptr mem)
168         {
169                 return (CUdeviceptr)mem;
170         }
171
172         static bool have_precompiled_kernels()
173         {
174                 string cubins_path = path_get("lib");
175                 return path_exists(cubins_path);
176         }
177
178         virtual bool show_samples() const
179         {
180                 /* The CUDADevice only processes one tile at a time, so showing samples is fine. */
181                 return true;
182         }
183
184 /*#ifdef NDEBUG
185 #define cuda_abort()
186 #else
187 #define cuda_abort() abort()
188 #endif*/
189         void cuda_error_documentation()
190         {
191                 if(first_error) {
192                         fprintf(stderr, "\nRefer to the Cycles GPU rendering documentation for possible solutions:\n");
193                         fprintf(stderr, "https://docs.blender.org/manual/en/dev/render/cycles/gpu_rendering.html\n\n");
194                         first_error = false;
195                 }
196         }
197
198 #define cuda_assert(stmt) \
199         { \
200                 CUresult result = stmt; \
201                 \
202                 if(result != CUDA_SUCCESS) { \
203                         string message = string_printf("CUDA error: %s in %s, line %d", cuewErrorString(result), #stmt, __LINE__); \
204                         if(error_msg == "") \
205                                 error_msg = message; \
206                         fprintf(stderr, "%s\n", message.c_str()); \
207                         /*cuda_abort();*/ \
208                         cuda_error_documentation(); \
209                 } \
210         } (void)0
211
212         bool cuda_error_(CUresult result, const string& stmt)
213         {
214                 if(result == CUDA_SUCCESS)
215                         return false;
216
217                 string message = string_printf("CUDA error at %s: %s", stmt.c_str(), cuewErrorString(result));
218                 if(error_msg == "")
219                         error_msg = message;
220                 fprintf(stderr, "%s\n", message.c_str());
221                 cuda_error_documentation();
222                 return true;
223         }
224
225 #define cuda_error(stmt) cuda_error_(stmt, #stmt)
226
227         void cuda_error_message(const string& message)
228         {
229                 if(error_msg == "")
230                         error_msg = message;
231                 fprintf(stderr, "%s\n", message.c_str());
232                 cuda_error_documentation();
233         }
234
235         CUDADevice(DeviceInfo& info, Stats &stats, bool background_)
236         : Device(info, stats, background_),
237           texture_info(this, "__texture_info", MEM_TEXTURE)
238         {
239                 first_error = true;
240                 background = background_;
241
242                 cuDevId = info.num;
243                 cuDevice = 0;
244                 cuContext = 0;
245
246                 cuModule = 0;
247                 cuFilterModule = 0;
248
249                 split_kernel = NULL;
250
251                 need_texture_info = false;
252
253                 device_texture_headroom = 0;
254                 device_working_headroom = 0;
255                 move_texture_to_host = false;
256                 map_host_limit = 0;
257                 map_host_used = 0;
258                 can_map_host = 0;
259
260                 /* Initialize CUDA. */
261                 if(cuda_error(cuInit(0)))
262                         return;
263
264                 /* Setup device and context. */
265                 if(cuda_error(cuDeviceGet(&cuDevice, cuDevId)))
266                         return;
267
268                 /* CU_CTX_MAP_HOST for mapping host memory when out of device memory.
269                  * CU_CTX_LMEM_RESIZE_TO_MAX for reserving local memory ahead of render,
270                  * so we can predict which memory to map to host. */
271                 cuda_assert(cuDeviceGetAttribute(&can_map_host, CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY, cuDevice));
272
273                 unsigned int ctx_flags = CU_CTX_LMEM_RESIZE_TO_MAX;
274                 if(can_map_host) {
275                         ctx_flags |= CU_CTX_MAP_HOST;
276                         init_host_memory();
277                 }
278
279                 /* Create context. */
280                 CUresult result;
281
282                 if(background) {
283                         result = cuCtxCreate(&cuContext, ctx_flags, cuDevice);
284                 }
285                 else {
286                         result = cuGLCtxCreate(&cuContext, ctx_flags, cuDevice);
287
288                         if(result != CUDA_SUCCESS) {
289                                 result = cuCtxCreate(&cuContext, ctx_flags, cuDevice);
290                                 background = true;
291                         }
292                 }
293
294                 if(cuda_error_(result, "cuCtxCreate"))
295                         return;
296
297                 int major, minor;
298                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
299                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
300                 cuDevArchitecture = major*100 + minor*10;
301
302                 /* Pop context set by cuCtxCreate. */
303                 cuCtxPopCurrent(NULL);
304         }
305
306         ~CUDADevice()
307         {
308                 task_pool.stop();
309
310                 delete split_kernel;
311
312                 texture_info.free();
313
314                 cuda_assert(cuCtxDestroy(cuContext));
315         }
316
317         bool support_device(const DeviceRequestedFeatures& /*requested_features*/)
318         {
319                 int major, minor;
320                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
321                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
322
323                 /* We only support sm_30 and above */
324                 if(major < 3) {
325                         cuda_error_message(string_printf("CUDA device supported only with compute capability 3.0 or up, found %d.%d.", major, minor));
326                         return false;
327                 }
328
329                 return true;
330         }
331
332         bool use_adaptive_compilation()
333         {
334                 return DebugFlags().cuda.adaptive_compile;
335         }
336
337         bool use_split_kernel()
338         {
339                 return DebugFlags().cuda.split_kernel;
340         }
341
342         /* Common NVCC flags which stay the same regardless of shading model and
343          * kernel sources MD5, and only depend on compiler or compilation settings.
344          */
345         string compile_kernel_get_common_cflags(
346                 const DeviceRequestedFeatures& requested_features,
347                 bool filter=false, bool split=false)
348         {
349                 const int machine = system_cpu_bits();
350                 const string source_path = path_get("source");
351                 const string include_path = source_path;
352                 string cflags = string_printf("-m%d "
353                                               "--ptxas-options=\"-v\" "
354                                               "--use_fast_math "
355                                               "-DNVCC "
356                                                "-I\"%s\"",
357                                               machine,
358                                               include_path.c_str());
359                 if(!filter && use_adaptive_compilation()) {
360                         cflags += " " + requested_features.get_build_options();
361                 }
362                 const char *extra_cflags = getenv("CYCLES_CUDA_EXTRA_CFLAGS");
363                 if(extra_cflags) {
364                         cflags += string(" ") + string(extra_cflags);
365                 }
366 #ifdef WITH_CYCLES_DEBUG
367                 cflags += " -D__KERNEL_DEBUG__";
368 #endif
369
370                 if(split) {
371                         cflags += " -D__SPLIT__";
372                 }
373
374                 return cflags;
375         }
376
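        /* Illustrative example (assumed, not taken from the original sources): on a
         * 64-bit build with adaptive compilation disabled and no extra environment
         * flags, the resulting flags look roughly like:
         *
         *   -m64 --ptxas-options="-v" --use_fast_math -DNVCC -I"<source path>"
         *
         * with " -D__SPLIT__" appended when building the split kernel. */
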
377         bool compile_check_compiler() {
378                 const char *nvcc = cuewCompilerPath();
379                 if(nvcc == NULL) {
380                         cuda_error_message("CUDA nvcc compiler not found. "
381                                            "Install CUDA toolkit in default location.");
382                         return false;
383                 }
384                 const int cuda_version = cuewCompilerVersion();
385                 VLOG(1) << "Found nvcc " << nvcc
386                         << ", CUDA version " << cuda_version
387                         << ".";
388                 const int major = cuda_version / 10, minor = cuda_version % 10;
389                 if(cuda_version == 0) {
390                         cuda_error_message("CUDA nvcc compiler version could not be parsed.");
391                         return false;
392                 }
393                 if(cuda_version < 80) {
394                         printf("Unsupported CUDA version %d.%d detected, "
395                                "you need CUDA 8.0 or newer.\n",
396                                major, minor);
397                         return false;
398                 }
399                 else if(cuda_version != 80) {
400                         printf("CUDA version %d.%d detected, build may succeed but only "
401                                "CUDA 8.0 is officially supported.\n",
402                                major, minor);
403                 }
404                 return true;
405         }
406
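        /* Worked example (illustrative): cuewCompilerVersion() encodes the version as
         * major * 10 + minor, so nvcc from CUDA 8.0 reports 80, which splits into
         * major = 80 / 10 = 8 and minor = 80 % 10 = 0 and passes the checks above. */
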
407         string compile_kernel(const DeviceRequestedFeatures& requested_features,
408                               bool filter=false, bool split=false)
409         {
410                 const char *name, *source;
411                 if(filter) {
412                         name = "filter";
413                         source = "filter.cu";
414                 }
415                 else if(split) {
416                         name = "kernel_split";
417                         source = "kernel_split.cu";
418                 }
419                 else {
420                         name = "kernel";
421                         source = "kernel.cu";
422                 }
423                 /* Compute cubin name. */
424                 int major, minor;
425                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
426                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
427
428                 /* Attempt to use kernel provided with Blender. */
429                 if(!use_adaptive_compilation()) {
430                         const string cubin = path_get(string_printf("lib/%s_sm_%d%d.cubin",
431                                                                     name, major, minor));
432                         VLOG(1) << "Testing for pre-compiled kernel " << cubin << ".";
433                         if(path_exists(cubin)) {
434                                 VLOG(1) << "Using precompiled kernel.";
435                                 return cubin;
436                         }
437                 }
438
439                 const string common_cflags =
440                         compile_kernel_get_common_cflags(requested_features, filter, split);
441
442                 /* Try to use locally compiled kernel. */
443                 const string source_path = path_get("source");
444                 const string kernel_md5 = path_files_md5_hash(source_path);
445
446                 /* We include cflags in the md5, so that changing the CUDA toolkit or other
447                  * compiler command line arguments ensures the cubin gets re-built.
448                  */
449                 const string cubin_md5 = util_md5_string(kernel_md5 + common_cflags);
450
451                 const string cubin_file = string_printf("cycles_%s_sm%d%d_%s.cubin",
452                                                         name, major, minor,
453                                                         cubin_md5.c_str());
454                 const string cubin = path_cache_get(path_join("kernels", cubin_file));
455                 VLOG(1) << "Testing for locally compiled kernel " << cubin << ".";
456                 if(path_exists(cubin)) {
457                         VLOG(1) << "Using locally compiled kernel.";
458                         return cubin;
459                 }
460
461 #ifdef _WIN32
462                 if(have_precompiled_kernels()) {
463                         if(major < 3) {
464                                 cuda_error_message(string_printf(
465                                         "CUDA device requires compute capability 3.0 or up, "
466                                         "found %d.%d. Your GPU is not supported.",
467                                         major, minor));
468                         }
469                         else {
470                                 cuda_error_message(string_printf(
471                                         "CUDA binary kernel for this graphics card compute "
472                                         "capability (%d.%d) not found.",
473                                         major, minor));
474                         }
475                         return "";
476                 }
477 #endif
478
479                 /* Compile. */
480                 if(!compile_check_compiler()) {
481                         return "";
482                 }
483                 const char *nvcc = cuewCompilerPath();
484                 const string kernel = path_join(
485                         path_join(source_path, "kernel"),
486                         path_join("kernels",
487                                   path_join("cuda", source)));
488                 double starttime = time_dt();
489                 printf("Compiling CUDA kernel ...\n");
490
491                 path_create_directories(cubin);
492
493                 string command = string_printf("\"%s\" "
494                                                "-arch=sm_%d%d "
495                                                "--cubin \"%s\" "
496                                                "-o \"%s\" "
497                                                "%s ",
498                                                nvcc,
499                                                major, minor,
500                                                kernel.c_str(),
501                                                cubin.c_str(),
502                                                common_cflags.c_str());
503
504                 printf("%s\n", command.c_str());
505
506                 if(system(command.c_str()) == -1) {
507                         cuda_error_message("Failed to execute compilation command, "
508                                            "see console for details.");
509                         return "";
510                 }
511
512                 /* Verify that compilation succeeded. */
513                 if(!path_exists(cubin)) {
514                         cuda_error_message("CUDA kernel compilation failed, "
515                                            "see console for details.");
516                         return "";
517                 }
518
519                 printf("Kernel compilation finished in %.2lfs.\n", time_dt() - starttime);
520
521                 return cubin;
522         }
523
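        /* Illustrative example (device and hash assumed, not from the original
         * sources): on an sm_61 GPU without adaptive compilation, this first looks
         * for a bundled "lib/kernel_sm_61.cubin"; otherwise it compiles and caches
         * something like "kernels/cycles_kernel_sm61_<md5 of sources+cflags>.cubin"
         * in the user cache directory. */
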
524         bool load_kernels(const DeviceRequestedFeatures& requested_features)
525         {
526                 /* TODO(sergey): Support kernels re-load for CUDA devices.
527                  *
528                  * Currently re-loading kernel will invalidate memory pointers,
529                  * causing problems in cuCtxSynchronize.
530                  */
531                 if(cuFilterModule && cuModule) {
532                         VLOG(1) << "Skipping kernel reload, not currently supported.";
533                         return true;
534                 }
535
536                 /* check if cuda init succeeded */
537                 if(cuContext == 0)
538                         return false;
539
540                 /* check if GPU is supported */
541                 if(!support_device(requested_features))
542                         return false;
543
544                 /* get kernel */
545                 string cubin = compile_kernel(requested_features, false, use_split_kernel());
546                 if(cubin == "")
547                         return false;
548
549                 string filter_cubin = compile_kernel(requested_features, true, false);
550                 if(filter_cubin == "")
551                         return false;
552
553                 /* open module */
554                 CUDAContextScope scope(this);
555
556                 string cubin_data;
557                 CUresult result;
558
559                 if(path_read_text(cubin, cubin_data))
560                         result = cuModuleLoadData(&cuModule, cubin_data.c_str());
561                 else
562                         result = CUDA_ERROR_FILE_NOT_FOUND;
563
564                 if(cuda_error_(result, "cuModuleLoad"))
565                         cuda_error_message(string_printf("Failed loading CUDA kernel %s.", cubin.c_str()));
566
567                 if(path_read_text(filter_cubin, cubin_data))
568                         result = cuModuleLoadData(&cuFilterModule, cubin_data.c_str());
569                 else
570                         result = CUDA_ERROR_FILE_NOT_FOUND;
571
572                 if(cuda_error_(result, "cuModuleLoad"))
573                         cuda_error_message(string_printf("Failed loading CUDA kernel %s.", filter_cubin.c_str()));
574
575                 if(result == CUDA_SUCCESS) {
576                         reserve_local_memory(requested_features);
577                 }
578
579                 return (result == CUDA_SUCCESS);
580         }
581
582         void reserve_local_memory(const DeviceRequestedFeatures& requested_features)
583         {
584                 if(use_split_kernel()) {
585                         /* The split kernel mostly uses global memory and adaptive compilation,
586                          * so it is currently difficult to predict how much is needed. */
587                         return;
588                 }
589
590                 /* Together with CU_CTX_LMEM_RESIZE_TO_MAX, this reserves local memory
591                  * needed for kernel launches, so that we can reliably figure out when
592                  * to allocate scene data in mapped host memory. */
593                 CUDAContextScope scope(this);
594
595                 size_t total = 0, free_before = 0, free_after = 0;
596                 cuMemGetInfo(&free_before, &total);
597
598                 /* Get kernel function. */
599                 CUfunction cuPathTrace;
600
601                 if(requested_features.use_integrator_branched) {
602                         cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_branched_path_trace"));
603                 }
604                 else {
605                         cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_path_trace"));
606                 }
607
608                 cuda_assert(cuFuncSetCacheConfig(cuPathTrace, CU_FUNC_CACHE_PREFER_L1));
609
610                 int min_blocks, num_threads_per_block;
611                 cuda_assert(cuOccupancyMaxPotentialBlockSize(&min_blocks, &num_threads_per_block, cuPathTrace, NULL, 0, 0));
612
613                 /* Launch the kernel; using just 1 block appears sufficient to reserve
614                  * memory for all multiprocessors. It would still be good to do this in
615                  * parallel for the multi-GPU case to make it faster. */
616                 CUdeviceptr d_work_tiles = 0;
617                 uint total_work_size = 0;
618
619                 void *args[] = {&d_work_tiles,
620                                 &total_work_size};
621
622                 cuda_assert(cuLaunchKernel(cuPathTrace,
623                                            1, 1, 1,
624                                            num_threads_per_block, 1, 1,
625                                            0, 0, args, 0));
626
627                 cuda_assert(cuCtxSynchronize());
628
629                 cuMemGetInfo(&free_after, &total);
630                 VLOG(1) << "Local memory reserved "
631                         << string_human_readable_number(free_before - free_after) << " bytes. ("
632                         << string_human_readable_size(free_before - free_after) << ")";
633
634 #if 0
635                 /* For testing mapped host memory, fill up device memory. */
636                 const size_t keep_mb = 1024;
637
638                 while(free_after > keep_mb * 1024 * 1024LL) {
639                         CUdeviceptr tmp;
640                         cuda_assert(cuMemAlloc(&tmp, 10 * 1024 * 1024LL));
641                         cuMemGetInfo(&free_after, &total);
642                 }
643 #endif
644         }
645
646         void init_host_memory()
647         {
648                 /* Limit amount of host mapped memory, because allocating too much can
649                  * cause system instability. Leave at least half or 4 GB of system
650                  * memory free, whichever is smaller. */
651                 size_t default_limit = 4 * 1024 * 1024 * 1024LL;
652                 size_t system_ram = system_physical_ram();
653
654                 if(system_ram > 0) {
655                         if(system_ram / 2 > default_limit) {
656                                 map_host_limit = system_ram - default_limit;
657                         }
658                         else {
659                                 map_host_limit = system_ram / 2;
660                         }
661                 }
662                 else {
663                         VLOG(1) << "Mapped host memory disabled, failed to get system RAM";
664                         map_host_limit = 0;
665                 }
666
667                 /* Amount of device memory to keep free after texture memory
668                  * and working memory allocations, respectively. We set the working
669                  * memory headroom lower so that some space is left after all
670                  * texture memory allocations. */
671                 device_working_headroom = 32 * 1024 * 1024LL; // 32MB
672                 device_texture_headroom = 128 * 1024 * 1024LL; // 128MB
673
674                 VLOG(1) << "Mapped host memory limit set to "
675                         << string_human_readable_number(map_host_limit) << " bytes. ("
676                         << string_human_readable_size(map_host_limit) << ")";
677         }
678
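        /* Worked example (illustrative): on a 16 GB system, half of RAM (8 GB) exceeds
         * the 4 GB default, so map_host_limit = 16 GB - 4 GB = 12 GB; on a 6 GB system,
         * half of RAM (3 GB) is smaller, so map_host_limit = 3 GB. */
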
679         void load_texture_info()
680         {
681                 if(need_texture_info) {
682                         texture_info.copy_to_device();
683                         need_texture_info = false;
684                 }
685         }
686
687         void move_textures_to_host(size_t size, bool for_texture)
688         {
689                 /* Signal to reallocate textures in host memory only. */
690                 move_texture_to_host = true;
691
692                 while(size > 0) {
693                         /* Find suitable memory allocation to move. */
694                         device_memory *max_mem = NULL;
695                         size_t max_size = 0;
696                         bool max_is_image = false;
697
698                         foreach(CUDAMemMap::value_type& pair, cuda_mem_map) {
699                                 device_memory& mem = *pair.first;
700                                 CUDAMem *cmem = &pair.second;
701
702                                 bool is_texture = (mem.type == MEM_TEXTURE) && (&mem != &texture_info);
703                                 bool is_image = is_texture && (mem.data_height > 1);
704
705                                 /* Can't move this type of memory. */
706                                 if(!is_texture || cmem->array) {
707                                         continue;
708                                 }
709
710                                 /* Already in host memory. */
711                                 if(cmem->map_host_pointer) {
712                                         continue;
713                                 }
714
715                                 /* When freeing room for another texture, only move image textures. */
716                                 if(for_texture && !is_image) {
717                                         continue;
718                                 }
719
720                                 /* Try to move largest allocation, prefer moving images. */
721                                 if(is_image > max_is_image ||
722                                    (is_image == max_is_image && mem.device_size > max_size)) {
723                                         max_is_image = is_image;
724                                         max_size = mem.device_size;
725                                         max_mem = &mem;
726                                 }
727                         }
728
729                         /* Move to host memory. This part is mutex protected since
730                          * multiple CUDA devices could be moving the memory. The
731                          * first one will do it, and the rest will adopt the pointer. */
732                         if(max_mem) {
733                                 VLOG(1) << "Move memory from device to host: " << max_mem->name;
734
735                                 static thread_mutex move_mutex;
736                                 thread_scoped_lock lock(move_mutex);
737
738                                 /* Preserve the original device pointer; in the multi-device case
739                                  * we can't change it because the pointer mapping would break. */
740                                 device_ptr prev_pointer = max_mem->device_pointer;
741                                 size_t prev_size = max_mem->device_size;
742
743                                 tex_free(*max_mem);
744                                 tex_alloc(*max_mem);
745                                 size = (max_size >= size)? 0: size - max_size;
746
747                                 max_mem->device_pointer = prev_pointer;
748                                 max_mem->device_size = prev_size;
749                         }
750                         else {
751                                 break;
752                         }
753                 }
754
755                 /* Update texture info array with new pointers. */
756                 load_texture_info();
757
758                 move_texture_to_host = false;
759         }
760
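        /* Illustrative example (sizes assumed): if the map contains a 2 GB image texture
         * and a 3 GB 1D data texture, the image is moved first because images are
         * preferred over other textures; among candidates of the same kind, the largest
         * allocation is moved first, and the loop repeats until enough space is freed. */
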
761         CUDAMem *generic_alloc(device_memory& mem, size_t pitch_padding = 0)
762         {
763                 CUDAContextScope scope(this);
764
765                 CUdeviceptr device_pointer = 0;
766                 size_t size = mem.memory_size() + pitch_padding;
767
768                 CUresult mem_alloc_result = CUDA_ERROR_OUT_OF_MEMORY;
769                 const char *status = "";
770
771                 /* First try allocating in device memory, respecting headroom. We make
772                  * an exception for texture info. It is small and frequently accessed,
773                  * so treat it as working memory.
774                  *
775                  * If there is not enough room for working memory, we will try to move
776                  * textures to host memory, assuming the performance impact would have
777                  * been worse for working memory. */
778                 bool is_texture = (mem.type == MEM_TEXTURE) && (&mem != &texture_info);
779                 bool is_image = is_texture && (mem.data_height > 1);
780
781                 size_t headroom = (is_texture)? device_texture_headroom:
782                                                 device_working_headroom;
783
784                 size_t total = 0, free = 0;
785                 cuMemGetInfo(&free, &total);
786
787                 /* Move textures to host memory if needed. */
788                 if(!move_texture_to_host && !is_image && (size + headroom) >= free) {
789                         move_textures_to_host(size + headroom - free, is_texture);
790                         cuMemGetInfo(&free, &total);
791                 }
792
793                 /* Allocate in device memory. */
794                 if(!move_texture_to_host && (size + headroom) < free) {
795                         mem_alloc_result = cuMemAlloc(&device_pointer, size);
796                         if(mem_alloc_result == CUDA_SUCCESS) {
797                                 status = " in device memory";
798                         }
799                 }
800
801                 /* Fall back to mapped host memory if needed and possible. */
802                 void *map_host_pointer = 0;
803                 bool free_map_host = false;
804
805                 if(mem_alloc_result != CUDA_SUCCESS && can_map_host &&
806                    map_host_used + size < map_host_limit) {
807                         if(mem.shared_pointer) {
808                                 /* Another device already allocated host memory. */
809                                 mem_alloc_result = CUDA_SUCCESS;
810                                 map_host_pointer = mem.shared_pointer;
811                         }
812                         else {
813                                 /* Allocate host memory ourselves. */
814                                 mem_alloc_result = cuMemHostAlloc(&map_host_pointer, size,
815                                                                   CU_MEMHOSTALLOC_DEVICEMAP |
816                                                                   CU_MEMHOSTALLOC_WRITECOMBINED);
817                                 mem.shared_pointer = map_host_pointer;
818                                 free_map_host = true;
819                         }
820
821                         if(mem_alloc_result == CUDA_SUCCESS) {
822                                 cuda_assert(cuMemHostGetDevicePointer_v2(&device_pointer, mem.shared_pointer, 0));
823                                 map_host_used += size;
824                                 status = " in host memory";
825
826                                 /* Replace host pointer with our host allocation. Only works if
827                                  * CUDA memory layout is the same and has no pitch padding. Also
828                                  * does not work if we move textures to host during a render,
829                                  * since other devices might be using the memory. */
830                                 if(!move_texture_to_host && pitch_padding == 0 &&
831                                    mem.host_pointer && mem.host_pointer != mem.shared_pointer) {
832                                         memcpy(mem.shared_pointer, mem.host_pointer, size);
833                                         mem.host_free();
834                                         mem.host_pointer = mem.shared_pointer;
835                                 }
836                         }
837                         else {
838                                 status = " failed, out of host memory";
839                         }
840                 }
841                 else if(mem_alloc_result != CUDA_SUCCESS) {
842                         status = " failed, out of device and host memory";
843                 }
844
845                 if(mem_alloc_result != CUDA_SUCCESS) {
846                         cuda_assert(mem_alloc_result);
847                 }
848
849                 if(mem.name) {
850                         VLOG(1) << "Buffer allocate: " << mem.name << ", "
851                                         << string_human_readable_number(mem.memory_size()) << " bytes. ("
852                                         << string_human_readable_size(mem.memory_size()) << ")"
853                                         << status;
854                 }
855
856                 mem.device_pointer = (device_ptr)device_pointer;
857                 mem.device_size = size;
858                 stats.mem_alloc(size);
859
860                 if(!mem.device_pointer) {
861                         return NULL;
862                 }
863
864                 /* Insert into map of allocations. */
865                 CUDAMem *cmem = &cuda_mem_map[&mem];
866                 cmem->map_host_pointer = map_host_pointer;
867                 cmem->free_map_host = free_map_host;
868                 return cmem;
869         }
870
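        /* Illustrative example (numbers assumed): for a 1 GB working-memory allocation
         * with the 32 MB working headroom, device memory is used only if more than
         * 1 GB + 32 MB is free; otherwise textures may be moved to host first, and if
         * the allocation still fails it falls back to mapped host memory, subject to
         * can_map_host and map_host_limit. */
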
871         void generic_copy_to(device_memory& mem)
872         {
873                 if(mem.host_pointer && mem.device_pointer) {
874                         CUDAContextScope scope(this);
875
876                         if(mem.host_pointer != mem.shared_pointer) {
877                                 cuda_assert(cuMemcpyHtoD(cuda_device_ptr(mem.device_pointer),
878                                                          mem.host_pointer,
879                                                          mem.memory_size()));
880                         }
881                 }
882         }
883
884         void generic_free(device_memory& mem)
885         {
886                 if(mem.device_pointer) {
887                         CUDAContextScope scope(this);
888                         const CUDAMem& cmem = cuda_mem_map[&mem];
889
890                         if(cmem.map_host_pointer) {
891                                 /* Free host memory. */
892                                 if(cmem.free_map_host) {
893                                         cuMemFreeHost(cmem.map_host_pointer);
894                                         if(mem.host_pointer == mem.shared_pointer) {
895                                                 mem.host_pointer = 0;
896                                         }
897                                         mem.shared_pointer = 0;
898                                 }
899
900                                 map_host_used -= mem.device_size;
901                         }
902                         else {
903                                 /* Free device memory. */
904                                 cuMemFree(mem.device_pointer);
905                         }
906
907                         stats.mem_free(mem.device_size);
908                         mem.device_pointer = 0;
909                         mem.device_size = 0;
910
911                         cuda_mem_map.erase(cuda_mem_map.find(&mem));
912                 }
913         }
914
915         void mem_alloc(device_memory& mem)
916         {
917                 if(mem.type == MEM_PIXELS && !background) {
918                         pixels_alloc(mem);
919                 }
920                 else if(mem.type == MEM_TEXTURE) {
921                         assert(!"mem_alloc not supported for textures.");
922                 }
923                 else {
924                         generic_alloc(mem);
925                 }
926         }
927
928         void mem_copy_to(device_memory& mem)
929         {
930                 if(mem.type == MEM_PIXELS) {
931                         assert(!"mem_copy_to not supported for pixels.");
932                 }
933                 else if(mem.type == MEM_TEXTURE) {
934                         tex_free(mem);
935                         tex_alloc(mem);
936                 }
937                 else {
938                         if(!mem.device_pointer) {
939                                 generic_alloc(mem);
940                         }
941
942                         generic_copy_to(mem);
943                 }
944         }
945
946         void mem_copy_from(device_memory& mem, int y, int w, int h, int elem)
947         {
948                 if(mem.type == MEM_PIXELS && !background) {
949                         pixels_copy_from(mem, y, w, h);
950                 }
951                 else if(mem.type == MEM_TEXTURE) {
952                         assert(!"mem_copy_from not supported for textures.");
953                 }
954                 else {
955                         CUDAContextScope scope(this);
956                         size_t offset = elem*y*w;
957                         size_t size = elem*w*h;
958
959                         if(mem.host_pointer && mem.device_pointer) {
960                                 cuda_assert(cuMemcpyDtoH((uchar*)mem.host_pointer + offset,
961                                                          (CUdeviceptr)(mem.device_pointer + offset), size));
962                         }
963                         else if(mem.host_pointer) {
964                                 memset((char*)mem.host_pointer + offset, 0, size);
965                         }
966                 }
967         }
968
969         void mem_zero(device_memory& mem)
970         {
971                 if(!mem.device_pointer) {
972                         mem_alloc(mem);
973                 }
974
975                 if(mem.host_pointer) {
976                         memset(mem.host_pointer, 0, mem.memory_size());
977                 }
978
979                 if(mem.device_pointer &&
980                    (!mem.host_pointer || mem.host_pointer != mem.shared_pointer)) {
981                         CUDAContextScope scope(this);
982                         cuda_assert(cuMemsetD8(cuda_device_ptr(mem.device_pointer), 0, mem.memory_size()));
983                 }
984         }
985
986         void mem_free(device_memory& mem)
987         {
988                 if(mem.type == MEM_PIXELS && !background) {
989                         pixels_free(mem);
990                 }
991                 else if(mem.type == MEM_TEXTURE) {
992                         tex_free(mem);
993                 }
994                 else {
995                         generic_free(mem);
996                 }
997         }
998
999         virtual device_ptr mem_alloc_sub_ptr(device_memory& mem, int offset, int /*size*/)
1000         {
1001                 return (device_ptr) (((char*) mem.device_pointer) + mem.memory_elements_size(offset));
1002         }
1003
1004         void const_copy_to(const char *name, void *host, size_t size)
1005         {
1006                 CUDAContextScope scope(this);
1007                 CUdeviceptr mem;
1008                 size_t bytes;
1009
1010                 cuda_assert(cuModuleGetGlobal(&mem, &bytes, cuModule, name));
1011                 //assert(bytes == size);
1012                 cuda_assert(cuMemcpyHtoD(mem, host, size));
1013         }
1014
1015         void tex_alloc(device_memory& mem)
1016         {
1017                 CUDAContextScope scope(this);
1018
1019                 /* General variables for both architectures */
1020                 string bind_name = mem.name;
1021                 size_t dsize = datatype_size(mem.data_type);
1022                 size_t size = mem.memory_size();
1023
1024                 CUaddress_mode address_mode = CU_TR_ADDRESS_MODE_WRAP;
1025                 switch(mem.extension) {
1026                         case EXTENSION_REPEAT:
1027                                 address_mode = CU_TR_ADDRESS_MODE_WRAP;
1028                                 break;
1029                         case EXTENSION_EXTEND:
1030                                 address_mode = CU_TR_ADDRESS_MODE_CLAMP;
1031                                 break;
1032                         case EXTENSION_CLIP:
1033                                 address_mode = CU_TR_ADDRESS_MODE_BORDER;
1034                                 break;
1035                         default:
1036                                 assert(0);
1037                                 break;
1038                 }
1039
1040                 CUfilter_mode filter_mode;
1041                 if(mem.interpolation == INTERPOLATION_CLOSEST) {
1042                         filter_mode = CU_TR_FILTER_MODE_POINT;
1043                 }
1044                 else {
1045                         filter_mode = CU_TR_FILTER_MODE_LINEAR;
1046                 }
1047
1048                 /* Data Storage */
1049                 if(mem.interpolation == INTERPOLATION_NONE) {
1050                         generic_alloc(mem);
1051                         generic_copy_to(mem);
1052
1053                         CUdeviceptr cumem;
1054                         size_t cubytes;
1055
1056                         cuda_assert(cuModuleGetGlobal(&cumem, &cubytes, cuModule, bind_name.c_str()));
1057
1058                         if(cubytes == 8) {
1059                                 /* 64 bit device pointer */
1060                                 uint64_t ptr = mem.device_pointer;
1061                                 cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
1062                         }
1063                         else {
1064                                 /* 32 bit device pointer */
1065                                 uint32_t ptr = (uint32_t)mem.device_pointer;
1066                                 cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
1067                         }
1068                         return;
1069                 }
1070
1071                 /* Image Texture Storage */
1072                 CUarray_format_enum format;
1073                 switch(mem.data_type) {
1074                         case TYPE_UCHAR: format = CU_AD_FORMAT_UNSIGNED_INT8; break;
1075                         case TYPE_UINT: format = CU_AD_FORMAT_UNSIGNED_INT32; break;
1076                         case TYPE_INT: format = CU_AD_FORMAT_SIGNED_INT32; break;
1077                         case TYPE_FLOAT: format = CU_AD_FORMAT_FLOAT; break;
1078                         case TYPE_HALF: format = CU_AD_FORMAT_HALF; break;
1079                         default: assert(0); return;
1080                 }
1081
1082                 CUDAMem *cmem = NULL;
1083                 CUarray array_3d = NULL;
1084                 size_t src_pitch = mem.data_width * dsize * mem.data_elements;
1085                 size_t dst_pitch = src_pitch;
1086
1087                 if(mem.data_depth > 1) {
1088                         /* 3D texture using array, there is no API for linear memory. */
1089                         CUDA_ARRAY3D_DESCRIPTOR desc;
1090
1091                         desc.Width = mem.data_width;
1092                         desc.Height = mem.data_height;
1093                         desc.Depth = mem.data_depth;
1094                         desc.Format = format;
1095                         desc.NumChannels = mem.data_elements;
1096                         desc.Flags = 0;
1097
1098                         VLOG(1) << "Array 3D allocate: " << mem.name << ", "
1099                                 << string_human_readable_number(mem.memory_size()) << " bytes. ("
1100                                 << string_human_readable_size(mem.memory_size()) << ")";
1101
1102                         cuda_assert(cuArray3DCreate(&array_3d, &desc));
1103
1104                         if(!array_3d) {
1105                                 return;
1106                         }
1107
1108                         CUDA_MEMCPY3D param;
1109                         memset(&param, 0, sizeof(param));
1110                         param.dstMemoryType = CU_MEMORYTYPE_ARRAY;
1111                         param.dstArray = array_3d;
1112                         param.srcMemoryType = CU_MEMORYTYPE_HOST;
1113                         param.srcHost = mem.host_pointer;
1114                         param.srcPitch = src_pitch;
1115                         param.WidthInBytes = param.srcPitch;
1116                         param.Height = mem.data_height;
1117                         param.Depth = mem.data_depth;
1118
1119                         cuda_assert(cuMemcpy3D(&param));
1120
1121                         mem.device_pointer = (device_ptr)array_3d;
1122                         mem.device_size = size;
1123                         stats.mem_alloc(size);
1124
1125                         cmem = &cuda_mem_map[&mem];
1126                         cmem->texobject = 0;
1127                         cmem->array = array_3d;
1128                 }
1129                 else if(mem.data_height > 0) {
1130                         /* 2D texture, using pitch aligned linear memory. */
1131                         int alignment = 0;
1132                         cuda_assert(cuDeviceGetAttribute(&alignment, CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT, cuDevice));
1133                         dst_pitch = align_up(src_pitch, alignment);
1134                         size_t dst_size = dst_pitch * mem.data_height;
1135
1136                         cmem = generic_alloc(mem, dst_size - mem.memory_size());
1137                         if(!cmem) {
1138                                 return;
1139                         }
1140
1141                         CUDA_MEMCPY2D param;
1142                         memset(&param, 0, sizeof(param));
1143                         param.dstMemoryType = CU_MEMORYTYPE_DEVICE;
1144                         param.dstDevice = mem.device_pointer;
1145                         param.dstPitch = dst_pitch;
1146                         param.srcMemoryType = CU_MEMORYTYPE_HOST;
1147                         param.srcHost = mem.host_pointer;
1148                         param.srcPitch = src_pitch;
1149                         param.WidthInBytes = param.srcPitch;
1150                         param.Height = mem.data_height;
1151
1152                         cuda_assert(cuMemcpy2DUnaligned(&param));
1153                 }
1154                 else {
1155                         /* 1D texture, using linear memory. */
1156                         cmem = generic_alloc(mem);
1157                         if(!cmem) {
1158                                 return;
1159                         }
1160
1161                         cuda_assert(cuMemcpyHtoD(mem.device_pointer, mem.host_pointer, size));
1162                 }
1163
1164                 /* Kepler+, bindless textures. */
1165                 int flat_slot = 0;
1166                 if(string_startswith(mem.name, "__tex_image")) {
1167                 int pos = string(mem.name).rfind("_");
1168                         flat_slot = atoi(mem.name + pos + 1);
1169                 }
1170                 else {
1171                         assert(0);
1172                 }
1173
1174                 CUDA_RESOURCE_DESC resDesc;
1175                 memset(&resDesc, 0, sizeof(resDesc));
1176
1177                 if(array_3d) {
1178                         resDesc.resType = CU_RESOURCE_TYPE_ARRAY;
1179                         resDesc.res.array.hArray = array_3d;
1180                         resDesc.flags = 0;
1181                 }
1182                 else if(mem.data_height > 0) {
1183                         resDesc.resType = CU_RESOURCE_TYPE_PITCH2D;
1184                         resDesc.res.pitch2D.devPtr = mem.device_pointer;
1185                         resDesc.res.pitch2D.format = format;
1186                         resDesc.res.pitch2D.numChannels = mem.data_elements;
1187                         resDesc.res.pitch2D.height = mem.data_height;
1188                         resDesc.res.pitch2D.width = mem.data_width;
1189                         resDesc.res.pitch2D.pitchInBytes = dst_pitch;
1190                 }
1191                 else {
1192                         resDesc.resType = CU_RESOURCE_TYPE_LINEAR;
1193                         resDesc.res.linear.devPtr = mem.device_pointer;
1194                         resDesc.res.linear.format = format;
1195                         resDesc.res.linear.numChannels = mem.data_elements;
1196                         resDesc.res.linear.sizeInBytes = mem.device_size;
1197                 }
1198
1199                 CUDA_TEXTURE_DESC texDesc;
1200                 memset(&texDesc, 0, sizeof(texDesc));
1201                 texDesc.addressMode[0] = address_mode;
1202                 texDesc.addressMode[1] = address_mode;
1203                 texDesc.addressMode[2] = address_mode;
1204                 texDesc.filterMode = filter_mode;
1205                 texDesc.flags = CU_TRSF_NORMALIZED_COORDINATES;
1206
1207                 cuda_assert(cuTexObjectCreate(&cmem->texobject, &resDesc, &texDesc, NULL));
1208
1209                 /* Grow the texture info array if needed. */
1210                 if(flat_slot >= texture_info.size()) {
1211                         /* Allocate some slots in advance, to reduce amount
1212                          * of re-allocations. */
1213                         texture_info.resize(flat_slot + 128);
1214                 }
1215
1216                 /* Set mapping and tag that we need to (re-)upload to the device. */
1217                 TextureInfo& info = texture_info[flat_slot];
1218                 info.data = (uint64_t)cmem->texobject;
1219                 info.cl_buffer = 0;
1220                 info.interpolation = mem.interpolation;
1221                 info.extension = mem.extension;
1222                 info.width = mem.data_width;
1223                 info.height = mem.data_height;
1224                 info.depth = mem.data_depth;
1225                 need_texture_info = true;
1226         }
1227
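        /* Illustrative example (texture name assumed, not taken from the original
         * sources): for a texture named "__tex_image_float4_009", rfind("_") locates
         * the last underscore and atoi() parses the suffix, so flat_slot becomes 9 and
         * slot 9 of texture_info receives this texture's bindless handle. */
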
1228         void tex_free(device_memory& mem)
1229         {
1230                 if(mem.device_pointer) {
1231                         CUDAContextScope scope(this);
1232                         const CUDAMem& cmem = cuda_mem_map[&mem];
1233
1234                         if(cmem.texobject) {
1235                                 /* Free bindless texture. */
1236                                 cuTexObjectDestroy(cmem.texobject);
1237                         }
1238
1239                         if(cmem.array) {
1240                                 /* Free array. */
1241                                 cuArrayDestroy(cmem.array);
1242                                 stats.mem_free(mem.device_size);
1243                                 mem.device_pointer = 0;
1244                                 mem.device_size = 0;
1245
1246                                 cuda_mem_map.erase(cuda_mem_map.find(&mem));
1247                         }
1248                         else {
1249                                 generic_free(mem);
1250                         }
1251                 }
1252         }
1253
1254         bool denoising_set_tiles(device_ptr *buffers, DenoisingTask *task)
1255         {
1256                 TilesInfo *tiles = (TilesInfo*) task->tiles_mem.host_pointer;
1257                 for(int i = 0; i < 9; i++) {
1258                         tiles->buffers[i] = buffers[i];
1259                 }
1260
1261                 task->tiles_mem.copy_to_device();
1262
1263                 return !have_error();
1264         }
1265
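/* Helper to derive 2D launch dimensions from a kernel's reported maximum number
 * of threads per block: a square block of floor(sqrt(max_threads)) threads per
 * side, with the grid sized by ceiling division so the whole (w, h) range is
 * covered. */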
1266 #define CUDA_GET_BLOCKSIZE(func, w, h)                                                                          \
1267                         int threads_per_block;                                                                              \
1268                         cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, func)); \
1269                         int threads = (int)sqrt((float)threads_per_block);                                                  \
1270                         int xblocks = ((w) + threads - 1)/threads;                                                          \
1271                         int yblocks = ((h) + threads - 1)/threads;
1272
1273 #define CUDA_LAUNCH_KERNEL(func, args)                      \
1274                         cuda_assert(cuLaunchKernel(func,                \
1275                                                    xblocks, yblocks, 1, \
1276                                                    threads, threads, 1, \
1277                                                    0, 0, args, 0));
1278
1279 /* Similar to the above, but for 1-dimensional blocks. */
1280 #define CUDA_GET_BLOCKSIZE_1D(func, w, h)                                                                       \
1281                         int threads_per_block;                                                                              \
1282                         cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, func)); \
1283                         int xblocks = ((w) + threads_per_block - 1)/threads_per_block;                                      \
1284                         int yblocks = h;
1285
1286 #define CUDA_LAUNCH_KERNEL_1D(func, args)                       \
1287                         cuda_assert(cuLaunchKernel(func,                    \
1288                                                    xblocks, yblocks, 1,     \
1289                                                    threads_per_block, 1, 1, \
1290                                                    0, 0, args, 0));
1291
1292         bool denoising_non_local_means(device_ptr image_ptr, device_ptr guide_ptr, device_ptr variance_ptr, device_ptr out_ptr,
1293                                        DenoisingTask *task)
1294         {
1295                 if(have_error())
1296                         return false;
1297
1298                 CUDAContextScope scope(this);
1299
1300                 int stride = task->buffer.stride;
1301                 int w = task->buffer.width;
1302                 int h = task->buffer.h;
1303                 int r = task->nlm_state.r;
1304                 int f = task->nlm_state.f;
1305                 float a = task->nlm_state.a;
1306                 float k_2 = task->nlm_state.k_2;
1307
1308                 int shift_stride = stride*h;
1309                 int num_shifts = (2*r+1)*(2*r+1);
1310                 int mem_size = sizeof(float)*shift_stride*num_shifts;
1311                 int channel_offset = 0;
1312
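                /* The non-local means kernels process all (2*r+1)^2 shifts of the
                 * search window at once: each shift gets a plane of shift_stride
                 * floats, and the temporary buffer holds two such stacks (the raw
                 * and the blurred per-shift differences). */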
1313                 device_only_memory<uchar> temporary_mem(this, "Denoising temporary_mem");
1314                 temporary_mem.alloc_to_device(2*mem_size);
1315
1316                 if(have_error())
1317                         return false;
1318
1319                 CUdeviceptr difference     = cuda_device_ptr(temporary_mem.device_pointer);
1320                 CUdeviceptr blurDifference = difference + mem_size;
1321
1322                 CUdeviceptr weightAccum = task->nlm_state.temporary_3_ptr;
1323                 cuda_assert(cuMemsetD8(weightAccum, 0, sizeof(float)*shift_stride));
1324                 cuda_assert(cuMemsetD8(out_ptr, 0, sizeof(float)*shift_stride));
1325
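                /* Fixed NLM pipeline: compute per-shift differences, blur them,
                 * turn them into weights, blur again, and accumulate weighted
                 * pixels into the output; the final pass below divides by the
                 * accumulated weights to normalize. */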
1326                 {
1327                         CUfunction cuNLMCalcDifference, cuNLMBlur, cuNLMCalcWeight, cuNLMUpdateOutput;
1328                         cuda_assert(cuModuleGetFunction(&cuNLMCalcDifference, cuFilterModule, "kernel_cuda_filter_nlm_calc_difference"));
1329                         cuda_assert(cuModuleGetFunction(&cuNLMBlur,           cuFilterModule, "kernel_cuda_filter_nlm_blur"));
1330                         cuda_assert(cuModuleGetFunction(&cuNLMCalcWeight,     cuFilterModule, "kernel_cuda_filter_nlm_calc_weight"));
1331                         cuda_assert(cuModuleGetFunction(&cuNLMUpdateOutput,   cuFilterModule, "kernel_cuda_filter_nlm_update_output"));
1332
1333                         cuda_assert(cuFuncSetCacheConfig(cuNLMCalcDifference, CU_FUNC_CACHE_PREFER_L1));
1334                         cuda_assert(cuFuncSetCacheConfig(cuNLMBlur,           CU_FUNC_CACHE_PREFER_L1));
1335                         cuda_assert(cuFuncSetCacheConfig(cuNLMCalcWeight,     CU_FUNC_CACHE_PREFER_L1));
1336                         cuda_assert(cuFuncSetCacheConfig(cuNLMUpdateOutput,   CU_FUNC_CACHE_PREFER_L1));
1337
1338                         CUDA_GET_BLOCKSIZE_1D(cuNLMCalcDifference, w*h, num_shifts);
1339
1340                         void *calc_difference_args[] = {&guide_ptr, &variance_ptr, &difference, &w, &h, &stride, &shift_stride, &r, &channel_offset, &a, &k_2};
1341                         void *blur_args[]            = {&difference, &blurDifference, &w, &h, &stride, &shift_stride, &r, &f};
1342                         void *calc_weight_args[]     = {&blurDifference, &difference, &w, &h, &stride, &shift_stride, &r, &f};
1343                         void *update_output_args[]   = {&blurDifference, &image_ptr, &out_ptr, &weightAccum, &w, &h, &stride, &shift_stride, &r, &f};
1344
1345                         CUDA_LAUNCH_KERNEL_1D(cuNLMCalcDifference, calc_difference_args);
1346                         CUDA_LAUNCH_KERNEL_1D(cuNLMBlur, blur_args);
1347                         CUDA_LAUNCH_KERNEL_1D(cuNLMCalcWeight, calc_weight_args);
1348                         CUDA_LAUNCH_KERNEL_1D(cuNLMBlur, blur_args);
1349                         CUDA_LAUNCH_KERNEL_1D(cuNLMUpdateOutput, update_output_args);
1350                 }
1351
1352                 temporary_mem.free();
1353
1354                 {
1355                         CUfunction cuNLMNormalize;
1356                         cuda_assert(cuModuleGetFunction(&cuNLMNormalize, cuFilterModule, "kernel_cuda_filter_nlm_normalize"));
1357                         cuda_assert(cuFuncSetCacheConfig(cuNLMNormalize, CU_FUNC_CACHE_PREFER_L1));
1358                         void *normalize_args[] = {&out_ptr, &weightAccum, &w, &h, &stride};
1359                         CUDA_GET_BLOCKSIZE(cuNLMNormalize, w, h);
1360                         CUDA_LAUNCH_KERNEL(cuNLMNormalize, normalize_args);
1361                         cuda_assert(cuCtxSynchronize());
1362                 }
1363
1364                 return !have_error();
1365         }
1366
1367         bool denoising_construct_transform(DenoisingTask *task)
1368         {
1369                 if(have_error())
1370                         return false;
1371
1372                 CUDAContextScope scope(this);
1373
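                /* Build the per-pixel feature-space transform and its rank over the
                 * storage area; both are consumed later by denoising_reconstruct(). */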
1374                 CUfunction cuFilterConstructTransform;
1375                 cuda_assert(cuModuleGetFunction(&cuFilterConstructTransform, cuFilterModule, "kernel_cuda_filter_construct_transform"));
1376                 cuda_assert(cuFuncSetCacheConfig(cuFilterConstructTransform, CU_FUNC_CACHE_PREFER_SHARED));
1377                 CUDA_GET_BLOCKSIZE(cuFilterConstructTransform,
1378                                    task->storage.w,
1379                                    task->storage.h);
1380
1381                 void *args[] = {&task->buffer.mem.device_pointer,
1382                                 &task->storage.transform.device_pointer,
1383                                 &task->storage.rank.device_pointer,
1384                                 &task->filter_area,
1385                                 &task->rect,
1386                                 &task->radius,
1387                                 &task->pca_threshold,
1388                                 &task->buffer.pass_stride};
1389                 CUDA_LAUNCH_KERNEL(cuFilterConstructTransform, args);
1390                 cuda_assert(cuCtxSynchronize());
1391
1392                 return !have_error();
1393         }
1394
1395         bool denoising_reconstruct(device_ptr color_ptr,
1396                                    device_ptr color_variance_ptr,
1397                                    device_ptr output_ptr,
1398                                    DenoisingTask *task)
1399         {
1400                 if(have_error())
1401                         return false;
1402
1403                 CUDAContextScope scope(this);
1404
1405                 mem_zero(task->storage.XtWX);
1406                 mem_zero(task->storage.XtWY);
1407
1408                 int r = task->radius;
1409                 int f = 4;
1410                 float a = 1.0f;
1411                 float k_2 = task->nlm_k_2;
1412
1413                 int w = task->reconstruction_state.source_w;
1414                 int h = task->reconstruction_state.source_h;
1415                 int stride = task->buffer.stride;
1416
1417                 int shift_stride = stride*h;
1418                 int num_shifts = (2*r+1)*(2*r+1);
1419                 int mem_size = sizeof(float)*shift_stride*num_shifts;
1420
1421                 device_only_memory<uchar> temporary_mem(this, "Denoising temporary_mem");
1422                 temporary_mem.alloc_to_device(2*mem_size);
1423
1424                 if(have_error())
1425                         return false;
1426
1427                 CUdeviceptr difference     = cuda_device_ptr(temporary_mem.device_pointer);
1428                 CUdeviceptr blurDifference = difference + mem_size;
1429
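                /* Same NLM weighting scheme as in denoising_non_local_means(), but
                 * instead of filtering the image directly, the weights accumulate a
                 * per-pixel least-squares system (XtWX, XtWY) in feature space, which
                 * kernel_cuda_filter_finalize then solves to produce the output. */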
1430                 {
1431                         CUfunction cuNLMCalcDifference, cuNLMBlur, cuNLMCalcWeight, cuNLMConstructGramian;
1432                         cuda_assert(cuModuleGetFunction(&cuNLMCalcDifference,   cuFilterModule, "kernel_cuda_filter_nlm_calc_difference"));
1433                         cuda_assert(cuModuleGetFunction(&cuNLMBlur,             cuFilterModule, "kernel_cuda_filter_nlm_blur"));
1434                         cuda_assert(cuModuleGetFunction(&cuNLMCalcWeight,       cuFilterModule, "kernel_cuda_filter_nlm_calc_weight"));
1435                         cuda_assert(cuModuleGetFunction(&cuNLMConstructGramian, cuFilterModule, "kernel_cuda_filter_nlm_construct_gramian"));
1436
1437                         cuda_assert(cuFuncSetCacheConfig(cuNLMCalcDifference,   CU_FUNC_CACHE_PREFER_L1));
1438                         cuda_assert(cuFuncSetCacheConfig(cuNLMBlur,             CU_FUNC_CACHE_PREFER_L1));
1439                         cuda_assert(cuFuncSetCacheConfig(cuNLMCalcWeight,       CU_FUNC_CACHE_PREFER_L1));
1440                         cuda_assert(cuFuncSetCacheConfig(cuNLMConstructGramian, CU_FUNC_CACHE_PREFER_SHARED));
1441
1442                         CUDA_GET_BLOCKSIZE_1D(cuNLMCalcDifference,
1443                                              task->reconstruction_state.source_w * task->reconstruction_state.source_h,
1444                                              num_shifts);
1445
1446                         void *calc_difference_args[] = {&color_ptr, &color_variance_ptr, &difference, &w, &h, &stride, &shift_stride, &r, &task->buffer.pass_stride, &a, &k_2};
1447                         void *blur_args[]            = {&difference, &blurDifference, &w, &h, &stride, &shift_stride, &r, &f};
1448                         void *calc_weight_args[]     = {&blurDifference, &difference, &w, &h, &stride, &shift_stride, &r, &f};
1449                         void *construct_gramian_args[] = {&blurDifference,
1450                                                           &task->buffer.mem.device_pointer,
1451                                                           &task->storage.transform.device_pointer,
1452                                                           &task->storage.rank.device_pointer,
1453                                                           &task->storage.XtWX.device_pointer,
1454                                                           &task->storage.XtWY.device_pointer,
1455                                                           &task->reconstruction_state.filter_window,
1456                                                           &w, &h, &stride,
1457                                                           &shift_stride, &r,
1458                                                           &f,
1459                                                           &task->buffer.pass_stride};
1460
1461                         CUDA_LAUNCH_KERNEL_1D(cuNLMCalcDifference, calc_difference_args);
1462                         CUDA_LAUNCH_KERNEL_1D(cuNLMBlur, blur_args);
1463                         CUDA_LAUNCH_KERNEL_1D(cuNLMCalcWeight, calc_weight_args);
1464                         CUDA_LAUNCH_KERNEL_1D(cuNLMBlur, blur_args);
1465                         CUDA_LAUNCH_KERNEL_1D(cuNLMConstructGramian, construct_gramian_args);
1466                 }
1467
1468                 temporary_mem.free();
1469
1470                 {
1471                         CUfunction cuFinalize;
1472                         cuda_assert(cuModuleGetFunction(&cuFinalize, cuFilterModule, "kernel_cuda_filter_finalize"));
1473                         cuda_assert(cuFuncSetCacheConfig(cuFinalize, CU_FUNC_CACHE_PREFER_L1));
1474                         void *finalize_args[] = {&output_ptr,
1475                                                          &task->storage.rank.device_pointer,
1476                                                          &task->storage.XtWX.device_pointer,
1477                                                          &task->storage.XtWY.device_pointer,
1478                                                          &task->filter_area,
1479                                                          &task->reconstruction_state.buffer_params.x,
1480                                                          &task->render_buffer.samples};
1481                         CUDA_GET_BLOCKSIZE(cuFinalize,
1482                                            task->reconstruction_state.source_w,
1483                                            task->reconstruction_state.source_h);
1484                         CUDA_LAUNCH_KERNEL(cuFinalize, finalize_args);
1485                 }
1486
1487                 cuda_assert(cuCtxSynchronize());
1488
1489                 return !have_error();
1490         }
1491
1492         bool denoising_combine_halves(device_ptr a_ptr, device_ptr b_ptr,
1493                                       device_ptr mean_ptr, device_ptr variance_ptr,
1494                                       int r, int4 rect, DenoisingTask *task)
1495         {
1496                 if(have_error())
1497                         return false;
1498
1499                 CUDAContextScope scope(this);
1500
1501                 CUfunction cuFilterCombineHalves;
1502                 cuda_assert(cuModuleGetFunction(&cuFilterCombineHalves, cuFilterModule, "kernel_cuda_filter_combine_halves"));
1503                 cuda_assert(cuFuncSetCacheConfig(cuFilterCombineHalves, CU_FUNC_CACHE_PREFER_L1));
1504                 CUDA_GET_BLOCKSIZE(cuFilterCombineHalves,
1505                                    task->rect.z-task->rect.x,
1506                                    task->rect.w-task->rect.y);
1507
1508                 void *args[] = {&mean_ptr,
1509                                 &variance_ptr,
1510                                 &a_ptr,
1511                                 &b_ptr,
1512                                 &rect,
1513                                 &r};
1514                 CUDA_LAUNCH_KERNEL(cuFilterCombineHalves, args);
1515                 cuda_assert(cuCtxSynchronize());
1516
1517                 return !have_error();
1518         }
1519
1520         bool denoising_divide_shadow(device_ptr a_ptr, device_ptr b_ptr,
1521                                      device_ptr sample_variance_ptr, device_ptr sv_variance_ptr,
1522                                      device_ptr buffer_variance_ptr, DenoisingTask *task)
1523         {
1524                 if(have_error())
1525                         return false;
1526
1527                 CUDAContextScope scope(this);
1528
1529                 CUfunction cuFilterDivideShadow;
1530                 cuda_assert(cuModuleGetFunction(&cuFilterDivideShadow, cuFilterModule, "kernel_cuda_filter_divide_shadow"));
1531                 cuda_assert(cuFuncSetCacheConfig(cuFilterDivideShadow, CU_FUNC_CACHE_PREFER_L1));
1532                 CUDA_GET_BLOCKSIZE(cuFilterDivideShadow,
1533                                    task->rect.z-task->rect.x,
1534                                    task->rect.w-task->rect.y);
1535
1536                 void *args[] = {&task->render_buffer.samples,
1537                                 &task->tiles_mem.device_pointer,
1538                                 &a_ptr,
1539                                 &b_ptr,
1540                                 &sample_variance_ptr,
1541                                 &sv_variance_ptr,
1542                                 &buffer_variance_ptr,
1543                                 &task->rect,
1544                                 &task->render_buffer.pass_stride,
1545                                 &task->render_buffer.denoising_data_offset};
1546                 CUDA_LAUNCH_KERNEL(cuFilterDivideShadow, args);
1547                 cuda_assert(cuCtxSynchronize());
1548
1549                 return !have_error();
1550         }
1551
1552         bool denoising_get_feature(int mean_offset,
1553                                    int variance_offset,
1554                                    device_ptr mean_ptr,
1555                                    device_ptr variance_ptr,
1556                                    DenoisingTask *task)
1557         {
1558                 if(have_error())
1559                         return false;
1560
1561                 CUDAContextScope scope(this);
1562
1563                 CUfunction cuFilterGetFeature;
1564                 cuda_assert(cuModuleGetFunction(&cuFilterGetFeature, cuFilterModule, "kernel_cuda_filter_get_feature"));
1565                 cuda_assert(cuFuncSetCacheConfig(cuFilterGetFeature, CU_FUNC_CACHE_PREFER_L1));
1566                 CUDA_GET_BLOCKSIZE(cuFilterGetFeature,
1567                                    task->rect.z-task->rect.x,
1568                                    task->rect.w-task->rect.y);
1569
1570                 void *args[] = {&task->render_buffer.samples,
1571                                 &task->tiles_mem.device_pointer,
1572                                 &mean_offset,
1573                                 &variance_offset,
1574                                 &mean_ptr,
1575                                 &variance_ptr,
1576                                 &task->rect,
1577                                 &task->render_buffer.pass_stride,
1578                                 &task->render_buffer.denoising_data_offset};
1579                 CUDA_LAUNCH_KERNEL(cuFilterGetFeature, args);
1580                 cuda_assert(cuCtxSynchronize());
1581
1582                 return !have_error();
1583         }
1584
1585         bool denoising_detect_outliers(device_ptr image_ptr,
1586                                        device_ptr variance_ptr,
1587                                        device_ptr depth_ptr,
1588                                        device_ptr output_ptr,
1589                                        DenoisingTask *task)
1590         {
1591                 if(have_error())
1592                         return false;
1593
1594                 CUDAContextScope scope(this);
1595
1596                 CUfunction cuFilterDetectOutliers;
1597                 cuda_assert(cuModuleGetFunction(&cuFilterDetectOutliers, cuFilterModule, "kernel_cuda_filter_detect_outliers"));
1598                 cuda_assert(cuFuncSetCacheConfig(cuFilterDetectOutliers, CU_FUNC_CACHE_PREFER_L1));
1599                 CUDA_GET_BLOCKSIZE(cuFilterDetectOutliers,
1600                                    task->rect.z-task->rect.x,
1601                                    task->rect.w-task->rect.y);
1602
1603                 void *args[] = {&image_ptr,
1604                                 &variance_ptr,
1605                                 &depth_ptr,
1606                                 &output_ptr,
1607                                 &task->rect,
1608                                 &task->buffer.pass_stride};
1609
1610                 CUDA_LAUNCH_KERNEL(cuFilterDetectOutliers, args);
1611                 cuda_assert(cuCtxSynchronize());
1612
1613                 return !have_error();
1614         }
1615
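        /* Bind the CUDA implementations of the individual denoising steps to the
         * DenoisingTask, gather the 3x3 neighborhood of tiles around the tile being
         * denoised (the tile itself goes into the center slot), and run the generic
         * denoising pipeline on them. */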
1616         void denoise(RenderTile &rtile, DenoisingTask& denoising, const DeviceTask &task)
1617         {
1618                 denoising.functions.construct_transform = function_bind(&CUDADevice::denoising_construct_transform, this, &denoising);
1619                 denoising.functions.reconstruct = function_bind(&CUDADevice::denoising_reconstruct, this, _1, _2, _3, &denoising);
1620                 denoising.functions.divide_shadow = function_bind(&CUDADevice::denoising_divide_shadow, this, _1, _2, _3, _4, _5, &denoising);
1621                 denoising.functions.non_local_means = function_bind(&CUDADevice::denoising_non_local_means, this, _1, _2, _3, _4, &denoising);
1622                 denoising.functions.combine_halves = function_bind(&CUDADevice::denoising_combine_halves, this, _1, _2, _3, _4, _5, _6, &denoising);
1623                 denoising.functions.get_feature = function_bind(&CUDADevice::denoising_get_feature, this, _1, _2, _3, _4, &denoising);
1624                 denoising.functions.detect_outliers = function_bind(&CUDADevice::denoising_detect_outliers, this, _1, _2, _3, _4, &denoising);
1625                 denoising.functions.set_tiles = function_bind(&CUDADevice::denoising_set_tiles, this, _1, &denoising);
1626
1627                 denoising.filter_area = make_int4(rtile.x, rtile.y, rtile.w, rtile.h);
1628                 denoising.render_buffer.samples = rtile.sample;
1629
1630                 RenderTile rtiles[9];
1631                 rtiles[4] = rtile;
1632                 task.map_neighbor_tiles(rtiles, this);
1633                 denoising.tiles_from_rendertiles(rtiles);
1634
1635                 denoising.init_from_devicetask(task);
1636
1637                 denoising.run_denoising();
1638
1639                 task.unmap_neighbor_tiles(rtiles, this);
1640         }
1641
1642         void path_trace(DeviceTask& task, RenderTile& rtile, device_vector<WorkTile>& work_tiles)
1643         {
1644                 scoped_timer timer(&rtile.buffers->render_time);
1645
1646                 if(have_error())
1647                         return;
1648
1649                 CUDAContextScope scope(this);
1650                 CUfunction cuPathTrace;
1651
1652                 /* Get kernel function. */
1653                 if(task.integrator_branched) {
1654                         cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_branched_path_trace"));
1655                 }
1656                 else {
1657                         cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_path_trace"));
1658                 }
1659
1660                 if(have_error()) {
1661                         return;
1662                 }
1663
1664                 cuda_assert(cuFuncSetCacheConfig(cuPathTrace, CU_FUNC_CACHE_PREFER_L1));
1665
1666                 /* Allocate work tile. */
1667                 work_tiles.alloc(1);
1668
1669                 WorkTile *wtile = work_tiles.data();
1670                 wtile->x = rtile.x;
1671                 wtile->y = rtile.y;
1672                 wtile->w = rtile.w;
1673                 wtile->h = rtile.h;
1674                 wtile->offset = rtile.offset;
1675                 wtile->stride = rtile.stride;
1676                 wtile->buffer = (float*)cuda_device_ptr(rtile.buffer);
1677
1678                 /* Prepare work size. More step samples render faster, but for now we
1679                  * remain conservative for GPUs connected to a display to avoid driver
1680                  * timeouts and display freezing. */
1681                 int min_blocks, num_threads_per_block;
1682                 cuda_assert(cuOccupancyMaxPotentialBlockSize(&min_blocks, &num_threads_per_block, cuPathTrace, NULL, 0, 0));
1683                 if(!info.display_device) {
1684                         min_blocks *= 8;
1685                 }
1686
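                /* Pick enough samples per launch to keep the occupancy-derived
                 * number of threads busy: total threads divided by the number of
                 * pixels in the tile, rounded up. */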
1687                 uint step_samples = divide_up(min_blocks * num_threads_per_block, wtile->w * wtile->h);
1688
1689                 /* Render all samples. */
1690                 int start_sample = rtile.start_sample;
1691                 int end_sample = rtile.start_sample + rtile.num_samples;
1692
1693                 for(int sample = start_sample; sample < end_sample; sample += step_samples) {
1694                         /* Setup and copy work tile to device. */
1695                         wtile->start_sample = sample;
1696                         wtile->num_samples = min(step_samples, end_sample - sample);
1697                         work_tiles.copy_to_device();
1698
1699                         CUdeviceptr d_work_tiles = cuda_device_ptr(work_tiles.device_pointer);
1700                         uint total_work_size = wtile->w * wtile->h * wtile->num_samples;
1701                         uint num_blocks = divide_up(total_work_size, num_threads_per_block);
1702
1703                         /* Launch kernel. */
1704                         void *args[] = {&d_work_tiles,
1705                                         &total_work_size};
1706
1707                         cuda_assert(cuLaunchKernel(cuPathTrace,
1708                                                    num_blocks, 1, 1,
1709                                                    num_threads_per_block, 1, 1,
1710                                                    0, 0, args, 0));
1711
1712                         cuda_assert(cuCtxSynchronize());
1713
1714                         /* Update progress. */
1715                         rtile.sample = sample + wtile->num_samples;
1716                         task.update_progress(&rtile, rtile.w*rtile.h*wtile->num_samples);
1717
1718                         if(task.get_cancel()) {
1719                                 if(task.need_finish_queue == false)
1720                                         break;
1721                         }
1722                 }
1723         }
1724
1725         void film_convert(DeviceTask& task, device_ptr buffer, device_ptr rgba_byte, device_ptr rgba_half)
1726         {
1727                 if(have_error())
1728                         return;
1729
1730                 CUDAContextScope scope(this);
1731
1732                 CUfunction cuFilmConvert;
1733                 CUdeviceptr d_rgba = map_pixels((rgba_byte)? rgba_byte: rgba_half);
1734                 CUdeviceptr d_buffer = cuda_device_ptr(buffer);
1735
1736                 /* get kernel function */
1737                 if(rgba_half) {
1738                         cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_half_float"));
1739                 }
1740                 else {
1741                         cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_byte"));
1742                 }
1743
1744
1745                 float sample_scale = 1.0f/(task.sample + 1);
1746
1747                 /* pass in parameters */
1748                 void *args[] = {&d_rgba,
1749                                 &d_buffer,
1750                                 &sample_scale,
1751                                 &task.x,
1752                                 &task.y,
1753                                 &task.w,
1754                                 &task.h,
1755                                 &task.offset,
1756                                 &task.stride};
1757
1758                 /* launch kernel */
1759                 int threads_per_block;
1760                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuFilmConvert));
1761
1762                 int xthreads = (int)sqrt(threads_per_block);
1763                 int ythreads = (int)sqrt(threads_per_block);
1764                 int xblocks = (task.w + xthreads - 1)/xthreads;
1765                 int yblocks = (task.h + ythreads - 1)/ythreads;
1766
1767                 cuda_assert(cuFuncSetCacheConfig(cuFilmConvert, CU_FUNC_CACHE_PREFER_L1));
1768
1769                 cuda_assert(cuLaunchKernel(cuFilmConvert,
1770                                            xblocks, yblocks, 1, /* blocks */
1771                                            xthreads, ythreads, 1, /* threads */
1772                                            0, 0, args, 0));
1773
1774                 unmap_pixels((rgba_byte)? rgba_byte: rgba_half);
1775
1776                 cuda_assert(cuCtxSynchronize());
1777         }
1778
1779         void shader(DeviceTask& task)
1780         {
1781                 if(have_error())
1782                         return;
1783
1784                 CUDAContextScope scope(this);
1785
1786                 CUfunction cuShader;
1787                 CUdeviceptr d_input = cuda_device_ptr(task.shader_input);
1788                 CUdeviceptr d_output = cuda_device_ptr(task.shader_output);
1789
1790                 /* get kernel function */
1791                 if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
1792                         cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_bake"));
1793                 }
1794                 else if(task.shader_eval_type == SHADER_EVAL_DISPLACE) {
1795                         cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_displace"));
1796                 }
1797                 else {
1798                         cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_background"));
1799                 }
1800
1801                 /* do tasks in smaller chunks, so we can cancel it */
1802                 const int shader_chunk_size = 65536;
1803                 const int start = task.shader_x;
1804                 const int end = task.shader_x + task.shader_w;
1805                 int offset = task.offset;
1806
1807                 bool canceled = false;
1808                 for(int sample = 0; sample < task.num_samples && !canceled; sample++) {
1809                         for(int shader_x = start; shader_x < end; shader_x += shader_chunk_size) {
1810                                 int shader_w = min(shader_chunk_size, end - shader_x);
1811
1812                                 /* pass in parameters */
1813                                 void *args[8];
1814                                 int arg = 0;
1815                                 args[arg++] = &d_input;
1816                                 args[arg++] = &d_output;
1817                                 args[arg++] = &task.shader_eval_type;
1818                                 if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
1819                                         args[arg++] = &task.shader_filter;
1820                                 }
1821                                 args[arg++] = &shader_x;
1822                                 args[arg++] = &shader_w;
1823                                 args[arg++] = &offset;
1824                                 args[arg++] = &sample;
1825
1826                                 /* launch kernel */
1827                                 int threads_per_block;
1828                                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuShader));
1829
1830                                 int xblocks = (shader_w + threads_per_block - 1)/threads_per_block;
1831
1832                                 cuda_assert(cuFuncSetCacheConfig(cuShader, CU_FUNC_CACHE_PREFER_L1));
1833                                 cuda_assert(cuLaunchKernel(cuShader,
1834                                                            xblocks, 1, 1, /* blocks */
1835                                                            threads_per_block, 1, 1, /* threads */
1836                                                            0, 0, args, 0));
1837
1838                                 cuda_assert(cuCtxSynchronize());
1839
1840                                 if(task.get_cancel()) {
1841                                         canceled = true;
1842                                         break;
1843                                 }
1844                         }
1845
1846                         task.update_progress(NULL);
1847                 }
1848         }
1849
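        /* With OpenGL interop (i.e. not rendering in the background), pixel memory
         * lives in a PBO registered with CUDA, so it has to be mapped to obtain a
         * CUdeviceptr before kernels write to it, and unmapped again afterwards. */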
1850         CUdeviceptr map_pixels(device_ptr mem)
1851         {
1852                 if(!background) {
1853                         PixelMem pmem = pixel_mem_map[mem];
1854                         CUdeviceptr buffer;
1855
1856                         size_t bytes;
1857                         cuda_assert(cuGraphicsMapResources(1, &pmem.cuPBOresource, 0));
1858                         cuda_assert(cuGraphicsResourceGetMappedPointer(&buffer, &bytes, pmem.cuPBOresource));
1859
1860                         return buffer;
1861                 }
1862
1863                 return cuda_device_ptr(mem);
1864         }
1865
1866         void unmap_pixels(device_ptr mem)
1867         {
1868                 if(!background) {
1869                         PixelMem pmem = pixel_mem_map[mem];
1870
1871                         cuda_assert(cuGraphicsUnmapResources(1, &pmem.cuPBOresource, 0));
1872                 }
1873         }
1874
1875         void pixels_alloc(device_memory& mem)
1876         {
1877                 PixelMem pmem;
1878
1879                 pmem.w = mem.data_width;
1880                 pmem.h = mem.data_height;
1881
1882                 CUDAContextScope scope(this);
1883
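                /* Create a pixel buffer object plus a texture to display it, then
                 * register the PBO with CUDA so kernels can write into it directly.
                 * If registration fails, fall back to non-interop (background)
                 * drawing below. */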
1884                 glGenBuffers(1, &pmem.cuPBO);
1885                 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
1886                 if(mem.data_type == TYPE_HALF)
1887                         glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(GLhalf)*4, NULL, GL_DYNAMIC_DRAW);
1888                 else
1889                         glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(uint8_t)*4, NULL, GL_DYNAMIC_DRAW);
1890
1891                 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1892
1893                 glGenTextures(1, &pmem.cuTexId);
1894                 glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
1895                 if(mem.data_type == TYPE_HALF)
1896                         glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F, pmem.w, pmem.h, 0, GL_RGBA, GL_HALF_FLOAT, NULL);
1897                 else
1898                         glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, pmem.w, pmem.h, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
1899                 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
1900                 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
1901                 glBindTexture(GL_TEXTURE_2D, 0);
1902
1903                 CUresult result = cuGraphicsGLRegisterBuffer(&pmem.cuPBOresource, pmem.cuPBO, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);
1904
1905                 if(result == CUDA_SUCCESS) {
1906                         mem.device_pointer = pmem.cuTexId;
1907                         pixel_mem_map[mem.device_pointer] = pmem;
1908
1909                         mem.device_size = mem.memory_size();
1910                         stats.mem_alloc(mem.device_size);
1911
1912                         return;
1913                 }
1914                 else {
1915                         /* failed to register buffer, fallback to no interop */
1916                         glDeleteBuffers(1, &pmem.cuPBO);
1917                         glDeleteTextures(1, &pmem.cuTexId);
1918
1919                         background = true;
1920                 }
1921         }
1922
1923         void pixels_copy_from(device_memory& mem, int y, int w, int h)
1924         {
1925                 PixelMem pmem = pixel_mem_map[mem.device_pointer];
1926
1927                 CUDAContextScope scope(this);
1928
1929                 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
1930                 uchar *pixels = (uchar*)glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_READ_ONLY);
1931                 size_t offset = sizeof(uchar)*4*y*w;
1932                 memcpy((uchar*)mem.host_pointer + offset, pixels + offset, sizeof(uchar)*4*w*h);
1933                 glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
1934                 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1935         }
1936
1937         void pixels_free(device_memory& mem)
1938         {
1939                 if(mem.device_pointer) {
1940                         PixelMem pmem = pixel_mem_map[mem.device_pointer];
1941
1942                         CUDAContextScope scope(this);
1943
1944                         cuda_assert(cuGraphicsUnregisterResource(pmem.cuPBOresource));
1945                         glDeleteBuffers(1, &pmem.cuPBO);
1946                         glDeleteTextures(1, &pmem.cuTexId);
1947
1948                         pixel_mem_map.erase(pixel_mem_map.find(mem.device_pointer));
1949                         mem.device_pointer = 0;
1950
1951                         stats.mem_free(mem.device_size);
1952                         mem.device_size = 0;
1953                 }
1954         }
1955
1956         void draw_pixels(
1957             device_memory& mem, int y,
1958             int w, int h, int width, int height,
1959             int dx, int dy, int dw, int dh, bool transparent,
1960             const DeviceDrawParams &draw_params)
1961         {
1962                 assert(mem.type == MEM_PIXELS);
1963
1964                 if(!background) {
1965                         const bool use_fallback_shader = (draw_params.bind_display_space_shader_cb == NULL);
1966                         PixelMem pmem = pixel_mem_map[mem.device_pointer];
1967                         float *vpointer;
1968
1969                         CUDAContextScope scope(this);
1970
1971                         /* for multi devices, this assumes the inefficient method that we allocate
1972                          * all pixels on the device even though we only render to a subset */
1973                         size_t offset = 4*y*w;
1974
1975                         if(mem.data_type == TYPE_HALF)
1976                                 offset *= sizeof(GLhalf);
1977                         else
1978                                 offset *= sizeof(uint8_t);
1979
1980                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
1981                         glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
1982                         if(mem.data_type == TYPE_HALF) {
1983                                 glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_HALF_FLOAT, (void*)offset);
1984                         }
1985                         else {
1986                                 glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, (void*)offset);
1987                         }
1988                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1989
1990                         if(transparent) {
1991                                 glEnable(GL_BLEND);
1992                                 glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
1993                         }
1994
1995                         GLint shader_program;
1996                         if(use_fallback_shader) {
1997                                 if(!bind_fallback_display_space_shader(dw, dh)) {
1998                                         return;
1999                                 }
2000                                 shader_program = fallback_shader_program;
2001                         }
2002                         else {
2003                                 draw_params.bind_display_space_shader_cb();
2004                                 glGetIntegerv(GL_CURRENT_PROGRAM, &shader_program);
2005                         }
2006
2007                         if(!vertex_buffer) {
2008                                 glGenBuffers(1, &vertex_buffer);
2009                         }
2010
2011                         glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
2012                         /* invalidate old contents - avoids stalling if buffer is still waiting in queue to be rendered */
2013                         glBufferData(GL_ARRAY_BUFFER, 16 * sizeof(float), NULL, GL_STREAM_DRAW);
2014
2015                         vpointer = (float *)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
2016
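                        /* Fill the 16 floats with four (u, v, x, y) pairs describing
                         * the destination quad, drawn below as a triangle fan; texture
                         * coordinates are scaled by w/pmem.w and h/pmem.h so only the
                         * rendered region of the PBO-backed texture is sampled. */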
2017                         if(vpointer) {
2018                                 /* texture coordinate - vertex pair */
2019                                 vpointer[0] = 0.0f;
2020                                 vpointer[1] = 0.0f;
2021                                 vpointer[2] = dx;
2022                                 vpointer[3] = dy;
2023
2024                                 vpointer[4] = (float)w/(float)pmem.w;
2025                                 vpointer[5] = 0.0f;
2026                                 vpointer[6] = (float)width + dx;
2027                                 vpointer[7] = dy;
2028
2029                                 vpointer[8] = (float)w/(float)pmem.w;
2030                                 vpointer[9] = (float)h/(float)pmem.h;
2031                                 vpointer[10] = (float)width + dx;
2032                                 vpointer[11] = (float)height + dy;
2033
2034                                 vpointer[12] = 0.0f;
2035                                 vpointer[13] = (float)h/(float)pmem.h;
2036                                 vpointer[14] = dx;
2037                                 vpointer[15] = (float)height + dy;
2038
2039                                 glUnmapBuffer(GL_ARRAY_BUFFER);
2040                         }
2041
2042                         GLuint vertex_array_object;
2043                         GLuint position_attribute, texcoord_attribute;
2044
2045                         glGenVertexArrays(1, &vertex_array_object);
2046                         glBindVertexArray(vertex_array_object);
2047
2048                         texcoord_attribute = glGetAttribLocation(shader_program, "texCoord");
2049                         position_attribute = glGetAttribLocation(shader_program, "pos");
2050
2051                         glEnableVertexAttribArray(texcoord_attribute);
2052                         glEnableVertexAttribArray(position_attribute);
2053
2054                         glVertexAttribPointer(texcoord_attribute, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(float), (const GLvoid *)0);
2055                         glVertexAttribPointer(position_attribute, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(float), (const GLvoid *)(sizeof(float) * 2));
2056
2057                         glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
2058
2059                         if(use_fallback_shader) {
2060                                 glUseProgram(0);
2061                         }
2062                         else {
2063                                 draw_params.unbind_display_space_shader_cb();
2064                         }
2065
2066                         if(transparent) {
2067                                 glDisable(GL_BLEND);
2068                         }
2069
2070                         glBindTexture(GL_TEXTURE_2D, 0);
2071
2072                         return;
2073                 }
2074
2075                 Device::draw_pixels(mem, y, w, h, width, height, dx, dy, dw, dh, transparent, draw_params);
2076         }
2077
2078         void thread_run(DeviceTask *task)
2079         {
2080                 CUDAContextScope scope(this);
2081
2082                 if(task->type == DeviceTask::RENDER) {
2083                         DeviceRequestedFeatures requested_features;
2084                         if(use_split_kernel()) {
2085                                 if(split_kernel == NULL) {
2086                                         split_kernel = new CUDASplitKernel(this);
2087                                         split_kernel->load_kernels(requested_features);
2088                                 }
2089                         }
2090
2091                         device_vector<WorkTile> work_tiles(this, "work_tiles", MEM_READ_ONLY);
2092
2093                         /* keep rendering tiles until done */
2094                         RenderTile tile;
2095                         DenoisingTask denoising(this);
2096
2097                         while(task->acquire_tile(this, tile)) {
2098                                 if(tile.task == RenderTile::PATH_TRACE) {
2099                                         if(use_split_kernel()) {
2100                                                 device_only_memory<uchar> void_buffer(this, "void_buffer");
2101                                                 split_kernel->path_trace(task, tile, void_buffer, void_buffer);
2102                                         }
2103                                         else {
2104                                                 path_trace(*task, tile, work_tiles);
2105                                         }
2106                                 }
2107                                 else if(tile.task == RenderTile::DENOISE) {
2108                                         tile.sample = tile.start_sample + tile.num_samples;
2109
2110                                         denoise(tile, denoising, *task);
2111
2112                                         task->update_progress(&tile, tile.w*tile.h);
2113                                 }
2114
2115                                 task->release_tile(tile);
2116
2117                                 if(task->get_cancel()) {
2118                                         if(task->need_finish_queue == false)
2119                                                 break;
2120                                 }
2121                         }
2122
2123                         work_tiles.free();
2124                 }
2125                 else if(task->type == DeviceTask::SHADER) {
2126                         shader(*task);
2127
2128                         cuda_assert(cuCtxSynchronize());
2129                 }
2130         }
2131
2132         class CUDADeviceTask : public DeviceTask {
2133         public:
2134                 CUDADeviceTask(CUDADevice *device, DeviceTask& task)
2135                 : DeviceTask(task)
2136                 {
2137                         run = function_bind(&CUDADevice::thread_run, device, this);
2138                 }
2139         };
2140
2141         int get_split_task_count(DeviceTask& /*task*/)
2142         {
2143                 return 1;
2144         }
2145
2146         void task_add(DeviceTask& task)
2147         {
2148                 CUDAContextScope scope(this);
2149
2150                 /* Load texture info. */
2151                 load_texture_info();
2152
2153                 /* Synchronize all memory copies before executing task. */
2154                 cuda_assert(cuCtxSynchronize());
2155
2156                 if(task.type == DeviceTask::FILM_CONVERT) {
2157                         /* Must be done in main thread due to OpenGL access. */
2158                         film_convert(task, task.buffer, task.rgba_byte, task.rgba_half);
2159                 }
2160                 else {
2161                         task_pool.push(new CUDADeviceTask(this, task));
2162                 }
2163         }
2164
2165         void task_wait()
2166         {
2167                 task_pool.wait();
2168         }
2169
2170         void task_cancel()
2171         {
2172                 task_pool.cancel();
2173         }
2174
2175         friend class CUDASplitKernelFunction;
2176         friend class CUDASplitKernel;
2177         friend class CUDAContextScope;
2178 };
2179
2180 /* Redefine the cuda_assert macro so it can be used outside of the CUDADevice
2181  * class, now that the definition of that class is complete.
2182  */
2183 #undef cuda_assert
2184 #define cuda_assert(stmt) \
2185         { \
2186                 CUresult result = stmt; \
2187                 \
2188                 if(result != CUDA_SUCCESS) { \
2189                         string message = string_printf("CUDA error: %s in %s", cuewErrorString(result), #stmt); \
2190                         if(device->error_msg == "") \
2191                                 device->error_msg = message; \
2192                         fprintf(stderr, "%s\n", message.c_str()); \
2193                         /*cuda_abort();*/ \
2194                         device->cuda_error_documentation(); \
2195                 } \
2196         } (void)0
2197
2198
2199 /* CUDA context scope. */
2200
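/* RAII helper that makes the device's CUDA context current for the lifetime of
 * the scope and pops it again on destruction. */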
2201 CUDAContextScope::CUDAContextScope(CUDADevice *device)
2202 : device(device)
2203 {
2204         cuda_assert(cuCtxPushCurrent(device->cuContext));
2205 }
2206
2207 CUDAContextScope::~CUDAContextScope()
2208 {
2209         cuda_assert(cuCtxPopCurrent(NULL));
2210 }
2211
2212 /* split kernel */
2213
2214 class CUDASplitKernelFunction : public SplitKernelFunction {
2215         CUDADevice* device;
2216         CUfunction func;
2217 public:
2218         CUDASplitKernelFunction(CUDADevice *device, CUfunction func) : device(device), func(func) {}
2219
2220         /* enqueue the kernel, returns false if there is an error */
2221         bool enqueue(const KernelDimensions &dim, device_memory &/*kg*/, device_memory &/*data*/)
2222         {
2223                 return enqueue(dim, NULL);
2224         }
2225
2226         /* enqueue the kernel, returns false if there is an error */
2227         bool enqueue(const KernelDimensions &dim, void *args[])
2228         {
2229                 if(device->have_error())
2230                         return false;
2231
2232                 CUDAContextScope scope(device);
2233
2234                 /* we ignore dim.local_size for now, as this is faster */
2235                 int threads_per_block;
2236                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, func));
2237
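                /* Flatten the 2D global size into a 1D launch: the total number of
                 * work items divided by the kernel's maximum block size, rounded up. */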
2238                 int xblocks = (dim.global_size[0]*dim.global_size[1] + threads_per_block - 1)/threads_per_block;
2239
2240                 cuda_assert(cuFuncSetCacheConfig(func, CU_FUNC_CACHE_PREFER_L1));
2241
2242                 cuda_assert(cuLaunchKernel(func,
2243                                            xblocks, 1, 1, /* blocks */
2244                                            threads_per_block, 1, 1, /* threads */
2245                                            0, 0, args, 0));
2246
2247                 return !device->have_error();
2248         }
2249 };
2250
2251 CUDASplitKernel::CUDASplitKernel(CUDADevice *device) : DeviceSplitKernel(device), device(device)
2252 {
2253 }
2254
2255 uint64_t CUDASplitKernel::state_buffer_size(device_memory& /*kg*/, device_memory& /*data*/, size_t num_threads)
2256 {
2257         CUDAContextScope scope(device);
2258
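        /* The state buffer size is computed on the device: a single-thread helper
         * kernel writes the byte count needed for num_threads into a one-element
         * buffer, which is then read back to the host. */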
2259         device_vector<uint64_t> size_buffer(device, "size_buffer", MEM_READ_WRITE);
2260         size_buffer.alloc(1);
2261         size_buffer.zero_to_device();
2262
2263         uint threads = num_threads;
2264         CUdeviceptr d_size = device->cuda_device_ptr(size_buffer.device_pointer);
2265
2266         struct args_t {
2267                 uint* num_threads;
2268                 CUdeviceptr* size;
2269         };
2270
2271         args_t args = {
2272                 &threads,
2273                 &d_size
2274         };
2275
2276         CUfunction state_buffer_size;
2277         cuda_assert(cuModuleGetFunction(&state_buffer_size, device->cuModule, "kernel_cuda_state_buffer_size"));
2278
2279         cuda_assert(cuLaunchKernel(state_buffer_size,
2280                                    1, 1, 1,
2281                                    1, 1, 1,
2282                                    0, 0, (void**)&args, 0));
2283
2284         size_buffer.copy_from_device(0, 1, 1);
2285         size_t size = size_buffer[0];
2286         size_buffer.free();
2287
2288         return size;
2289 }
2290
2291 bool CUDASplitKernel::enqueue_split_kernel_data_init(const KernelDimensions& dim,
2292                                     RenderTile& rtile,
2293                                     int num_global_elements,
2294                                     device_memory& /*kernel_globals*/,
2295                                     device_memory& /*kernel_data*/,
2296                                     device_memory& split_data,
2297                                     device_memory& ray_state,
2298                                     device_memory& queue_index,
2299                                     device_memory& use_queues_flag,
2300                                     device_memory& work_pool_wgs)
2301 {
2302         CUDAContextScope scope(device);
2303
2304         CUdeviceptr d_split_data = device->cuda_device_ptr(split_data.device_pointer);
2305         CUdeviceptr d_ray_state = device->cuda_device_ptr(ray_state.device_pointer);
2306         CUdeviceptr d_queue_index = device->cuda_device_ptr(queue_index.device_pointer);
2307         CUdeviceptr d_use_queues_flag = device->cuda_device_ptr(use_queues_flag.device_pointer);
2308         CUdeviceptr d_work_pool_wgs = device->cuda_device_ptr(work_pool_wgs.device_pointer);
2309
2310         CUdeviceptr d_buffer = device->cuda_device_ptr(rtile.buffer);
2311
2312         int end_sample = rtile.start_sample + rtile.num_samples;
2313         int queue_size = dim.global_size[0] * dim.global_size[1];
2314
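        /* The struct below mirrors the parameter list of
         * kernel_cuda_path_trace_data_init; since it only contains pointers, its
         * address can be passed to cuLaunchKernel as the kernel parameter array. */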
2315         struct args_t {
2316                 CUdeviceptr* split_data_buffer;
2317                 int* num_elements;
2318                 CUdeviceptr* ray_state;
2319                 int* start_sample;
2320                 int* end_sample;
2321                 int* sx;
2322                 int* sy;
2323                 int* sw;
2324                 int* sh;
2325                 int* offset;
2326                 int* stride;
2327                 CUdeviceptr* queue_index;
2328                 int* queuesize;
2329                 CUdeviceptr* use_queues_flag;
2330                 CUdeviceptr* work_pool_wgs;
2331                 int* num_samples;
2332                 CUdeviceptr* buffer;
2333         };
2334
2335         args_t args = {
2336                 &d_split_data,
2337                 &num_global_elements,
2338                 &d_ray_state,
2339                 &rtile.start_sample,
2340                 &end_sample,
2341                 &rtile.x,
2342                 &rtile.y,
2343                 &rtile.w,
2344                 &rtile.h,
2345                 &rtile.offset,
2346                 &rtile.stride,
2347                 &d_queue_index,
2348                 &queue_size,
2349                 &d_use_queues_flag,
2350                 &d_work_pool_wgs,
2351                 &rtile.num_samples,
2352                 &d_buffer
2353         };
2354
2355         CUfunction data_init;
2356         cuda_assert(cuModuleGetFunction(&data_init, device->cuModule, "kernel_cuda_path_trace_data_init"));
2357         if(device->have_error()) {
2358                 return false;
2359         }
2360
2361         CUDASplitKernelFunction(device, data_init).enqueue(dim, (void**)&args);
2362
2363         return !device->have_error();
2364 }
2365
2366 SplitKernelFunction* CUDASplitKernel::get_split_kernel_function(const string& kernel_name,
2367                                                                 const DeviceRequestedFeatures&)
2368 {
2369         CUDAContextScope scope(device);
2370         CUfunction func;
2371
2372         cuda_assert(cuModuleGetFunction(&func, device->cuModule, (string("kernel_cuda_") + kernel_name).data()));
2373         if(device->have_error()) {
2374                 device->cuda_error_message(string_printf("kernel \"kernel_cuda_%s\" not found in module", kernel_name.data()));
2375                 return NULL;
2376         }
2377
2378         return new CUDASplitKernelFunction(device, func);
2379 }
2380
2381 int2 CUDASplitKernel::split_kernel_local_size()
2382 {
2383         return make_int2(32, 1);
2384 }
2385
2386 int2 CUDASplitKernel::split_kernel_global_size(device_memory& kg, device_memory& data, DeviceTask * /*task*/)
2387 {
2388         CUDAContextScope scope(device);
2389         size_t free;
2390         size_t total;
2391
2392         cuda_assert(cuMemGetInfo(&free, &total));
2393
2394         VLOG(1) << "Maximum device allocation size: "
2395                 << string_human_readable_number(free) << " bytes. ("
2396                 << string_human_readable_size(free) << ").";
2397
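             /* Budget at most half of the free device memory for split kernel
              * state, then pick a roughly square 2D global size with the sides
              * rounded down to multiples of 32 and 16.
              */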
2398         size_t num_elements = max_elements_for_max_buffer_size(kg, data, free / 2);
2399         size_t side = round_down((int)sqrt(num_elements), 32);
2400         int2 global_size = make_int2(side, round_down(num_elements / side, 16));
2401         VLOG(1) << "Global size: " << global_size << ".";
2402         return global_size;
2403 }
2404
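     /* One-time CUEW initialization, cached across calls. CUDA is reported as
      * usable when either precompiled kernels are present or, on non-Windows
      * platforms, a CUDA compiler is available for runtime kernel compilation.
      */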
2405 bool device_cuda_init(void)
2406 {
2407 #ifdef WITH_CUDA_DYNLOAD
2408         static bool initialized = false;
2409         static bool result = false;
2410
2411         if(initialized)
2412                 return result;
2413
2414         initialized = true;
2415         int cuew_result = cuewInit(CUEW_INIT_CUDA);
2416         if(cuew_result == CUEW_SUCCESS) {
2417                 VLOG(1) << "CUEW initialization succeeded";
2418                 if(CUDADevice::have_precompiled_kernels()) {
2419                         VLOG(1) << "Found precompiled kernels";
2420                         result = true;
2421                 }
2422 #ifndef _WIN32
2423                 else if(cuewCompilerPath() != NULL) {
2424                         VLOG(1) << "Found CUDA compiler " << cuewCompilerPath();
2425                         result = true;
2426                 }
2427                 else {
2428                         VLOG(1) << "Neither precompiled kernels nor a CUDA compiler was found;"
2429                                 << " unable to use CUDA";
2430                 }
2431 #endif
2432         }
2433         else {
2434                 VLOG(1) << "CUEW initialization failed: "
2435                         << ((cuew_result == CUEW_ERROR_ATEXIT_FAILED)
2436                             ? "Error setting up atexit() handler"
2437                             : "Error opening the library");
2438         }
2439
2440         return result;
2441 #else  /* WITH_CUDA_DYNLOAD */
2442         return true;
2443 #endif /* WITH_CUDA_DYNLOAD */
2444 }
2445
2446 Device *device_cuda_create(DeviceInfo& info, Stats &stats, bool background)
2447 {
2448         return new CUDADevice(info, stats, background);
2449 }
2450
2451 static CUresult device_cuda_safe_init()
2452 {
2453 #ifdef _WIN32
2454         __try {
2455                 return cuInit(0);
2456         }
2457         __except(EXCEPTION_EXECUTE_HANDLER) {
2458                 /* Ignore crashes inside the CUDA driver and hope we can
2459                  * survive even with corrupted CUDA installs. */
2460                 fprintf(stderr, "Cycles CUDA: driver crashed, continuing without CUDA.\n");
2461         }
2462
2463         return CUDA_ERROR_NO_DEVICE;
2464 #else
2465         return cuInit(0);
2466 #endif
2467 }
2468
2469 void device_cuda_info(vector<DeviceInfo>& devices)
2470 {
2471         CUresult result = device_cuda_safe_init();
2472         if(result != CUDA_SUCCESS) {
2473                 if(result != CUDA_ERROR_NO_DEVICE)
2474                         fprintf(stderr, "CUDA cuInit: %s\n", cuewErrorString(result));
2475                 return;
2476         }
2477
2478         int count = 0;
2479         result = cuDeviceGetCount(&count);
2480         if(result != CUDA_SUCCESS) {
2481                 fprintf(stderr, "CUDA cuDeviceGetCount: %s\n", cuewErrorString(result));
2482                 return;
2483         }
2484
2485         vector<DeviceInfo> display_devices;
2486
2487         for(int num = 0; num < count; num++) {
2488                 char name[256];
2489
2490                 result = cuDeviceGetName(name, 256, num);
2491                 if(result != CUDA_SUCCESS) {
2492                         fprintf(stderr, "CUDA cuDeviceGetName: %s\n", cuewErrorString(result));
2493                         continue;
2494                 }
2495
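                     /* Cycles requires compute capability 3.0 (Kepler) or newer. */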
2496                 int major;
2497                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, num);
2498                 if(major < 3) {
2499                         VLOG(1) << "Ignoring device \"" << name
2500                                 << "\"; this graphics card is no longer supported.";
2501                         continue;
2502                 }
2503
2504                 DeviceInfo info;
2505
2506                 info.type = DEVICE_CUDA;
2507                 info.description = string(name);
2508                 info.num = num;
2509
2510                 info.advanced_shading = (major >= 3);
2511                 info.has_half_images = (major >= 3);
2512                 info.has_volume_decoupled = false;
2513                 info.bvh_layout_mask = BVH_LAYOUT_BVH2;
2514
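                     /* Build a stable device id from the name and the PCI location
                      * (domain:bus:device), e.g. a hypothetical
                      * "CUDA_GeForce GTX 1080_0000:01:00".
                      */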
2515                 int pci_location[3] = {0, 0, 0};
2516                 cuDeviceGetAttribute(&pci_location[0], CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID, num);
2517                 cuDeviceGetAttribute(&pci_location[1], CU_DEVICE_ATTRIBUTE_PCI_BUS_ID, num);
2518                 cuDeviceGetAttribute(&pci_location[2], CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID, num);
2519                 info.id = string_printf("CUDA_%s_%04x:%02x:%02x",
2520                                         name,
2521                                         (unsigned int)pci_location[0],
2522                                         (unsigned int)pci_location[1],
2523                                         (unsigned int)pci_location[2]);
2524
2525                 /* If device has a kernel timeout and no compute preemption, we assume
2526                  * it is connected to a display and will freeze the display while doing
2527                  * computations. */
2528                 int timeout_attr = 0, preempt_attr = 0;
2529                 cuDeviceGetAttribute(&timeout_attr, CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT, num);
2530                 cuDeviceGetAttribute(&preempt_attr, CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED, num);
2531
2532                 if(timeout_attr && !preempt_attr) {
2533                         VLOG(1) << "Device is recognized as display.";
2534                         info.description += " (Display)";
2535                         info.display_device = true;
2536                         display_devices.push_back(info);
2537                 }
2538                 else {
2539                         devices.push_back(info);
2540                 }
2541                 VLOG(1) << "Added device \"" << name << "\" with id \"" << info.id << "\".";
2542         }
2543
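             /* Append display devices last so that compute-only devices come
              * first in the returned list.
              */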
2544         if(!display_devices.empty())
2545                 devices.insert(devices.end(), display_devices.begin(), display_devices.end());
2546 }
2547
2548 string device_cuda_capabilities(void)
2549 {
2550         CUresult result = device_cuda_safe_init();
2551         if(result != CUDA_SUCCESS) {
2552                 if(result != CUDA_ERROR_NO_DEVICE) {
2553                         return string("Error initializing CUDA: ") + cuewErrorString(result);
2554                 }
2555                 return "No CUDA device found\n";
2556         }
2557
2558         int count;
2559         result = cuDeviceGetCount(&count);
2560         if(result != CUDA_SUCCESS) {
2561                 return string("Error getting devices: ") + cuewErrorString(result);
2562         }
2563
2564         string capabilities = "";
2565         for(int num = 0; num < count; num++) {
2566                 char name[256];
2567                 if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS) {
2568                         continue;
2569                 }
2570                 capabilities += string("\t") + name + "\n";
2571                 int value;
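                     /* Query one device attribute and, when the query succeeds,
                      * append a "CU_DEVICE_ATTRIBUTE_<name> <value>" line to the
                      * report; failed queries are silently skipped.
                      */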
2572 #define GET_ATTR(attr) \
2573                 { \
2574                         if(cuDeviceGetAttribute(&value, \
2575                                                 CU_DEVICE_ATTRIBUTE_##attr, \
2576                                                 num) == CUDA_SUCCESS) \
2577                         { \
2578                                 capabilities += string_printf("\t\tCU_DEVICE_ATTRIBUTE_" #attr "\t\t\t%d\n", \
2579                                                               value); \
2580                         } \
2581                 } (void)0
2582                 /* TODO(sergey): Strip all attributes which are not useful for us
2583                  * or do not depend on the driver.
2584                  */
2585                 GET_ATTR(MAX_THREADS_PER_BLOCK);
2586                 GET_ATTR(MAX_BLOCK_DIM_X);
2587                 GET_ATTR(MAX_BLOCK_DIM_Y);
2588                 GET_ATTR(MAX_BLOCK_DIM_Z);
2589                 GET_ATTR(MAX_GRID_DIM_X);
2590                 GET_ATTR(MAX_GRID_DIM_Y);
2591                 GET_ATTR(MAX_GRID_DIM_Z);
2592                 GET_ATTR(MAX_SHARED_MEMORY_PER_BLOCK);
2593                 GET_ATTR(SHARED_MEMORY_PER_BLOCK);
2594                 GET_ATTR(TOTAL_CONSTANT_MEMORY);
2595                 GET_ATTR(WARP_SIZE);
2596                 GET_ATTR(MAX_PITCH);
2597                 GET_ATTR(MAX_REGISTERS_PER_BLOCK);
2598                 GET_ATTR(REGISTERS_PER_BLOCK);
2599                 GET_ATTR(CLOCK_RATE);
2600                 GET_ATTR(TEXTURE_ALIGNMENT);
2601                 GET_ATTR(GPU_OVERLAP);
2602                 GET_ATTR(MULTIPROCESSOR_COUNT);
2603                 GET_ATTR(KERNEL_EXEC_TIMEOUT);
2604                 GET_ATTR(INTEGRATED);
2605                 GET_ATTR(CAN_MAP_HOST_MEMORY);
2606                 GET_ATTR(COMPUTE_MODE);
2607                 GET_ATTR(MAXIMUM_TEXTURE1D_WIDTH);
2608                 GET_ATTR(MAXIMUM_TEXTURE2D_WIDTH);
2609                 GET_ATTR(MAXIMUM_TEXTURE2D_HEIGHT);
2610                 GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH);
2611                 GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT);
2612                 GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH);
2613                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_WIDTH);
2614                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_HEIGHT);
2615                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_LAYERS);
2616                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_WIDTH);
2617                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_HEIGHT);
2618                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES);
2619                 GET_ATTR(SURFACE_ALIGNMENT);
2620                 GET_ATTR(CONCURRENT_KERNELS);
2621                 GET_ATTR(ECC_ENABLED);
2622                 GET_ATTR(TCC_DRIVER);
2623                 GET_ATTR(MEMORY_CLOCK_RATE);
2624                 GET_ATTR(GLOBAL_MEMORY_BUS_WIDTH);
2625                 GET_ATTR(L2_CACHE_SIZE);
2626                 GET_ATTR(MAX_THREADS_PER_MULTIPROCESSOR);
2627                 GET_ATTR(ASYNC_ENGINE_COUNT);
2628                 GET_ATTR(UNIFIED_ADDRESSING);
2629                 GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_WIDTH);
2630                 GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_LAYERS);
2631                 GET_ATTR(CAN_TEX2D_GATHER);
2632                 GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_WIDTH);
2633                 GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_HEIGHT);
2634                 GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE);
2635                 GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE);
2636                 GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE);
2637                 GET_ATTR(TEXTURE_PITCH_ALIGNMENT);
2638                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_WIDTH);
2639                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH);
2640                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS);
2641                 GET_ATTR(MAXIMUM_SURFACE1D_WIDTH);
2642                 GET_ATTR(MAXIMUM_SURFACE2D_WIDTH);
2643                 GET_ATTR(MAXIMUM_SURFACE2D_HEIGHT);
2644                 GET_ATTR(MAXIMUM_SURFACE3D_WIDTH);
2645                 GET_ATTR(MAXIMUM_SURFACE3D_HEIGHT);
2646                 GET_ATTR(MAXIMUM_SURFACE3D_DEPTH);
2647                 GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_WIDTH);
2648                 GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_LAYERS);
2649                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_WIDTH);
2650                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_HEIGHT);
2651                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_LAYERS);
2652                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_WIDTH);
2653                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH);
2654                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS);
2655                 GET_ATTR(MAXIMUM_TEXTURE1D_LINEAR_WIDTH);
2656                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_WIDTH);
2657                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_HEIGHT);
2658                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_PITCH);
2659                 GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH);
2660                 GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT);
2661                 GET_ATTR(COMPUTE_CAPABILITY_MAJOR);
2662                 GET_ATTR(COMPUTE_CAPABILITY_MINOR);
2663                 GET_ATTR(MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH);
2664                 GET_ATTR(STREAM_PRIORITIES_SUPPORTED);
2665                 GET_ATTR(GLOBAL_L1_CACHE_SUPPORTED);
2666                 GET_ATTR(LOCAL_L1_CACHE_SUPPORTED);
2667                 GET_ATTR(MAX_SHARED_MEMORY_PER_MULTIPROCESSOR);
2668                 GET_ATTR(MAX_REGISTERS_PER_MULTIPROCESSOR);
2669                 GET_ATTR(MANAGED_MEMORY);
2670                 GET_ATTR(MULTI_GPU_BOARD);
2671                 GET_ATTR(MULTI_GPU_BOARD_GROUP_ID);
2672 #undef GET_ATTR
2673                 capabilities += "\n";
2674         }
2675
2676         return capabilities;
2677 }
2678
2679 CCL_NAMESPACE_END