intern/cycles/device/device_cuda.cpp  (blender.git)
1 /*
2  * Copyright 2011-2013 Blender Foundation
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16
17 #include <climits>
18 #include <limits.h>
19 #include <stdio.h>
20 #include <stdlib.h>
21 #include <string.h>
22
23 #include "device/device.h"
24 #include "device/device_denoising.h"
25 #include "device/device_intern.h"
26 #include "device/device_split_kernel.h"
27
28 #include "render/buffers.h"
29
30 #include "kernel/filter/filter_defines.h"
31
32 #ifdef WITH_CUDA_DYNLOAD
33 #  include "cuew.h"
34 #else
35 #  include "util/util_opengl.h"
36 #  include <cuda.h>
37 #  include <cudaGL.h>
38 #endif
39 #include "util/util_debug.h"
40 #include "util/util_foreach.h"
41 #include "util/util_logging.h"
42 #include "util/util_map.h"
43 #include "util/util_md5.h"
44 #include "util/util_opengl.h"
45 #include "util/util_path.h"
46 #include "util/util_string.h"
47 #include "util/util_system.h"
48 #include "util/util_types.h"
49 #include "util/util_time.h"
50
51 #include "kernel/split/kernel_split_data_types.h"
52
53 CCL_NAMESPACE_BEGIN
54
55 #ifndef WITH_CUDA_DYNLOAD
56
57 /* Transparently implement some functions, so the majority of the file does not need
58  * to worry about the difference between dynamically loaded and linked CUDA at all.
59  */
60
61 namespace {
62
63 const char *cuewErrorString(CUresult result)
64 {
65         /* We can only give the error code here without major code duplication, which
66          * should be enough since dynamic loading is only disabled by folks
67          * who know what they're doing anyway.
68          *
69          * NOTE: Avoid calling this from several threads.
70          */
71         static string error;
72         error = string_printf("%d", result);
73         return error.c_str();
74 }
75
76 const char *cuewCompilerPath(void)
77 {
78         return CYCLES_CUDA_NVCC_EXECUTABLE;
79 }
80
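/* For reference: CUDA_VERSION encodes major*1000 + minor*10, so the function
 * below returns e.g. 80 for CUDA 8.0 (8000/100 + 0) and 91 for CUDA 9.1
 * (9010/100 + 1), the same two-digit form checked in compile_check_compiler(). */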
81 int cuewCompilerVersion(void)
82 {
83         return (CUDA_VERSION / 100) + (CUDA_VERSION % 100 / 10);
84 }
85
86 }  /* namespace */
87 #endif  /* WITH_CUDA_DYNLOAD */
88
89 class CUDADevice;
90
91 class CUDASplitKernel : public DeviceSplitKernel {
92         CUDADevice *device;
93 public:
94         explicit CUDASplitKernel(CUDADevice *device);
95
96         virtual uint64_t state_buffer_size(device_memory& kg, device_memory& data, size_t num_threads);
97
98         virtual bool enqueue_split_kernel_data_init(const KernelDimensions& dim,
99                                                     RenderTile& rtile,
100                                                     int num_global_elements,
101                                                     device_memory& kernel_globals,
102                                                     device_memory& kernel_data_,
103                                                     device_memory& split_data,
104                                                     device_memory& ray_state,
105                                                     device_memory& queue_index,
106                                                     device_memory& use_queues_flag,
107                                                     device_memory& work_pool_wgs);
108
109         virtual SplitKernelFunction* get_split_kernel_function(const string& kernel_name,
110                                                                const DeviceRequestedFeatures&);
111         virtual int2 split_kernel_local_size();
112         virtual int2 split_kernel_global_size(device_memory& kg, device_memory& data, DeviceTask *task);
113 };
114
115 /* Utility to push/pop CUDA context. */
116 class CUDAContextScope {
117 public:
118         CUDAContextScope(CUDADevice *device);
119         ~CUDAContextScope();
120
121 private:
122         CUDADevice *device;
123 };
124
125 class CUDADevice : public Device
126 {
127 public:
128         DedicatedTaskPool task_pool;
129         CUdevice cuDevice;
130         CUcontext cuContext;
131         CUmodule cuModule, cuFilterModule;
132         size_t device_texture_headroom;
133         size_t device_working_headroom;
134         bool move_texture_to_host;
135         size_t map_host_used;
136         size_t map_host_limit;
137         int can_map_host;
138         int cuDevId;
139         int cuDevArchitecture;
140         bool first_error;
141         CUDASplitKernel *split_kernel;
142
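        /* Per-allocation bookkeeping: texobject/array are set for image textures,
         * while map_host_pointer/free_map_host track allocations that live in
         * mapped host memory instead of device memory. */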
143         struct CUDAMem {
144                 CUDAMem()
145                 : texobject(0), array(0), map_host_pointer(0), free_map_host(false) {}
146
147                 CUtexObject texobject;
148                 CUarray array;
149                 void *map_host_pointer;
150                 bool free_map_host;
151         };
152         typedef map<device_memory*, CUDAMem> CUDAMemMap;
153         CUDAMemMap cuda_mem_map;
154
155         struct PixelMem {
156                 GLuint cuPBO;
157                 CUgraphicsResource cuPBOresource;
158                 GLuint cuTexId;
159                 int w, h;
160         };
161         map<device_ptr, PixelMem> pixel_mem_map;
162
163         /* Bindless Textures */
164         device_vector<TextureInfo> texture_info;
165         bool need_texture_info;
166
167         CUdeviceptr cuda_device_ptr(device_ptr mem)
168         {
169                 return (CUdeviceptr)mem;
170         }
171
172         static bool have_precompiled_kernels()
173         {
174                 string cubins_path = path_get("lib");
175                 return path_exists(cubins_path);
176         }
177
178         virtual bool show_samples() const
179         {
180                 /* The CUDADevice only processes one tile at a time, so showing samples is fine. */
181                 return true;
182         }
183
184 /*#ifdef NDEBUG
185 #define cuda_abort()
186 #else
187 #define cuda_abort() abort()
188 #endif*/
189         void cuda_error_documentation()
190         {
191                 if(first_error) {
192                         fprintf(stderr, "\nRefer to the Cycles GPU rendering documentation for possible solutions:\n");
193                         fprintf(stderr, "https://docs.blender.org/manual/en/dev/render/cycles/gpu_rendering.html\n\n");
194                         first_error = false;
195                 }
196         }
197
198 #define cuda_assert(stmt) \
199         { \
200                 CUresult result = stmt; \
201                 \
202                 if(result != CUDA_SUCCESS) { \
203                         string message = string_printf("CUDA error: %s in %s, line %d", cuewErrorString(result), #stmt, __LINE__); \
204                         if(error_msg == "") \
205                                 error_msg = message; \
206                         fprintf(stderr, "%s\n", message.c_str()); \
207                         /*cuda_abort();*/ \
208                         cuda_error_documentation(); \
209                 } \
210         } (void)0
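/* cuda_assert() is meant to wrap individual driver API calls, e.g.
 * cuda_assert(cuCtxSynchronize()); it records the first error message and
 * prints to stderr, but intentionally does not abort (see cuda_abort above). */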
211
212         bool cuda_error_(CUresult result, const string& stmt)
213         {
214                 if(result == CUDA_SUCCESS)
215                         return false;
216
217                 string message = string_printf("CUDA error at %s: %s", stmt.c_str(), cuewErrorString(result));
218                 if(error_msg == "")
219                         error_msg = message;
220                 fprintf(stderr, "%s\n", message.c_str());
221                 cuda_error_documentation();
222                 return true;
223         }
224
225 #define cuda_error(stmt) cuda_error_(stmt, #stmt)
226
227         void cuda_error_message(const string& message)
228         {
229                 if(error_msg == "")
230                         error_msg = message;
231                 fprintf(stderr, "%s\n", message.c_str());
232                 cuda_error_documentation();
233         }
234
235         CUDADevice(DeviceInfo& info, Stats &stats, bool background_)
236         : Device(info, stats, background_),
237           texture_info(this, "__texture_info", MEM_TEXTURE)
238         {
239                 first_error = true;
240                 background = background_;
241
242                 cuDevId = info.num;
243                 cuDevice = 0;
244                 cuContext = 0;
245
246                 cuModule = 0;
247                 cuFilterModule = 0;
248
249                 split_kernel = NULL;
250
251                 need_texture_info = false;
252
253                 device_texture_headroom = 0;
254                 device_working_headroom = 0;
255                 move_texture_to_host = false;
256                 map_host_limit = 0;
257                 map_host_used = 0;
258                 can_map_host = 0;
259
260                 /* Initialize CUDA. */
261                 if(cuda_error(cuInit(0)))
262                         return;
263
264                 /* Setup device and context. */
265                 if(cuda_error(cuDeviceGet(&cuDevice, cuDevId)))
266                         return;
267
268                 /* CU_CTX_MAP_HOST for mapping host memory when out of device memory.
269                  * CU_CTX_LMEM_RESIZE_TO_MAX for reserving local memory ahead of render,
270                  * so we can predict which memory to map to host. */
271                 cuda_assert(cuDeviceGetAttribute(&can_map_host, CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY, cuDevice));
272
273                 unsigned int ctx_flags = CU_CTX_LMEM_RESIZE_TO_MAX;
274                 if(can_map_host) {
275                         ctx_flags |= CU_CTX_MAP_HOST;
276                         init_host_memory();
277                 }
278
279                 /* Create context. */
280                 CUresult result;
281
282                 if(background) {
283                         result = cuCtxCreate(&cuContext, ctx_flags, cuDevice);
284                 }
285                 else {
286                         result = cuGLCtxCreate(&cuContext, ctx_flags, cuDevice);
287
288                         if(result != CUDA_SUCCESS) {
289                                 result = cuCtxCreate(&cuContext, ctx_flags, cuDevice);
290                                 background = true;
291                         }
292                 }
293
294                 if(cuda_error_(result, "cuCtxCreate"))
295                         return;
296
297                 int major, minor;
298                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
299                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
300                 cuDevArchitecture = major*100 + minor*10;
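                /* E.g. compute capability 6.1 gives cuDevArchitecture = 610,
                 * and 3.0 gives 300. */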
301
302                 /* Pop context set by cuCtxCreate. */
303                 cuCtxPopCurrent(NULL);
304         }
305
306         ~CUDADevice()
307         {
308                 task_pool.stop();
309
310                 delete split_kernel;
311
312                 if(!info.has_fermi_limits) {
313                         texture_info.free();
314                 }
315
316                 cuda_assert(cuCtxDestroy(cuContext));
317         }
318
319         bool support_device(const DeviceRequestedFeatures& /*requested_features*/)
320         {
321                 int major, minor;
322                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
323                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
324
325                 /* We only support sm_20 and above */
326                 if(major < 2) {
327                         cuda_error_message(string_printf("CUDA device supported only with compute capability 2.0 or up, found %d.%d.", major, minor));
328                         return false;
329                 }
330
331                 return true;
332         }
333
334         bool use_adaptive_compilation()
335         {
336                 return DebugFlags().cuda.adaptive_compile;
337         }
338
339         bool use_split_kernel()
340         {
341                 return DebugFlags().cuda.split_kernel;
342         }
343
344         /* Common NVCC flags which stay the same regardless of shading model
345          * and kernel sources md5, and only depend on compiler or compilation settings.
346          */
347         string compile_kernel_get_common_cflags(
348                 const DeviceRequestedFeatures& requested_features,
349                 bool filter=false, bool split=false)
350         {
351                 const int cuda_version = cuewCompilerVersion();
352                 const int machine = system_cpu_bits();
353                 const string source_path = path_get("source");
354                 const string include_path = source_path;
355                 string cflags = string_printf("-m%d "
356                                               "--ptxas-options=\"-v\" "
357                                               "--use_fast_math "
358                                               "-DNVCC "
359                                               "-D__KERNEL_CUDA_VERSION__=%d "
360                                                "-I\"%s\"",
361                                               machine,
362                                               cuda_version,
363                                               include_path.c_str());
364                 if(!filter && use_adaptive_compilation()) {
365                         cflags += " " + requested_features.get_build_options();
366                 }
367                 const char *extra_cflags = getenv("CYCLES_CUDA_EXTRA_CFLAGS");
368                 if(extra_cflags) {
369                         cflags += string(" ") + string(extra_cflags);
370                 }
371 #ifdef WITH_CYCLES_DEBUG
372                 cflags += " -D__KERNEL_DEBUG__";
373 #endif
374
375                 if(split) {
376                         cflags += " -D__SPLIT__";
377                 }
378
379                 return cflags;
380         }
381
382         bool compile_check_compiler() {
383                 const char *nvcc = cuewCompilerPath();
384                 if(nvcc == NULL) {
385                         cuda_error_message("CUDA nvcc compiler not found. "
386                                            "Install CUDA toolkit in default location.");
387                         return false;
388                 }
389                 const int cuda_version = cuewCompilerVersion();
390                 VLOG(1) << "Found nvcc " << nvcc
391                         << ", CUDA version " << cuda_version
392                         << ".";
393                 const int major = cuda_version / 10, minor = cuda_version % 10;
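                /* cuewCompilerVersion() returns a two-digit value, e.g. 80 for
                 * CUDA 8.0, so major = 80 / 10 = 8 and minor = 80 % 10 = 0. */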
394                 if(cuda_version == 0) {
395                         cuda_error_message("CUDA nvcc compiler version could not be parsed.");
396                         return false;
397                 }
398                 if(cuda_version < 80) {
399                         printf("Unsupported CUDA version %d.%d detected, "
400                                "you need CUDA 8.0 or newer.\n",
401                                major, minor);
402                         return false;
403                 }
404                 else if(cuda_version != 80) {
405                         printf("CUDA version %d.%d detected, build may succeed but only "
406                                "CUDA 8.0 is officially supported.\n",
407                                major, minor);
408                 }
409                 return true;
410         }
411
412         string compile_kernel(const DeviceRequestedFeatures& requested_features,
413                               bool filter=false, bool split=false)
414         {
415                 const char *name, *source;
416                 if(filter) {
417                         name = "filter";
418                         source = "filter.cu";
419                 }
420                 else if(split) {
421                         name = "kernel_split";
422                         source = "kernel_split.cu";
423                 }
424                 else {
425                         name = "kernel";
426                         source = "kernel.cu";
427                 }
428                 /* Compute cubin name. */
429                 int major, minor;
430                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
431                 cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
432
433                 /* Attempt to use kernel provided with Blender. */
434                 if(!use_adaptive_compilation()) {
435                         const string cubin = path_get(string_printf("lib/%s_sm_%d%d.cubin",
436                                                                     name, major, minor));
437                         VLOG(1) << "Testing for pre-compiled kernel " << cubin << ".";
438                         if(path_exists(cubin)) {
439                                 VLOG(1) << "Using precompiled kernel.";
440                                 return cubin;
441                         }
442                 }
443
444                 const string common_cflags =
445                         compile_kernel_get_common_cflags(requested_features, filter, split);
446
447                 /* Try to use locally compiled kernel. */
448                 const string source_path = path_get("source");
449                 const string kernel_md5 = path_files_md5_hash(source_path);
450
451                 /* We include cflags in the md5, so changing the CUDA toolkit or other
452                  * compiler command line arguments makes sure the cubin gets re-built.
453                  */
454                 const string cubin_md5 = util_md5_string(kernel_md5 + common_cflags);
455
456                 const string cubin_file = string_printf("cycles_%s_sm%d%d_%s.cubin",
457                                                         name, major, minor,
458                                                         cubin_md5.c_str());
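                /* E.g. a (hypothetical) md5 of "ab12..." on an sm_61 GPU gives
                 * "cycles_kernel_sm61_ab12....cubin", so any change to the sources
                 * or cflags produces a distinct cache entry. */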
459                 const string cubin = path_cache_get(path_join("kernels", cubin_file));
460                 VLOG(1) << "Testing for locally compiled kernel " << cubin << ".";
461                 if(path_exists(cubin)) {
462                         VLOG(1) << "Using locally compiled kernel.";
463                         return cubin;
464                 }
465
466 #ifdef _WIN32
467                 if(have_precompiled_kernels()) {
468                         if(major < 2) {
469                                 cuda_error_message(string_printf(
470                                         "CUDA device requires compute capability 2.0 or up, "
471                                         "found %d.%d. Your GPU is not supported.",
472                                         major, minor));
473                         }
474                         else {
475                                 cuda_error_message(string_printf(
476                                         "CUDA binary kernel for this graphics card compute "
477                                         "capability (%d.%d) not found.",
478                                         major, minor));
479                         }
480                         return "";
481                 }
482 #endif
483
484                 /* Compile. */
485                 if(!compile_check_compiler()) {
486                         return "";
487                 }
488                 const char *nvcc = cuewCompilerPath();
489                 const string kernel = path_join(
490                         path_join(source_path, "kernel"),
491                         path_join("kernels",
492                                   path_join("cuda", source)));
493                 double starttime = time_dt();
494                 printf("Compiling CUDA kernel ...\n");
495
496                 path_create_directories(cubin);
497
498                 string command = string_printf("\"%s\" "
499                                                "-arch=sm_%d%d "
500                                                "--cubin \"%s\" "
501                                                "-o \"%s\" "
502                                                "%s ",
503                                                nvcc,
504                                                major, minor,
505                                                kernel.c_str(),
506                                                cubin.c_str(),
507                                                common_cflags.c_str());
508
509                 printf("%s\n", command.c_str());
510
511                 if(system(command.c_str()) == -1) {
512                         cuda_error_message("Failed to execute compilation command, "
513                                            "see console for details.");
514                         return "";
515                 }
516
517                 /* Verify if compilation succeeded */
518                 if(!path_exists(cubin)) {
519                         cuda_error_message("CUDA kernel compilation failed, "
520                                            "see console for details.");
521                         return "";
522                 }
523
524                 printf("Kernel compilation finished in %.2lfs.\n", time_dt() - starttime);
525
526                 return cubin;
527         }
528
529         bool load_kernels(const DeviceRequestedFeatures& requested_features)
530         {
531                 /* TODO(sergey): Support kernels re-load for CUDA devices.
532                  *
533                  * Currently re-loading kernel will invalidate memory pointers,
534                  * causing problems in cuCtxSynchronize.
535                  */
536                 if(cuFilterModule && cuModule) {
537                         VLOG(1) << "Skipping kernel reload, not currently supported.";
538                         return true;
539                 }
540
541                 /* check if cuda init succeeded */
542                 if(cuContext == 0)
543                         return false;
544
545                 /* check if GPU is supported */
546                 if(!support_device(requested_features))
547                         return false;
548
549                 /* get kernel */
550                 string cubin = compile_kernel(requested_features, false, use_split_kernel());
551                 if(cubin == "")
552                         return false;
553
554                 string filter_cubin = compile_kernel(requested_features, true, false);
555                 if(filter_cubin == "")
556                         return false;
557
558                 /* open module */
559                 CUDAContextScope scope(this);
560
561                 string cubin_data;
562                 CUresult result;
563
564                 if(path_read_text(cubin, cubin_data))
565                         result = cuModuleLoadData(&cuModule, cubin_data.c_str());
566                 else
567                         result = CUDA_ERROR_FILE_NOT_FOUND;
568
569                 if(cuda_error_(result, "cuModuleLoad"))
570                         cuda_error_message(string_printf("Failed loading CUDA kernel %s.", cubin.c_str()));
571
572                 if(path_read_text(filter_cubin, cubin_data))
573                         result = cuModuleLoadData(&cuFilterModule, cubin_data.c_str());
574                 else
575                         result = CUDA_ERROR_FILE_NOT_FOUND;
576
577                 if(cuda_error_(result, "cuModuleLoad"))
578                         cuda_error_message(string_printf("Failed loading CUDA kernel %s.", filter_cubin.c_str()));
579
580                 if(result == CUDA_SUCCESS) {
581                         reserve_local_memory(requested_features);
582                 }
583
584                 return (result == CUDA_SUCCESS);
585         }
586
587         void reserve_local_memory(const DeviceRequestedFeatures& requested_features)
588         {
589                 if(use_split_kernel()) {
590                 /* The split kernel mostly uses global memory and adaptive compilation,
591                  * so it is currently difficult to predict how much is needed. */
592                         return;
593                 }
594
595                 /* Together with CU_CTX_LMEM_RESIZE_TO_MAX, this reserves local memory
596                  * needed for kernel launches, so that we can reliably figure out when
597                  * to allocate scene data in mapped host memory. */
598                 CUDAContextScope scope(this);
599
600                 size_t total = 0, free_before = 0, free_after = 0;
601                 cuMemGetInfo(&free_before, &total);
602
603                 /* Get kernel function. */
604                 CUfunction cuPathTrace;
605
606                 if(requested_features.use_integrator_branched) {
607                         cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_branched_path_trace"));
608                 }
609                 else {
610                         cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_path_trace"));
611                 }
612
613                 cuda_assert(cuFuncSetCacheConfig(cuPathTrace, CU_FUNC_CACHE_PREFER_L1));
614
615                 int min_blocks, num_threads_per_block;
616                 cuda_assert(cuOccupancyMaxPotentialBlockSize(&min_blocks, &num_threads_per_block, cuPathTrace, NULL, 0, 0));
617
618                 /* Launch the kernel; using just 1 block appears sufficient to reserve
619                  * memory for all multiprocessors. It would still be good to do this in
620                  * parallel for the multi GPU case to make it faster. */
621                 CUdeviceptr d_work_tiles = 0;
622                 uint total_work_size = 0;
623
624                 void *args[] = {&d_work_tiles,
625                                 &total_work_size};
626
627                 cuda_assert(cuLaunchKernel(cuPathTrace,
628                                            1, 1, 1,
629                                            num_threads_per_block, 1, 1,
630                                            0, 0, args, 0));
631
632                 cuda_assert(cuCtxSynchronize());
633
634                 cuMemGetInfo(&free_after, &total);
635                 VLOG(1) << "Local memory reserved "
636                         << string_human_readable_number(free_before - free_after) << " bytes. ("
637                         << string_human_readable_size(free_before - free_after) << ")";
638
639 #if 0
640                 /* For testing mapped host memory, fill up device memory. */
641                 const size_t keep_mb = 1024;
642
643                 while(free_after > keep_mb * 1024 * 1024LL) {
644                         CUdeviceptr tmp;
645                         cuda_assert(cuMemAlloc(&tmp, 10 * 1024 * 1024LL));
646                         cuMemGetInfo(&free_after, &total);
647                 }
648 #endif
649         }
650
651         void init_host_memory()
652         {
653                 /* Limit amount of host mapped memory, because allocating too much can
654                  * cause system instability. Leave at least half or 4 GB of system
655                  * memory free, whichever is smaller. */
656                 size_t default_limit = 4 * 1024 * 1024 * 1024LL;
657                 size_t system_ram = system_physical_ram();
658
659                 if(system_ram > 0) {
660                         if(system_ram / 2 > default_limit) {
661                                 map_host_limit = system_ram - default_limit;
662                         }
663                         else {
664                                 map_host_limit = system_ram / 2;
665                         }
666                 }
667                 else {
668                         VLOG(1) << "Mapped host memory disabled, failed to get system RAM";
669                         map_host_limit = 0;
670                 }
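                /* Worked example: with 16 GB of system RAM, half (8 GB) exceeds the
                 * 4 GB default, so map_host_limit = 16 - 4 = 12 GB; with 6 GB of RAM
                 * the limit is simply half, i.e. 3 GB. */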
671
672                 /* Amount of device memory to keep free after texture memory
673                  * and working memory allocations respectively. We set the working
674                  * memory headroom lower so that some space is left after all
675                  * texture memory allocations. */
676                 device_working_headroom = 32 * 1024 * 1024LL; // 32MB
677                 device_texture_headroom = 128 * 1024 * 1024LL; // 128MB
678
679                 VLOG(1) << "Mapped host memory limit set to "
680                         << string_human_readable_number(map_host_limit) << " bytes. ("
681                         << string_human_readable_size(map_host_limit) << ")";
682         }
683
684         void load_texture_info()
685         {
686                 if(!info.has_fermi_limits && need_texture_info) {
687                         texture_info.copy_to_device();
688                         need_texture_info = false;
689                 }
690         }
691
692         void move_textures_to_host(size_t size, bool for_texture)
693         {
694                 /* Signal to reallocate textures in host memory only. */
695                 move_texture_to_host = true;
696
697                 while(size > 0) {
698                         /* Find suitable memory allocation to move. */
699                         device_memory *max_mem = NULL;
700                         size_t max_size = 0;
701                         bool max_is_image = false;
702
703                         foreach(CUDAMemMap::value_type& pair, cuda_mem_map) {
704                                 device_memory& mem = *pair.first;
705                                 CUDAMem *cmem = &pair.second;
706
707                                 bool is_texture = (mem.type == MEM_TEXTURE) && (&mem != &texture_info);
708                                 bool is_image = is_texture && (mem.data_height > 1);
709
710                                 /* Can't move this type of memory. */
711                                 if(!is_texture || cmem->array) {
712                                         continue;
713                                 }
714
715                                 /* Already in host memory. */
716                                 if(cmem->map_host_pointer) {
717                                         continue;
718                                 }
719
720                                 /* For other textures, only move image textures. */
721                                 if(for_texture && !is_image) {
722                                         continue;
723                                 }
724
725                                 /* Try to move largest allocation, prefer moving images. */
726                                 if(is_image > max_is_image ||
727                                    (is_image == max_is_image && mem.device_size > max_size)) {
728                                         max_is_image = is_image;
729                                         max_size = mem.device_size;
730                                         max_mem = &mem;
731                                 }
732                         }
733
734                         /* Move to host memory. This part is mutex protected since
735                          * multiple CUDA devices could be moving the memory. The
736                          * first one will do it, and the rest will adopt the pointer. */
737                         if(max_mem) {
738                                 VLOG(1) << "Move memory from device to host: " << max_mem->name;
739
740                                 static thread_mutex move_mutex;
741                                 thread_scoped_lock lock(move_mutex);
742
743                                 /* Preserve the original device pointer, in case of multi device
744                                  * we can't change it because the pointer mapping would break. */
745                                 device_ptr prev_pointer = max_mem->device_pointer;
746                                 size_t prev_size = max_mem->device_size;
747
748                                 tex_free(*max_mem);
749                                 tex_alloc(*max_mem);
750                                 size = (max_size >= size)? 0: size - max_size;
751
752                                 max_mem->device_pointer = prev_pointer;
753                                 max_mem->device_size = prev_size;
754                         }
755                         else {
756                                 break;
757                         }
758                 }
759
760                 /* Update texture info array with new pointers. */
761                 load_texture_info();
762
763                 move_texture_to_host = false;
764         }
765
766         CUDAMem *generic_alloc(device_memory& mem, size_t pitch_padding = 0)
767         {
768                 CUDAContextScope scope(this);
769
770                 CUdeviceptr device_pointer = 0;
771                 size_t size = mem.memory_size() + pitch_padding;
772
773                 CUresult mem_alloc_result = CUDA_ERROR_OUT_OF_MEMORY;
774                 const char *status = "";
775
776                 /* First try allocating in device memory, respecting headroom. We make
777                  * an exception for texture info. It is small and frequently accessed,
778                  * so treat it as working memory.
779                  *
780                  * If there is not enough room for working memory, we will try to move
781                  * textures to host memory, assuming the performance impact would have
782                  * been worse for working memory. */
783                 bool is_texture = (mem.type == MEM_TEXTURE) && (&mem != &texture_info);
784                 bool is_image = is_texture && (mem.data_height > 1);
785
786                 size_t headroom = (is_texture)? device_texture_headroom:
787                                                 device_working_headroom;
788
789                 size_t total = 0, free = 0;
790                 cuMemGetInfo(&free, &total);
791
792                 /* Move textures to host memory if needed. */
793                 if(!move_texture_to_host && !is_image && (size + headroom) >= free) {
794                         move_textures_to_host(size + headroom - free, is_texture);
795                         cuMemGetInfo(&free, &total);
796                 }
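                /* Worked example: with 1000 MB free and a 900 MB non-image texture
                 * request, 900 + 128 (texture headroom) >= 1000, so roughly 28 MB of
                 * existing textures are asked to move to host before allocating. */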
797
798                 /* Allocate in device memory. */
799                 if(!move_texture_to_host && (size + headroom) < free) {
800                         mem_alloc_result = cuMemAlloc(&device_pointer, size);
801                         if(mem_alloc_result == CUDA_SUCCESS) {
802                                 status = " in device memory";
803                         }
804                 }
805
806                 /* Fall back to mapped host memory if needed and possible. */
807                 void *map_host_pointer = 0;
808                 bool free_map_host = false;
809
810                 if(mem_alloc_result != CUDA_SUCCESS && can_map_host &&
811                    map_host_used + size < map_host_limit) {
812                         if(mem.shared_pointer) {
813                                 /* Another device already allocated host memory. */
814                                 mem_alloc_result = CUDA_SUCCESS;
815                                 map_host_pointer = mem.shared_pointer;
816                         }
817                         else {
818                                 /* Allocate host memory ourselves. */
819                                 mem_alloc_result = cuMemHostAlloc(&map_host_pointer, size,
820                                                                   CU_MEMHOSTALLOC_DEVICEMAP |
821                                                                   CU_MEMHOSTALLOC_WRITECOMBINED);
822                                 mem.shared_pointer = map_host_pointer;
823                                 free_map_host = true;
824                         }
825
826                         if(mem_alloc_result == CUDA_SUCCESS) {
827                                 cuda_assert(cuMemHostGetDevicePointer_v2(&device_pointer, mem.shared_pointer, 0));
828                                 map_host_used += size;
829                                 status = " in host memory";
830
831                                 /* Replace host pointer with our host allocation. Only works if
832                                  * CUDA memory layout is the same and has no pitch padding. Also
833                                  * does not work if we move textures to host during a render,
834                                  * since other devices might be using the memory. */
835                                 if(!move_texture_to_host && pitch_padding == 0 &&
836                                    mem.host_pointer && mem.host_pointer != mem.shared_pointer) {
837                                         memcpy(mem.shared_pointer, mem.host_pointer, size);
838                                         mem.host_free();
839                                         mem.host_pointer = mem.shared_pointer;
840                                 }
841                         }
842                         else {
843                                 status = " failed, out of host memory";
844                         }
845                 }
846                 else if(mem_alloc_result != CUDA_SUCCESS) {
847                         status = " failed, out of device and host memory";
848                 }
849
850                 if(mem_alloc_result != CUDA_SUCCESS) {
851                         cuda_assert(mem_alloc_result);
852                 }
853
854                 if(mem.name) {
855                         VLOG(1) << "Buffer allocate: " << mem.name << ", "
856                                         << string_human_readable_number(mem.memory_size()) << " bytes. ("
857                                         << string_human_readable_size(mem.memory_size()) << ")"
858                                         << status;
859                 }
860
861                 mem.device_pointer = (device_ptr)device_pointer;
862                 mem.device_size = size;
863                 stats.mem_alloc(size);
864
865                 if(!mem.device_pointer) {
866                         return NULL;
867                 }
868
869                 /* Insert into map of allocations. */
870                 CUDAMem *cmem = &cuda_mem_map[&mem];
871                 cmem->map_host_pointer = map_host_pointer;
872                 cmem->free_map_host = free_map_host;
873                 return cmem;
874         }
875
876         void generic_copy_to(device_memory& mem)
877         {
878                 if(mem.host_pointer && mem.device_pointer) {
879                         CUDAContextScope scope(this);
880
881                         if(mem.host_pointer != mem.shared_pointer) {
882                                 cuda_assert(cuMemcpyHtoD(cuda_device_ptr(mem.device_pointer),
883                                                          mem.host_pointer,
884                                                          mem.memory_size()));
885                         }
886                 }
887         }
888
889         void generic_free(device_memory& mem)
890         {
891                 if(mem.device_pointer) {
892                         CUDAContextScope scope(this);
893                         const CUDAMem& cmem = cuda_mem_map[&mem];
894
895                         if(cmem.map_host_pointer) {
896                                 /* Free host memory. */
897                                 if(cmem.free_map_host) {
898                                         cuMemFreeHost(cmem.map_host_pointer);
899                                         if(mem.host_pointer == mem.shared_pointer) {
900                                                 mem.host_pointer = 0;
901                                         }
902                                         mem.shared_pointer = 0;
903                                 }
904
905                                 map_host_used -= mem.device_size;
906                         }
907                         else {
908                                 /* Free device memory. */
909                                 cuMemFree(mem.device_pointer);
910                         }
911
912                         stats.mem_free(mem.device_size);
913                         mem.device_pointer = 0;
914                         mem.device_size = 0;
915
916                         cuda_mem_map.erase(cuda_mem_map.find(&mem));
917                 }
918         }
919
920         void mem_alloc(device_memory& mem)
921         {
922                 if(mem.type == MEM_PIXELS && !background) {
923                         pixels_alloc(mem);
924                 }
925                 else if(mem.type == MEM_TEXTURE) {
926                         assert(!"mem_alloc not supported for textures.");
927                 }
928                 else {
929                         generic_alloc(mem);
930                 }
931         }
932
933         void mem_copy_to(device_memory& mem)
934         {
935                 if(mem.type == MEM_PIXELS) {
936                         assert(!"mem_copy_to not supported for pixels.");
937                 }
938                 else if(mem.type == MEM_TEXTURE) {
939                         tex_free(mem);
940                         tex_alloc(mem);
941                 }
942                 else {
943                         if(!mem.device_pointer) {
944                                 generic_alloc(mem);
945                         }
946
947                         generic_copy_to(mem);
948                 }
949         }
950
951         void mem_copy_from(device_memory& mem, int y, int w, int h, int elem)
952         {
953                 if(mem.type == MEM_PIXELS && !background) {
954                         pixels_copy_from(mem, y, w, h);
955                 }
956                 else if(mem.type == MEM_TEXTURE) {
957                         assert(!"mem_copy_from not supported for textures.");
958                 }
959                 else {
960                         CUDAContextScope scope(this);
961                         size_t offset = elem*y*w;
962                         size_t size = elem*w*h;
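                        /* elem is the per-element size in bytes, so e.g. with elem = 4,
                         * y = 2, w = 1920, h = 1 the copy starts at byte offset 15360
                         * and spans 7680 bytes of the buffer. */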
963
964                         if(mem.host_pointer && mem.device_pointer) {
965                                 cuda_assert(cuMemcpyDtoH((uchar*)mem.host_pointer + offset,
966                                                                                  (CUdeviceptr)(mem.device_pointer + offset), size));
967                         }
968                         else if(mem.host_pointer) {
969                                 memset((char*)mem.host_pointer + offset, 0, size);
970                         }
971                 }
972         }
973
974         void mem_zero(device_memory& mem)
975         {
976                 if(!mem.device_pointer) {
977                         mem_alloc(mem);
978                 }
979
980                 if(mem.host_pointer) {
981                         memset(mem.host_pointer, 0, mem.memory_size());
982                 }
983
984                 if(mem.device_pointer &&
985                    (!mem.host_pointer || mem.host_pointer != mem.shared_pointer)) {
986                         CUDAContextScope scope(this);
987                         cuda_assert(cuMemsetD8(cuda_device_ptr(mem.device_pointer), 0, mem.memory_size()));
988                 }
989         }
990
991         void mem_free(device_memory& mem)
992         {
993                 if(mem.type == MEM_PIXELS && !background) {
994                         pixels_free(mem);
995                 }
996                 else if(mem.type == MEM_TEXTURE) {
997                         tex_free(mem);
998                 }
999                 else {
1000                         generic_free(mem);
1001                 }
1002         }
1003
1004         virtual device_ptr mem_alloc_sub_ptr(device_memory& mem, int offset, int /*size*/)
1005         {
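                /* No new memory is allocated here; the offset (in elements) is
                 * converted to bytes by memory_elements_size() and added to the
                 * existing device pointer. */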
1006                 return (device_ptr) (((char*) mem.device_pointer) + mem.memory_elements_size(offset));
1007         }
1008
1009         void const_copy_to(const char *name, void *host, size_t size)
1010         {
1011                 CUDAContextScope scope(this);
1012                 CUdeviceptr mem;
1013                 size_t bytes;
1014
1015                 cuda_assert(cuModuleGetGlobal(&mem, &bytes, cuModule, name));
1016                 //assert(bytes == size);
1017                 cuda_assert(cuMemcpyHtoD(mem, host, size));
1018         }
1019
1020         void tex_alloc(device_memory& mem)
1021         {
1022                 CUDAContextScope scope(this);
1023
1024                 /* Check if we are on sm_30 or above, for bindless textures. */
1025                 bool has_fermi_limits = info.has_fermi_limits;
1026
1027                 /* General variables for both architectures */
1028                 string bind_name = mem.name;
1029                 size_t dsize = datatype_size(mem.data_type);
1030                 size_t size = mem.memory_size();
1031
1032                 CUaddress_mode address_mode = CU_TR_ADDRESS_MODE_WRAP;
1033                 switch(mem.extension) {
1034                         case EXTENSION_REPEAT:
1035                                 address_mode = CU_TR_ADDRESS_MODE_WRAP;
1036                                 break;
1037                         case EXTENSION_EXTEND:
1038                                 address_mode = CU_TR_ADDRESS_MODE_CLAMP;
1039                                 break;
1040                         case EXTENSION_CLIP:
1041                                 address_mode = CU_TR_ADDRESS_MODE_BORDER;
1042                                 break;
1043                         default:
1044                                 assert(0);
1045                                 break;
1046                 }
1047
1048                 CUfilter_mode filter_mode;
1049                 if(mem.interpolation == INTERPOLATION_CLOSEST) {
1050                         filter_mode = CU_TR_FILTER_MODE_POINT;
1051                 }
1052                 else {
1053                         filter_mode = CU_TR_FILTER_MODE_LINEAR;
1054                 }
1055
1056                 /* Data Storage */
1057                 if(mem.interpolation == INTERPOLATION_NONE) {
1058                         generic_alloc(mem);
1059                         generic_copy_to(mem);
1060
1061                         CUdeviceptr cumem;
1062                         size_t cubytes;
1063
1064                         cuda_assert(cuModuleGetGlobal(&cumem, &cubytes, cuModule, bind_name.c_str()));
1065
1066                         if(cubytes == 8) {
1067                                 /* 64 bit device pointer */
1068                                 uint64_t ptr = mem.device_pointer;
1069                                 cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
1070                         }
1071                         else {
1072                                 /* 32 bit device pointer */
1073                                 uint32_t ptr = (uint32_t)mem.device_pointer;
1074                                 cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
1075                         }
1076                         return;
1077                 }
1078
1079                 /* Image Texture Storage */
1080                 CUtexref texref = NULL;
1081
1082                 if(has_fermi_limits) {
1083                         if(mem.data_depth > 1) {
1084                                 /* The kernel uses different bind names for 2d and 3d float textures,
1085                                  * so we have to adjust a couple of things here.
1086                                  */
1087                                 vector<string> tokens;
1088                                 string_split(tokens, mem.name, "_");
1089                                 bind_name = string_printf("__tex_image_%s_3d_%s",
1090                                                           tokens[2].c_str(),
1091                                                           tokens[3].c_str());
1092                         }
1093
1094                         cuda_assert(cuModuleGetTexRef(&texref, cuModule, bind_name.c_str()));
1095
1096                         if(!texref) {
1097                                 return;
1098                         }
1099                 }
1100
1101                 CUarray_format_enum format;
1102                 switch(mem.data_type) {
1103                         case TYPE_UCHAR: format = CU_AD_FORMAT_UNSIGNED_INT8; break;
1104                         case TYPE_UINT: format = CU_AD_FORMAT_UNSIGNED_INT32; break;
1105                         case TYPE_INT: format = CU_AD_FORMAT_SIGNED_INT32; break;
1106                         case TYPE_FLOAT: format = CU_AD_FORMAT_FLOAT; break;
1107                         case TYPE_HALF: format = CU_AD_FORMAT_HALF; break;
1108                         default: assert(0); return;
1109                 }
1110
1111                 CUDAMem *cmem = NULL;
1112                 CUarray array_3d = NULL;
1113                 size_t src_pitch = mem.data_width * dsize * mem.data_elements;
1114                 size_t dst_pitch = src_pitch;
1115
1116                 if(mem.data_depth > 1) {
1117                         /* 3D texture using array, there is no API for linear memory. */
1118                         CUDA_ARRAY3D_DESCRIPTOR desc;
1119
1120                         desc.Width = mem.data_width;
1121                         desc.Height = mem.data_height;
1122                         desc.Depth = mem.data_depth;
1123                         desc.Format = format;
1124                         desc.NumChannels = mem.data_elements;
1125                         desc.Flags = 0;
1126
1127                         VLOG(1) << "Array 3D allocate: " << mem.name << ", "
1128                                 << string_human_readable_number(mem.memory_size()) << " bytes. ("
1129                                 << string_human_readable_size(mem.memory_size()) << ")";
1130
1131                         cuda_assert(cuArray3DCreate(&array_3d, &desc));
1132
1133                         if(!array_3d) {
1134                                 return;
1135                         }
1136
1137                         CUDA_MEMCPY3D param;
1138                         memset(&param, 0, sizeof(param));
1139                         param.dstMemoryType = CU_MEMORYTYPE_ARRAY;
1140                         param.dstArray = array_3d;
1141                         param.srcMemoryType = CU_MEMORYTYPE_HOST;
1142                         param.srcHost = mem.host_pointer;
1143                         param.srcPitch = src_pitch;
1144                         param.WidthInBytes = param.srcPitch;
1145                         param.Height = mem.data_height;
1146                         param.Depth = mem.data_depth;
1147
1148                         cuda_assert(cuMemcpy3D(&param));
1149
1150                         mem.device_pointer = (device_ptr)array_3d;
1151                         mem.device_size = size;
1152                         stats.mem_alloc(size);
1153
1154                         cmem = &cuda_mem_map[&mem];
1155                         cmem->texobject = 0;
1156                         cmem->array = array_3d;
1157                 }
1158                 else if(mem.data_height > 0) {
1159                         /* 2D texture, using pitch aligned linear memory. */
1160                         int alignment = 0;
1161                         cuda_assert(cuDeviceGetAttribute(&alignment, CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT, cuDevice));
1162                         dst_pitch = align_up(src_pitch, alignment);
1163                         size_t dst_size = dst_pitch * mem.data_height;
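                        /* E.g. a 1001-wide float4 image has src_pitch = 1001*4*4 = 16016
                         * bytes; assuming a 32-byte pitch alignment, dst_pitch becomes
                         * 16032 and the extra padding is requested via generic_alloc(). */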
1164
1165                         cmem = generic_alloc(mem, dst_size - mem.memory_size());
1166                         if(!cmem) {
1167                                 return;
1168                         }
1169
1170                         CUDA_MEMCPY2D param;
1171                         memset(&param, 0, sizeof(param));
1172                         param.dstMemoryType = CU_MEMORYTYPE_DEVICE;
1173                         param.dstDevice = mem.device_pointer;
1174                         param.dstPitch = dst_pitch;
1175                         param.srcMemoryType = CU_MEMORYTYPE_HOST;
1176                         param.srcHost = mem.host_pointer;
1177                         param.srcPitch = src_pitch;
1178                         param.WidthInBytes = param.srcPitch;
1179                         param.Height = mem.data_height;
1180
1181                         cuda_assert(cuMemcpy2DUnaligned(&param));
1182                 }
1183                 else {
1184                         /* 1D texture, using linear memory. */
1185                         cmem = generic_alloc(mem);
1186                         if(!cmem) {
1187                                 return;
1188                         }
1189
1190                         cuda_assert(cuMemcpyHtoD(mem.device_pointer, mem.host_pointer, size));
1191                 }
1192
1193                 if(!has_fermi_limits) {
1194                         /* Kepler+, bindless textures. */
1195                         int flat_slot = 0;
1196                         if(string_startswith(mem.name, "__tex_image")) {
1197                                 int pos =  string(mem.name).rfind("_");
1198                                 flat_slot = atoi(mem.name + pos + 1);
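                                /* The slot index is encoded in the name, e.g. a (hypothetical)
                                 * "__tex_image_float4_007" gives flat_slot = 7. */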
1199                         }
1200                         else {
1201                                 assert(0);
1202                         }
1203
1204                         CUDA_RESOURCE_DESC resDesc;
1205                         memset(&resDesc, 0, sizeof(resDesc));
1206
1207                         if(array_3d) {
1208                                 resDesc.resType = CU_RESOURCE_TYPE_ARRAY;
1209                                 resDesc.res.array.hArray = array_3d;
1210                                 resDesc.flags = 0;
1211                         }
1212                         else if(mem.data_height > 0) {
1213                                 resDesc.resType = CU_RESOURCE_TYPE_PITCH2D;
1214                                 resDesc.res.pitch2D.devPtr = mem.device_pointer;
1215                                 resDesc.res.pitch2D.format = format;
1216                                 resDesc.res.pitch2D.numChannels = mem.data_elements;
1217                                 resDesc.res.pitch2D.height = mem.data_height;
1218                                 resDesc.res.pitch2D.width = mem.data_width;
1219                                 resDesc.res.pitch2D.pitchInBytes = dst_pitch;
1220                         }
1221                         else {
1222                                 resDesc.resType = CU_RESOURCE_TYPE_LINEAR;
1223                                 resDesc.res.linear.devPtr = mem.device_pointer;
1224                                 resDesc.res.linear.format = format;
1225                                 resDesc.res.linear.numChannels = mem.data_elements;
1226                                 resDesc.res.linear.sizeInBytes = mem.device_size;
1227                         }
1228
1229                         CUDA_TEXTURE_DESC texDesc;
1230                         memset(&texDesc, 0, sizeof(texDesc));
1231                         texDesc.addressMode[0] = address_mode;
1232                         texDesc.addressMode[1] = address_mode;
1233                         texDesc.addressMode[2] = address_mode;
1234                         texDesc.filterMode = filter_mode;
1235                         texDesc.flags = CU_TRSF_NORMALIZED_COORDINATES;
1236
1237                         cuda_assert(cuTexObjectCreate(&cmem->texobject, &resDesc, &texDesc, NULL));
1238
1239                         /* Resize once */
1240                         if(flat_slot >= texture_info.size()) {
1241                                 /* Allocate some slots in advance, to reduce the number
1242                                  * of re-allocations. */
1243                                 texture_info.resize(flat_slot + 128);
1244                         }
1245
1246                         /* Set Mapping and tag that we need to (re-)upload to device */
1247                         TextureInfo& info = texture_info[flat_slot];
1248                         info.data = (uint64_t)cmem->texobject;
1249                         info.cl_buffer = 0;
1250                         info.interpolation = mem.interpolation;
1251                         info.extension = mem.extension;
1252                         info.width = mem.data_width;
1253                         info.height = mem.data_height;
1254                         info.depth = mem.data_depth;
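                        /* texture_info is copied to the device by load_texture_info()
                         * (see task_add()) before the next kernel launch. */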
1255                         need_texture_info = true;
1256                 }
1257                 else {
1258                         /* Fermi, fixed texture slots. */
1259                         if(array_3d) {
1260                                 cuda_assert(cuTexRefSetArray(texref, array_3d, CU_TRSA_OVERRIDE_FORMAT));
1261                         }
1262                         else if(mem.data_height > 0) {
1263                                 CUDA_ARRAY_DESCRIPTOR array_desc;
1264                                 array_desc.Format = format;
1265                                 array_desc.Height = mem.data_height;
1266                                 array_desc.Width = mem.data_width;
1267                                 array_desc.NumChannels = mem.data_elements;
1268                                 cuda_assert(cuTexRefSetAddress2D_v3(texref, &array_desc, mem.device_pointer, dst_pitch));
1269                         }
1270                         else {
1271                                 cuda_assert(cuTexRefSetAddress(NULL, texref, cuda_device_ptr(mem.device_pointer), size));
1272                         }
1273
1274                         /* Set parameters on the texture reference. */
1275                         cuda_assert(cuTexRefSetFilterMode(texref, filter_mode));
1276                         cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_NORMALIZED_COORDINATES));
1277                         cuda_assert(cuTexRefSetFormat(texref, format, mem.data_elements));
1278                         cuda_assert(cuTexRefSetAddressMode(texref, 0, address_mode));
1279                         cuda_assert(cuTexRefSetAddressMode(texref, 1, address_mode));
1280                         if(mem.data_depth > 1) {
1281                                 cuda_assert(cuTexRefSetAddressMode(texref, 2, address_mode));
1282                         }
1283                 }
1284         }
1285
1286         void tex_free(device_memory& mem)
1287         {
1288                 if(mem.device_pointer) {
1289                         CUDAContextScope scope(this);
1290                         const CUDAMem& cmem = cuda_mem_map[&mem];
1291
1292                         if(cmem.texobject) {
1293                                 /* Free bindless texture. */
1294                                 cuTexObjectDestroy(cmem.texobject);
1295                         }
1296
1297                         if(cmem.array) {
1298                                 /* Free array. */
1299                                 cuArrayDestroy(cmem.array);
1300                                 stats.mem_free(mem.device_size);
1301                                 mem.device_pointer = 0;
1302                                 mem.device_size = 0;
1303
1304                                 cuda_mem_map.erase(cuda_mem_map.find(&mem));
1305                         }
1306                         else {
1307                                 generic_free(mem);
1308                         }
1309                 }
1310         }
1311
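        /* Copy the 3x3 neighbour tile buffer pointers into the TilesInfo structure used by
         * the filter kernels; index 4 is the centre tile being denoised (see denoise()). */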
1312         bool denoising_set_tiles(device_ptr *buffers, DenoisingTask *task)
1313         {
1314                 TilesInfo *tiles = (TilesInfo*) task->tiles_mem.host_pointer;
1315                 for(int i = 0; i < 9; i++) {
1316                         tiles->buffers[i] = buffers[i];
1317                 }
1318
1319                 task->tiles_mem.copy_to_device();
1320
1321                 return !have_error();
1322         }
1323
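/* Helper macros for the filter kernel launches below. Each CUDA_GET_BLOCKSIZE* variant
 * queries the kernel's CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK and declares the local
 * variables (threads_per_block, threads, xblocks, yblocks) that the matching
 * CUDA_LAUNCH_KERNEL* macro uses, so both must be expanded in the same scope. For
 * example, with a hypothetical limit of 1024 threads per block, the 2D variant launches
 * 32x32 thread blocks over a ceil(w/32) x ceil(h/32) grid. */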
1324 #define CUDA_GET_BLOCKSIZE(func, w, h)                                                                          \
1325                         int threads_per_block;                                                                              \
1326                         cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, func)); \
1327                         int threads = (int)sqrt((float)threads_per_block);                                                  \
1328                         int xblocks = ((w) + threads - 1)/threads;                                                          \
1329                         int yblocks = ((h) + threads - 1)/threads;
1330
1331 #define CUDA_LAUNCH_KERNEL(func, args)                      \
1332                         cuda_assert(cuLaunchKernel(func,                \
1333                                                    xblocks, yblocks, 1, \
1334                                                    threads, threads, 1, \
1335                                                    0, 0, args, 0));
1336
1337 /* Similar to the above, but for one-dimensional blocks. */
1338 #define CUDA_GET_BLOCKSIZE_1D(func, w, h)                                                                       \
1339                         int threads_per_block;                                                                              \
1340                         cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, func)); \
1341                         int xblocks = ((w) + threads_per_block - 1)/threads_per_block;                                      \
1342                         int yblocks = h;
1343
1344 #define CUDA_LAUNCH_KERNEL_1D(func, args)                       \
1345                         cuda_assert(cuLaunchKernel(func,                    \
1346                                                    xblocks, yblocks, 1,     \
1347                                                    threads_per_block, 1, 1, \
1348                                                    0, 0, args, 0));
1349
1350         bool denoising_non_local_means(device_ptr image_ptr, device_ptr guide_ptr, device_ptr variance_ptr, device_ptr out_ptr,
1351                                        DenoisingTask *task)
1352         {
1353                 if(have_error())
1354                         return false;
1355
1356                 CUDAContextScope scope(this);
1357
1358                 int stride = task->buffer.stride;
1359                 int w = task->buffer.width;
1360                 int h = task->buffer.h;
1361                 int r = task->nlm_state.r;
1362                 int f = task->nlm_state.f;
1363                 float a = task->nlm_state.a;
1364                 float k_2 = task->nlm_state.k_2;
1365
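                /* The NLM kernels evaluate (2*r+1)^2 pixel shifts; the difference and
                 * blurred-difference buffers each hold shift_stride*num_shifts floats,
                 * hence the 2*mem_size bytes of temporary memory below. */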
1366                 int shift_stride = stride*h;
1367                 int num_shifts = (2*r+1)*(2*r+1);
1368                 int mem_size = sizeof(float)*shift_stride*num_shifts;
1369                 int channel_offset = 0;
1370
1371                 device_only_memory<uchar> temporary_mem(this, "Denoising temporary_mem");
1372                 temporary_mem.alloc_to_device(2*mem_size);
1373
1374                 if(have_error())
1375                         return false;
1376
1377                 CUdeviceptr difference     = cuda_device_ptr(temporary_mem.device_pointer);
1378                 CUdeviceptr blurDifference = difference + mem_size;
1379
1380                 CUdeviceptr weightAccum = task->nlm_state.temporary_3_ptr;
1381                 cuda_assert(cuMemsetD8(weightAccum, 0, sizeof(float)*shift_stride));
1382                 cuda_assert(cuMemsetD8(out_ptr, 0, sizeof(float)*shift_stride));
1383
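                /* NLM pipeline: squared differences -> blur -> weights -> blur ->
                 * weighted accumulation into out_ptr/weightAccum, followed by a
                 * normalization pass that divides by the accumulated weights. */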
1384                 {
1385                         CUfunction cuNLMCalcDifference, cuNLMBlur, cuNLMCalcWeight, cuNLMUpdateOutput;
1386                         cuda_assert(cuModuleGetFunction(&cuNLMCalcDifference, cuFilterModule, "kernel_cuda_filter_nlm_calc_difference"));
1387                         cuda_assert(cuModuleGetFunction(&cuNLMBlur,           cuFilterModule, "kernel_cuda_filter_nlm_blur"));
1388                         cuda_assert(cuModuleGetFunction(&cuNLMCalcWeight,     cuFilterModule, "kernel_cuda_filter_nlm_calc_weight"));
1389                         cuda_assert(cuModuleGetFunction(&cuNLMUpdateOutput,   cuFilterModule, "kernel_cuda_filter_nlm_update_output"));
1390
1391                         cuda_assert(cuFuncSetCacheConfig(cuNLMCalcDifference, CU_FUNC_CACHE_PREFER_L1));
1392                         cuda_assert(cuFuncSetCacheConfig(cuNLMBlur,           CU_FUNC_CACHE_PREFER_L1));
1393                         cuda_assert(cuFuncSetCacheConfig(cuNLMCalcWeight,     CU_FUNC_CACHE_PREFER_L1));
1394                         cuda_assert(cuFuncSetCacheConfig(cuNLMUpdateOutput,   CU_FUNC_CACHE_PREFER_L1));
1395
1396                         CUDA_GET_BLOCKSIZE_1D(cuNLMCalcDifference, w*h, num_shifts);
1397
1398                         void *calc_difference_args[] = {&guide_ptr, &variance_ptr, &difference, &w, &h, &stride, &shift_stride, &r, &channel_offset, &a, &k_2};
1399                         void *blur_args[]            = {&difference, &blurDifference, &w, &h, &stride, &shift_stride, &r, &f};
1400                         void *calc_weight_args[]     = {&blurDifference, &difference, &w, &h, &stride, &shift_stride, &r, &f};
1401                         void *update_output_args[]   = {&blurDifference, &image_ptr, &out_ptr, &weightAccum, &w, &h, &stride, &shift_stride, &r, &f};
1402
1403                         CUDA_LAUNCH_KERNEL_1D(cuNLMCalcDifference, calc_difference_args);
1404                         CUDA_LAUNCH_KERNEL_1D(cuNLMBlur, blur_args);
1405                         CUDA_LAUNCH_KERNEL_1D(cuNLMCalcWeight, calc_weight_args);
1406                         CUDA_LAUNCH_KERNEL_1D(cuNLMBlur, blur_args);
1407                         CUDA_LAUNCH_KERNEL_1D(cuNLMUpdateOutput, update_output_args);
1408                 }
1409
1410                 temporary_mem.free();
1411
1412                 {
1413                         CUfunction cuNLMNormalize;
1414                         cuda_assert(cuModuleGetFunction(&cuNLMNormalize, cuFilterModule, "kernel_cuda_filter_nlm_normalize"));
1415                         cuda_assert(cuFuncSetCacheConfig(cuNLMNormalize, CU_FUNC_CACHE_PREFER_L1));
1416                         void *normalize_args[] = {&out_ptr, &weightAccum, &w, &h, &stride};
1417                         CUDA_GET_BLOCKSIZE(cuNLMNormalize, w, h);
1418                         CUDA_LAUNCH_KERNEL(cuNLMNormalize, normalize_args);
1419                         cuda_assert(cuCtxSynchronize());
1420                 }
1421
1422                 return !have_error();
1423         }
1424
1425         bool denoising_construct_transform(DenoisingTask *task)
1426         {
1427                 if(have_error())
1428                         return false;
1429
1430                 CUDAContextScope scope(this);
1431
1432                 CUfunction cuFilterConstructTransform;
1433                 cuda_assert(cuModuleGetFunction(&cuFilterConstructTransform, cuFilterModule, "kernel_cuda_filter_construct_transform"));
1434                 cuda_assert(cuFuncSetCacheConfig(cuFilterConstructTransform, CU_FUNC_CACHE_PREFER_SHARED));
1435                 CUDA_GET_BLOCKSIZE(cuFilterConstructTransform,
1436                                    task->storage.w,
1437                                    task->storage.h);
1438
1439                 void *args[] = {&task->buffer.mem.device_pointer,
1440                                 &task->storage.transform.device_pointer,
1441                                 &task->storage.rank.device_pointer,
1442                                 &task->filter_area,
1443                                 &task->rect,
1444                                 &task->radius,
1445                                 &task->pca_threshold,
1446                                 &task->buffer.pass_stride};
1447                 CUDA_LAUNCH_KERNEL(cuFilterConstructTransform, args);
1448                 cuda_assert(cuCtxSynchronize());
1449
1450                 return !have_error();
1451         }
1452
1453         bool denoising_reconstruct(device_ptr color_ptr,
1454                                    device_ptr color_variance_ptr,
1455                                    device_ptr output_ptr,
1456                                    DenoisingTask *task)
1457         {
1458                 if(have_error())
1459                         return false;
1460
1461                 CUDAContextScope scope(this);
1462
1463                 mem_zero(task->storage.XtWX);
1464                 mem_zero(task->storage.XtWY);
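                /* XtWX and XtWY accumulate, per pixel, the normal equations of the weighted
                 * least-squares fit built by the construct_gramian kernel; the finalize
                 * kernel then solves them to produce the reconstructed output. */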
1465
1466                 int r = task->radius;
1467                 int f = 4;
1468                 float a = 1.0f;
1469                 float k_2 = task->nlm_k_2;
1470
1471                 int w = task->reconstruction_state.source_w;
1472                 int h = task->reconstruction_state.source_h;
1473                 int stride = task->buffer.stride;
1474
1475                 int shift_stride = stride*h;
1476                 int num_shifts = (2*r+1)*(2*r+1);
1477                 int mem_size = sizeof(float)*shift_stride*num_shifts;
1478
1479                 device_only_memory<uchar> temporary_mem(this, "Denoising temporary_mem");
1480                 temporary_mem.alloc_to_device(2*mem_size);
1481
1482                 if(have_error())
1483                         return false;
1484
1485                 CUdeviceptr difference     = cuda_device_ptr(temporary_mem.device_pointer);
1486                 CUdeviceptr blurDifference = difference + mem_size;
1487
1488                 {
1489                         CUfunction cuNLMCalcDifference, cuNLMBlur, cuNLMCalcWeight, cuNLMConstructGramian;
1490                         cuda_assert(cuModuleGetFunction(&cuNLMCalcDifference,   cuFilterModule, "kernel_cuda_filter_nlm_calc_difference"));
1491                         cuda_assert(cuModuleGetFunction(&cuNLMBlur,             cuFilterModule, "kernel_cuda_filter_nlm_blur"));
1492                         cuda_assert(cuModuleGetFunction(&cuNLMCalcWeight,       cuFilterModule, "kernel_cuda_filter_nlm_calc_weight"));
1493                         cuda_assert(cuModuleGetFunction(&cuNLMConstructGramian, cuFilterModule, "kernel_cuda_filter_nlm_construct_gramian"));
1494
1495                         cuda_assert(cuFuncSetCacheConfig(cuNLMCalcDifference,   CU_FUNC_CACHE_PREFER_L1));
1496                         cuda_assert(cuFuncSetCacheConfig(cuNLMBlur,             CU_FUNC_CACHE_PREFER_L1));
1497                         cuda_assert(cuFuncSetCacheConfig(cuNLMCalcWeight,       CU_FUNC_CACHE_PREFER_L1));
1498                         cuda_assert(cuFuncSetCacheConfig(cuNLMConstructGramian, CU_FUNC_CACHE_PREFER_SHARED));
1499
1500                         CUDA_GET_BLOCKSIZE_1D(cuNLMCalcDifference,
1501                                              task->reconstruction_state.source_w * task->reconstruction_state.source_h,
1502                                              num_shifts);
1503
1504                         void *calc_difference_args[] = {&color_ptr, &color_variance_ptr, &difference, &w, &h, &stride, &shift_stride, &r, &task->buffer.pass_stride, &a, &k_2};
1505                         void *blur_args[]            = {&difference, &blurDifference, &w, &h, &stride, &shift_stride, &r, &f};
1506                         void *calc_weight_args[]     = {&blurDifference, &difference, &w, &h, &stride, &shift_stride, &r, &f};
1507                         void *construct_gramian_args[] = {&blurDifference,
1508                                                           &task->buffer.mem.device_pointer,
1509                                                           &task->storage.transform.device_pointer,
1510                                                           &task->storage.rank.device_pointer,
1511                                                           &task->storage.XtWX.device_pointer,
1512                                                           &task->storage.XtWY.device_pointer,
1513                                                           &task->reconstruction_state.filter_window,
1514                                                           &w, &h, &stride,
1515                                                           &shift_stride, &r,
1516                                                           &f,
1517                                                       &task->buffer.pass_stride};
1518
1519                         CUDA_LAUNCH_KERNEL_1D(cuNLMCalcDifference, calc_difference_args);
1520                         CUDA_LAUNCH_KERNEL_1D(cuNLMBlur, blur_args);
1521                         CUDA_LAUNCH_KERNEL_1D(cuNLMCalcWeight, calc_weight_args);
1522                         CUDA_LAUNCH_KERNEL_1D(cuNLMBlur, blur_args);
1523                         CUDA_LAUNCH_KERNEL_1D(cuNLMConstructGramian, construct_gramian_args);
1524                 }
1525
1526                 temporary_mem.free();
1527
1528                 {
1529                         CUfunction cuFinalize;
1530                         cuda_assert(cuModuleGetFunction(&cuFinalize, cuFilterModule, "kernel_cuda_filter_finalize"));
1531                         cuda_assert(cuFuncSetCacheConfig(cuFinalize, CU_FUNC_CACHE_PREFER_L1));
1532                         void *finalize_args[] = {&output_ptr,
1533                                                          &task->storage.rank.device_pointer,
1534                                                          &task->storage.XtWX.device_pointer,
1535                                                          &task->storage.XtWY.device_pointer,
1536                                                          &task->filter_area,
1537                                                          &task->reconstruction_state.buffer_params.x,
1538                                                          &task->render_buffer.samples};
1539                         CUDA_GET_BLOCKSIZE(cuFinalize,
1540                                            task->reconstruction_state.source_w,
1541                                            task->reconstruction_state.source_h);
1542                         CUDA_LAUNCH_KERNEL(cuFinalize, finalize_args);
1543                 }
1544
1545                 cuda_assert(cuCtxSynchronize());
1546
1547                 return !have_error();
1548         }
1549
1550         bool denoising_combine_halves(device_ptr a_ptr, device_ptr b_ptr,
1551                                       device_ptr mean_ptr, device_ptr variance_ptr,
1552                                       int r, int4 rect, DenoisingTask *task)
1553         {
1554                 if(have_error())
1555                         return false;
1556
1557                 CUDAContextScope scope(this);
1558
1559                 CUfunction cuFilterCombineHalves;
1560                 cuda_assert(cuModuleGetFunction(&cuFilterCombineHalves, cuFilterModule, "kernel_cuda_filter_combine_halves"));
1561                 cuda_assert(cuFuncSetCacheConfig(cuFilterCombineHalves, CU_FUNC_CACHE_PREFER_L1));
1562                 CUDA_GET_BLOCKSIZE(cuFilterCombineHalves,
1563                                    task->rect.z-task->rect.x,
1564                                    task->rect.w-task->rect.y);
1565
1566                 void *args[] = {&mean_ptr,
1567                                 &variance_ptr,
1568                                 &a_ptr,
1569                                 &b_ptr,
1570                                 &rect,
1571                                 &r};
1572                 CUDA_LAUNCH_KERNEL(cuFilterCombineHalves, args);
1573                 cuda_assert(cuCtxSynchronize());
1574
1575                 return !have_error();
1576         }
1577
1578         bool denoising_divide_shadow(device_ptr a_ptr, device_ptr b_ptr,
1579                                      device_ptr sample_variance_ptr, device_ptr sv_variance_ptr,
1580                                      device_ptr buffer_variance_ptr, DenoisingTask *task)
1581         {
1582                 if(have_error())
1583                         return false;
1584
1585                 CUDAContextScope scope(this);
1586
1587                 CUfunction cuFilterDivideShadow;
1588                 cuda_assert(cuModuleGetFunction(&cuFilterDivideShadow, cuFilterModule, "kernel_cuda_filter_divide_shadow"));
1589                 cuda_assert(cuFuncSetCacheConfig(cuFilterDivideShadow, CU_FUNC_CACHE_PREFER_L1));
1590                 CUDA_GET_BLOCKSIZE(cuFilterDivideShadow,
1591                                    task->rect.z-task->rect.x,
1592                                    task->rect.w-task->rect.y);
1593
1594                 void *args[] = {&task->render_buffer.samples,
1595                                 &task->tiles_mem.device_pointer,
1596                                 &a_ptr,
1597                                 &b_ptr,
1598                                 &sample_variance_ptr,
1599                                 &sv_variance_ptr,
1600                                 &buffer_variance_ptr,
1601                                 &task->rect,
1602                                 &task->render_buffer.pass_stride,
1603                                 &task->render_buffer.denoising_data_offset};
1604                 CUDA_LAUNCH_KERNEL(cuFilterDivideShadow, args);
1605                 cuda_assert(cuCtxSynchronize());
1606
1607                 return !have_error();
1608         }
1609
1610         bool denoising_get_feature(int mean_offset,
1611                                    int variance_offset,
1612                                    device_ptr mean_ptr,
1613                                    device_ptr variance_ptr,
1614                                    DenoisingTask *task)
1615         {
1616                 if(have_error())
1617                         return false;
1618
1619                 CUDAContextScope scope(this);
1620
1621                 CUfunction cuFilterGetFeature;
1622                 cuda_assert(cuModuleGetFunction(&cuFilterGetFeature, cuFilterModule, "kernel_cuda_filter_get_feature"));
1623                 cuda_assert(cuFuncSetCacheConfig(cuFilterGetFeature, CU_FUNC_CACHE_PREFER_L1));
1624                 CUDA_GET_BLOCKSIZE(cuFilterGetFeature,
1625                                    task->rect.z-task->rect.x,
1626                                    task->rect.w-task->rect.y);
1627
1628                 void *args[] = {&task->render_buffer.samples,
1629                                 &task->tiles_mem.device_pointer,
1630                                 &mean_offset,
1631                                 &variance_offset,
1632                                 &mean_ptr,
1633                                 &variance_ptr,
1634                                 &task->rect,
1635                                 &task->render_buffer.pass_stride,
1636                                 &task->render_buffer.denoising_data_offset};
1637                 CUDA_LAUNCH_KERNEL(cuFilterGetFeature, args);
1638                 cuda_assert(cuCtxSynchronize());
1639
1640                 return !have_error();
1641         }
1642
1643         bool denoising_detect_outliers(device_ptr image_ptr,
1644                                        device_ptr variance_ptr,
1645                                        device_ptr depth_ptr,
1646                                        device_ptr output_ptr,
1647                                        DenoisingTask *task)
1648         {
1649                 if(have_error())
1650                         return false;
1651
1652                 CUDAContextScope scope(this);
1653
1654                 CUfunction cuFilterDetectOutliers;
1655                 cuda_assert(cuModuleGetFunction(&cuFilterDetectOutliers, cuFilterModule, "kernel_cuda_filter_detect_outliers"));
1656                 cuda_assert(cuFuncSetCacheConfig(cuFilterDetectOutliers, CU_FUNC_CACHE_PREFER_L1));
1657                 CUDA_GET_BLOCKSIZE(cuFilterDetectOutliers,
1658                                    task->rect.z-task->rect.x,
1659                                    task->rect.w-task->rect.y);
1660
1661                 void *args[] = {&image_ptr,
1662                                 &variance_ptr,
1663                                 &depth_ptr,
1664                                 &output_ptr,
1665                                 &task->rect,
1666                                 &task->buffer.pass_stride};
1667
1668                 CUDA_LAUNCH_KERNEL(cuFilterDetectOutliers, args);
1669                 cuda_assert(cuCtxSynchronize());
1670
1671                 return !have_error();
1672         }
1673
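        /* Bind the CUDA filter implementations into the generic DenoisingTask, gather the
         * 3x3 neighbouring tiles around the centre tile, and run the shared denoising logic. */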
1674         void denoise(RenderTile &rtile, DenoisingTask& denoising, const DeviceTask &task)
1675         {
1676                 denoising.functions.construct_transform = function_bind(&CUDADevice::denoising_construct_transform, this, &denoising);
1677                 denoising.functions.reconstruct = function_bind(&CUDADevice::denoising_reconstruct, this, _1, _2, _3, &denoising);
1678                 denoising.functions.divide_shadow = function_bind(&CUDADevice::denoising_divide_shadow, this, _1, _2, _3, _4, _5, &denoising);
1679                 denoising.functions.non_local_means = function_bind(&CUDADevice::denoising_non_local_means, this, _1, _2, _3, _4, &denoising);
1680                 denoising.functions.combine_halves = function_bind(&CUDADevice::denoising_combine_halves, this, _1, _2, _3, _4, _5, _6, &denoising);
1681                 denoising.functions.get_feature = function_bind(&CUDADevice::denoising_get_feature, this, _1, _2, _3, _4, &denoising);
1682                 denoising.functions.detect_outliers = function_bind(&CUDADevice::denoising_detect_outliers, this, _1, _2, _3, _4, &denoising);
1683                 denoising.functions.set_tiles = function_bind(&CUDADevice::denoising_set_tiles, this, _1, &denoising);
1684
1685                 denoising.filter_area = make_int4(rtile.x, rtile.y, rtile.w, rtile.h);
1686                 denoising.render_buffer.samples = rtile.sample;
1687
1688                 RenderTile rtiles[9];
1689                 rtiles[4] = rtile;
1690                 task.map_neighbor_tiles(rtiles, this);
1691                 denoising.tiles_from_rendertiles(rtiles);
1692
1693                 denoising.init_from_devicetask(task);
1694
1695                 denoising.run_denoising();
1696
1697                 task.unmap_neighbor_tiles(rtiles, this);
1698         }
1699
1700         void path_trace(DeviceTask& task, RenderTile& rtile, device_vector<WorkTile>& work_tiles)
1701         {
1702                 scoped_timer timer(&rtile.buffers->render_time);
1703
1704                 if(have_error())
1705                         return;
1706
1707                 CUDAContextScope scope(this);
1708                 CUfunction cuPathTrace;
1709
1710                 /* Get kernel function. */
1711                 if(task.integrator_branched) {
1712                         cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_branched_path_trace"));
1713                 }
1714                 else {
1715                         cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_path_trace"));
1716                 }
1717
1718                 if(have_error()) {
1719                         return;
1720                 }
1721
1722                 cuda_assert(cuFuncSetCacheConfig(cuPathTrace, CU_FUNC_CACHE_PREFER_L1));
1723
1724                 /* Allocate work tile. */
1725                 work_tiles.alloc(1);
1726
1727                 WorkTile *wtile = work_tiles.data();
1728                 wtile->x = rtile.x;
1729                 wtile->y = rtile.y;
1730                 wtile->w = rtile.w;
1731                 wtile->h = rtile.h;
1732                 wtile->offset = rtile.offset;
1733                 wtile->stride = rtile.stride;
1734                 wtile->buffer = (float*)cuda_device_ptr(rtile.buffer);
1735
1736                 /* Prepare work size. More step samples render faster, but for now we
1737                  * remain conservative for GPUs connected to a display to avoid driver
1738                  * timeouts and display freezing. */
1739                 int min_blocks, num_threads_per_block;
1740                 cuda_assert(cuOccupancyMaxPotentialBlockSize(&min_blocks, &num_threads_per_block, cuPathTrace, NULL, 0, 0));
1741                 if(!info.display_device) {
1742                         min_blocks *= 8;
1743                 }
1744
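                /* Pick enough samples per launch to keep the device busy. As an illustrative
                 * example, assuming cuOccupancyMaxPotentialBlockSize suggests 40 blocks of
                 * 256 threads, a 64x64 tile gives divide_up(40*256, 64*64) = 3 samples per
                 * launch (more if min_blocks was scaled up above for non-display devices). */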
1745                 uint step_samples = divide_up(min_blocks * num_threads_per_block, wtile->w * wtile->h);
1746
1747                 /* Render all samples. */
1748                 int start_sample = rtile.start_sample;
1749                 int end_sample = rtile.start_sample + rtile.num_samples;
1750
1751                 for(int sample = start_sample; sample < end_sample; sample += step_samples) {
1752                         /* Setup and copy work tile to device. */
1753                         wtile->start_sample = sample;
1754                         wtile->num_samples = min(step_samples, end_sample - sample);
1755                         work_tiles.copy_to_device();
1756
1757                         CUdeviceptr d_work_tiles = cuda_device_ptr(work_tiles.device_pointer);
1758                         uint total_work_size = wtile->w * wtile->h * wtile->num_samples;
1759                         uint num_blocks = divide_up(total_work_size, num_threads_per_block);
1760
1761                         /* Launch kernel. */
1762                         void *args[] = {&d_work_tiles,
1763                                         &total_work_size};
1764
1765                         cuda_assert(cuLaunchKernel(cuPathTrace,
1766                                                    num_blocks, 1, 1,
1767                                                    num_threads_per_block, 1, 1,
1768                                                    0, 0, args, 0));
1769
1770                         cuda_assert(cuCtxSynchronize());
1771
1772                         /* Update progress. */
1773                         rtile.sample = sample + wtile->num_samples;
1774                         task.update_progress(&rtile, rtile.w*rtile.h*wtile->num_samples);
1775
1776                         if(task.get_cancel()) {
1777                                 if(task.need_finish_queue == false)
1778                                         break;
1779                         }
1780                 }
1781         }
1782
1783         void film_convert(DeviceTask& task, device_ptr buffer, device_ptr rgba_byte, device_ptr rgba_half)
1784         {
1785                 if(have_error())
1786                         return;
1787
1788                 CUDAContextScope scope(this);
1789
1790                 CUfunction cuFilmConvert;
1791                 CUdeviceptr d_rgba = map_pixels((rgba_byte)? rgba_byte: rgba_half);
1792                 CUdeviceptr d_buffer = cuda_device_ptr(buffer);
1793
1794                 /* get kernel function */
1795                 if(rgba_half) {
1796                         cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_half_float"));
1797                 }
1798                 else {
1799                         cuda_assert(cuModuleGetFunction(&cuFilmConvert, cuModule, "kernel_cuda_convert_to_byte"));
1800                 }
1801
1802
1803                 float sample_scale = 1.0f/(task.sample + 1);
1804
1805                 /* pass in parameters */
1806                 void *args[] = {&d_rgba,
1807                                 &d_buffer,
1808                                 &sample_scale,
1809                                 &task.x,
1810                                 &task.y,
1811                                 &task.w,
1812                                 &task.h,
1813                                 &task.offset,
1814                                 &task.stride};
1815
1816                 /* launch kernel */
1817                 int threads_per_block;
1818                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuFilmConvert));
1819
1820                 int xthreads = (int)sqrt(threads_per_block);
1821                 int ythreads = (int)sqrt(threads_per_block);
1822                 int xblocks = (task.w + xthreads - 1)/xthreads;
1823                 int yblocks = (task.h + ythreads - 1)/ythreads;
1824
1825                 cuda_assert(cuFuncSetCacheConfig(cuFilmConvert, CU_FUNC_CACHE_PREFER_L1));
1826
1827                 cuda_assert(cuLaunchKernel(cuFilmConvert,
1828                                            xblocks, yblocks, 1, /* blocks */
1829                                            xthreads, ythreads, 1, /* threads */
1830                                            0, 0, args, 0));
1831
1832                 unmap_pixels((rgba_byte)? rgba_byte: rgba_half);
1833
1834                 cuda_assert(cuCtxSynchronize());
1835         }
1836
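        /* Shader evaluation for baking, displacement and background; the kernel is chosen
         * from task.shader_eval_type and the input range is processed per sample. */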
1837         void shader(DeviceTask& task)
1838         {
1839                 if(have_error())
1840                         return;
1841
1842                 CUDAContextScope scope(this);
1843
1844                 CUfunction cuShader;
1845                 CUdeviceptr d_input = cuda_device_ptr(task.shader_input);
1846                 CUdeviceptr d_output = cuda_device_ptr(task.shader_output);
1847
1848                 /* get kernel function */
1849                 if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
1850                         cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_bake"));
1851                 }
1852                 else if(task.shader_eval_type == SHADER_EVAL_DISPLACE) {
1853                         cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_displace"));
1854                 }
1855                 else {
1856                         cuda_assert(cuModuleGetFunction(&cuShader, cuModule, "kernel_cuda_background"));
1857                 }
1858
1859                 /* Do the work in smaller chunks, so we can cancel between them. */
1860                 const int shader_chunk_size = 65536;
1861                 const int start = task.shader_x;
1862                 const int end = task.shader_x + task.shader_w;
1863                 int offset = task.offset;
1864
1865                 bool canceled = false;
1866                 for(int sample = 0; sample < task.num_samples && !canceled; sample++) {
1867                         for(int shader_x = start; shader_x < end; shader_x += shader_chunk_size) {
1868                                 int shader_w = min(shader_chunk_size, end - shader_x);
1869
1870                                 /* pass in parameters */
1871                                 void *args[8];
1872                                 int arg = 0;
1873                                 args[arg++] = &d_input;
1874                                 args[arg++] = &d_output;
1875                                 args[arg++] = &task.shader_eval_type;
1876                                 if(task.shader_eval_type >= SHADER_EVAL_BAKE) {
1877                                         args[arg++] = &task.shader_filter;
1878                                 }
1879                                 args[arg++] = &shader_x;
1880                                 args[arg++] = &shader_w;
1881                                 args[arg++] = &offset;
1882                                 args[arg++] = &sample;
1883
1884                                 /* launch kernel */
1885                                 int threads_per_block;
1886                                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuShader));
1887
1888                                 int xblocks = (shader_w + threads_per_block - 1)/threads_per_block;
1889
1890                                 cuda_assert(cuFuncSetCacheConfig(cuShader, CU_FUNC_CACHE_PREFER_L1));
1891                                 cuda_assert(cuLaunchKernel(cuShader,
1892                                                            xblocks, 1, 1, /* blocks */
1893                                                            threads_per_block, 1, 1, /* threads */
1894                                                            0, 0, args, 0));
1895
1896                                 cuda_assert(cuCtxSynchronize());
1897
1898                                 if(task.get_cancel()) {
1899                                         canceled = true;
1900                                         break;
1901                                 }
1902                         }
1903
1904                         task.update_progress(NULL);
1905                 }
1906         }
1907
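        /* For interactive (non-background) rendering, pixels live in an OpenGL PBO that is
         * mapped through CUDA graphics interop to obtain a device pointer; in background
         * mode the memory is a regular CUDA allocation. */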
1908         CUdeviceptr map_pixels(device_ptr mem)
1909         {
1910                 if(!background) {
1911                         PixelMem pmem = pixel_mem_map[mem];
1912                         CUdeviceptr buffer;
1913
1914                         size_t bytes;
1915                         cuda_assert(cuGraphicsMapResources(1, &pmem.cuPBOresource, 0));
1916                         cuda_assert(cuGraphicsResourceGetMappedPointer(&buffer, &bytes, pmem.cuPBOresource));
1917
1918                         return buffer;
1919                 }
1920
1921                 return cuda_device_ptr(mem);
1922         }
1923
1924         void unmap_pixels(device_ptr mem)
1925         {
1926                 if(!background) {
1927                         PixelMem pmem = pixel_mem_map[mem];
1928
1929                         cuda_assert(cuGraphicsUnmapResources(1, &pmem.cuPBOresource, 0));
1930                 }
1931         }
1932
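        /* Create a GL pixel buffer object and texture for display and register the PBO with
         * CUDA for interop; if registration fails, fall back to the non-interop (background)
         * path. */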
1933         void pixels_alloc(device_memory& mem)
1934         {
1935                 PixelMem pmem;
1936
1937                 pmem.w = mem.data_width;
1938                 pmem.h = mem.data_height;
1939
1940                 CUDAContextScope scope(this);
1941
1942                 glGenBuffers(1, &pmem.cuPBO);
1943                 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
1944                 if(mem.data_type == TYPE_HALF)
1945                         glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(GLhalf)*4, NULL, GL_DYNAMIC_DRAW);
1946                 else
1947                         glBufferData(GL_PIXEL_UNPACK_BUFFER, pmem.w*pmem.h*sizeof(uint8_t)*4, NULL, GL_DYNAMIC_DRAW);
1948
1949                 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1950
1951                 glGenTextures(1, &pmem.cuTexId);
1952                 glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
1953                 if(mem.data_type == TYPE_HALF)
1954                         glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F_ARB, pmem.w, pmem.h, 0, GL_RGBA, GL_HALF_FLOAT, NULL);
1955                 else
1956                         glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, pmem.w, pmem.h, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
1957                 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
1958                 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
1959                 glBindTexture(GL_TEXTURE_2D, 0);
1960
1961                 CUresult result = cuGraphicsGLRegisterBuffer(&pmem.cuPBOresource, pmem.cuPBO, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);
1962
1963                 if(result == CUDA_SUCCESS) {
1964                         mem.device_pointer = pmem.cuTexId;
1965                         pixel_mem_map[mem.device_pointer] = pmem;
1966
1967                         mem.device_size = mem.memory_size();
1968                         stats.mem_alloc(mem.device_size);
1969
1970                         return;
1971                 }
1972                 else {
1973                         /* failed to register buffer, fallback to no interop */
1974                         glDeleteBuffers(1, &pmem.cuPBO);
1975                         glDeleteTextures(1, &pmem.cuTexId);
1976
1977                         background = true;
1978                 }
1979         }
1980
1981         void pixels_copy_from(device_memory& mem, int y, int w, int h)
1982         {
1983                 PixelMem pmem = pixel_mem_map[mem.device_pointer];
1984
1985                 CUDAContextScope scope(this);
1986
1987                 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
1988                 uchar *pixels = (uchar*)glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_READ_ONLY);
1989                 size_t offset = sizeof(uchar)*4*y*w;
1990                 memcpy((uchar*)mem.host_pointer + offset, pixels + offset, sizeof(uchar)*4*w*h);
1991                 glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
1992                 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1993         }
1994
1995         void pixels_free(device_memory& mem)
1996         {
1997                 if(mem.device_pointer) {
1998                         PixelMem pmem = pixel_mem_map[mem.device_pointer];
1999
2000                         CUDAContextScope scope(this);
2001
2002                         cuda_assert(cuGraphicsUnregisterResource(pmem.cuPBOresource));
2003                         glDeleteBuffers(1, &pmem.cuPBO);
2004                         glDeleteTextures(1, &pmem.cuTexId);
2005
2006                         pixel_mem_map.erase(pixel_mem_map.find(mem.device_pointer));
2007                         mem.device_pointer = 0;
2008
2009                         stats.mem_free(mem.device_size);
2010                         mem.device_size = 0;
2011                 }
2012         }
2013
2014         void draw_pixels(device_memory& mem, int y, int w, int h, int dx, int dy, int width, int height, bool transparent,
2015                 const DeviceDrawParams &draw_params)
2016         {
2017                 assert(mem.type == MEM_PIXELS);
2018
2019                 if(!background) {
2020                         PixelMem pmem = pixel_mem_map[mem.device_pointer];
2021                         float *vpointer;
2022
2023                         CUDAContextScope scope(this);
2024
2025                         /* For multi devices, this assumes the inefficient method where we allocate
2026                          * all pixels on the device even though we only render to a subset. */
2027                         size_t offset = 4*y*w;
2028
2029                         if(mem.data_type == TYPE_HALF)
2030                                 offset *= sizeof(GLhalf);
2031                         else
2032                                 offset *= sizeof(uint8_t);
2033
2034                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
2035                         glBindTexture(GL_TEXTURE_2D, pmem.cuTexId);
2036                         if(mem.data_type == TYPE_HALF)
2037                                 glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_HALF_FLOAT, (void*)offset);
2038                         else
2039                                 glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, (void*)offset);
2040                         glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
2041
2042                         glEnable(GL_TEXTURE_2D);
2043
2044                         if(transparent) {
2045                                 glEnable(GL_BLEND);
2046                                 glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
2047                         }
2048
2049                         glColor3f(1.0f, 1.0f, 1.0f);
2050
2051                         if(draw_params.bind_display_space_shader_cb) {
2052                                 draw_params.bind_display_space_shader_cb();
2053                         }
2054
2055                         if(!vertex_buffer)
2056                                 glGenBuffers(1, &vertex_buffer);
2057
2058                         glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
2059                         /* invalidate old contents - avoids stalling if buffer is still waiting in queue to be rendered */
2060                         glBufferData(GL_ARRAY_BUFFER, 16 * sizeof(float), NULL, GL_STREAM_DRAW);
2061
2062                         vpointer = (float *)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
2063
2064                         if(vpointer) {
2065                                 /* texture coordinate - vertex pair */
2066                                 vpointer[0] = 0.0f;
2067                                 vpointer[1] = 0.0f;
2068                                 vpointer[2] = dx;
2069                                 vpointer[3] = dy;
2070
2071                                 vpointer[4] = (float)w/(float)pmem.w;
2072                                 vpointer[5] = 0.0f;
2073                                 vpointer[6] = (float)width + dx;
2074                                 vpointer[7] = dy;
2075
2076                                 vpointer[8] = (float)w/(float)pmem.w;
2077                                 vpointer[9] = (float)h/(float)pmem.h;
2078                                 vpointer[10] = (float)width + dx;
2079                                 vpointer[11] = (float)height + dy;
2080
2081                                 vpointer[12] = 0.0f;
2082                                 vpointer[13] = (float)h/(float)pmem.h;
2083                                 vpointer[14] = dx;
2084                                 vpointer[15] = (float)height + dy;
2085
2086                                 glUnmapBuffer(GL_ARRAY_BUFFER);
2087                         }
2088
2089                         glTexCoordPointer(2, GL_FLOAT, 4 * sizeof(float), 0);
2090                         glVertexPointer(2, GL_FLOAT, 4 * sizeof(float), (char *)NULL + 2 * sizeof(float));
2091
2092                         glEnableClientState(GL_VERTEX_ARRAY);
2093                         glEnableClientState(GL_TEXTURE_COORD_ARRAY);
2094
2095                         glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
2096
2097                         glDisableClientState(GL_TEXTURE_COORD_ARRAY);
2098                         glDisableClientState(GL_VERTEX_ARRAY);
2099
2100                         glBindBuffer(GL_ARRAY_BUFFER, 0);
2101
2102                         if(draw_params.unbind_display_space_shader_cb) {
2103                                 draw_params.unbind_display_space_shader_cb();
2104                         }
2105
2106                         if(transparent)
2107                                 glDisable(GL_BLEND);
2108
2109                         glBindTexture(GL_TEXTURE_2D, 0);
2110                         glDisable(GL_TEXTURE_2D);
2111
2112                         return;
2113                 }
2114
2115                 Device::draw_pixels(mem, y, w, h, dx, dy, width, height, transparent, draw_params);
2116         }
2117
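        /* Per-task worker: for render tasks, acquire tiles and dispatch them to the
         * megakernel path tracer, the split kernel or the denoiser; for shader tasks,
         * run shader evaluation. */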
2118         void thread_run(DeviceTask *task)
2119         {
2120                 CUDAContextScope scope(this);
2121
2122                 if(task->type == DeviceTask::RENDER) {
2123                         DeviceRequestedFeatures requested_features;
2124                         if(use_split_kernel()) {
2125                                 if(split_kernel == NULL) {
2126                                         split_kernel = new CUDASplitKernel(this);
2127                                         split_kernel->load_kernels(requested_features);
2128                                 }
2129                         }
2130
2131                         device_vector<WorkTile> work_tiles(this, "work_tiles", MEM_READ_ONLY);
2132
2133                         /* keep rendering tiles until done */
2134                         RenderTile tile;
2135                         DenoisingTask denoising(this);
2136
2137                         while(task->acquire_tile(this, tile)) {
2138                                 if(tile.task == RenderTile::PATH_TRACE) {
2139                                         if(use_split_kernel()) {
2140                                                 device_only_memory<uchar> void_buffer(this, "void_buffer");
2141                                                 split_kernel->path_trace(task, tile, void_buffer, void_buffer);
2142                                         }
2143                                         else {
2144                                                 path_trace(*task, tile, work_tiles);
2145                                         }
2146                                 }
2147                                 else if(tile.task == RenderTile::DENOISE) {
2148                                         tile.sample = tile.start_sample + tile.num_samples;
2149
2150                                         denoise(tile, denoising, *task);
2151
2152                                         task->update_progress(&tile, tile.w*tile.h);
2153                                 }
2154
2155                                 task->release_tile(tile);
2156
2157                                 if(task->get_cancel()) {
2158                                         if(task->need_finish_queue == false)
2159                                                 break;
2160                                 }
2161                         }
2162
2163                         work_tiles.free();
2164                 }
2165                 else if(task->type == DeviceTask::SHADER) {
2166                         shader(*task);
2167
2168                         cuda_assert(cuCtxSynchronize());
2169                 }
2170         }
2171
2172         class CUDADeviceTask : public DeviceTask {
2173         public:
2174                 CUDADeviceTask(CUDADevice *device, DeviceTask& task)
2175                 : DeviceTask(task)
2176                 {
2177                         run = function_bind(&CUDADevice::thread_run, device, this);
2178                 }
2179         };
2180
2181         int get_split_task_count(DeviceTask& /*task*/)
2182         {
2183                 return 1;
2184         }
2185
2186         void task_add(DeviceTask& task)
2187         {
2188                 CUDAContextScope scope(this);
2189
2190                 /* Load texture info. */
2191                 load_texture_info();
2192
2193                 /* Synchronize all memory copies before executing task. */
2194                 cuda_assert(cuCtxSynchronize());
2195
2196                 if(task.type == DeviceTask::FILM_CONVERT) {
2197                         /* must be done in main thread due to opengl access */
2198                         film_convert(task, task.buffer, task.rgba_byte, task.rgba_half);
2199                 }
2200                 else {
2201                         task_pool.push(new CUDADeviceTask(this, task));
2202                 }
2203         }
2204
2205         void task_wait()
2206         {
2207                 task_pool.wait();
2208         }
2209
2210         void task_cancel()
2211         {
2212                 task_pool.cancel();
2213         }
2214
2215         friend class CUDASplitKernelFunction;
2216         friend class CUDASplitKernel;
2217         friend class CUDAContextScope;
2218 };
2219
2220 /* Redefine the cuda_assert macro so it can be used outside of the CUDADevice class,
2221  * now that the definition of that class is complete.
2222  */
2223 #undef cuda_assert
2224 #define cuda_assert(stmt) \
2225         { \
2226                 CUresult result = stmt; \
2227                 \
2228                 if(result != CUDA_SUCCESS) { \
2229                         string message = string_printf("CUDA error: %s in %s", cuewErrorString(result), #stmt); \
2230                         if(device->error_msg == "") \
2231                                 device->error_msg = message; \
2232                         fprintf(stderr, "%s\n", message.c_str()); \
2233                         /*cuda_abort();*/ \
2234                         device->cuda_error_documentation(); \
2235                 } \
2236         } (void)0
2237
2238
2239 /* CUDA context scope. */
2240
2241 CUDAContextScope::CUDAContextScope(CUDADevice *device)
2242 : device(device)
2243 {
2244         cuda_assert(cuCtxPushCurrent(device->cuContext));
2245 }
2246
2247 CUDAContextScope::~CUDAContextScope()
2248 {
2249         cuda_assert(cuCtxPopCurrent(NULL));
2250 }
2251
2252 /* split kernel */
2253
2254 class CUDASplitKernelFunction : public SplitKernelFunction {
2255         CUDADevice* device;
2256         CUfunction func;
2257 public:
2258         CUDASplitKernelFunction(CUDADevice *device, CUfunction func) : device(device), func(func) {}
2259
2260         /* enqueue the kernel, returns false if there is an error */
2261         bool enqueue(const KernelDimensions &dim, device_memory &/*kg*/, device_memory &/*data*/)
2262         {
2263                 return enqueue(dim, NULL);
2264         }
2265
2266         /* enqueue the kernel, returns false if there is an error */
2267         bool enqueue(const KernelDimensions &dim, void *args[])
2268         {
2269                 if(device->have_error())
2270                         return false;
2271
2272                 CUDAContextScope scope(device);
2273
2274                 /* we ignore dim.local_size for now, as this is faster */
2275                 int threads_per_block;
2276                 cuda_assert(cuFuncGetAttribute(&threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, func));
2277
2278                 int xblocks = (dim.global_size[0]*dim.global_size[1] + threads_per_block - 1)/threads_per_block;
2279
2280                 cuda_assert(cuFuncSetCacheConfig(func, CU_FUNC_CACHE_PREFER_L1));
2281
2282                 cuda_assert(cuLaunchKernel(func,
2283                                            xblocks, 1, 1, /* blocks */
2284                                            threads_per_block, 1, 1, /* threads */
2285                                            0, 0, args, 0));
2286
2287                 return !device->have_error();
2288         }
2289 };
2290
2291 CUDASplitKernel::CUDASplitKernel(CUDADevice *device) : DeviceSplitKernel(device), device(device)
2292 {
2293 }
2294
2295 uint64_t CUDASplitKernel::state_buffer_size(device_memory& /*kg*/, device_memory& /*data*/, size_t num_threads)
2296 {
2297         CUDAContextScope scope(device);
2298
2299         device_vector<uint64_t> size_buffer(device, "size_buffer", MEM_READ_WRITE);
2300         size_buffer.alloc(1);
2301         size_buffer.zero_to_device();
2302
2303         uint threads = num_threads;
2304         CUdeviceptr d_size = device->cuda_device_ptr(size_buffer.device_pointer);
2305
2306         struct args_t {
2307                 uint* num_threads;
2308                 CUdeviceptr* size;
2309         };
2310
2311         args_t args = {
2312                 &threads,
2313                 &d_size
2314         };
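        /* cuLaunchKernel expects an array of pointers to the individual kernel arguments;
         * this struct of pointers is passed as (void**)&args on the assumption that its
         * members are laid out like such an array, in the kernel's parameter order. */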
2315
2316         CUfunction state_buffer_size;
2317         cuda_assert(cuModuleGetFunction(&state_buffer_size, device->cuModule, "kernel_cuda_state_buffer_size"));
2318
2319         cuda_assert(cuLaunchKernel(state_buffer_size,
2320                                    1, 1, 1,
2321                                    1, 1, 1,
2322                                    0, 0, (void**)&args, 0));
2323
2324         size_buffer.copy_from_device(0, 1, 1);
2325         size_t size = size_buffer[0];
2326         size_buffer.free();
2327
2328         return size;
2329 }
2330
2331 bool CUDASplitKernel::enqueue_split_kernel_data_init(const KernelDimensions& dim,
2332                                     RenderTile& rtile,
2333                                     int num_global_elements,
2334                                     device_memory& /*kernel_globals*/,
2335                                     device_memory& /*kernel_data*/,
2336                                     device_memory& split_data,
2337                                     device_memory& ray_state,
2338                                     device_memory& queue_index,
2339                                     device_memory& use_queues_flag,
2340                                     device_memory& work_pool_wgs)
2341 {
2342         CUDAContextScope scope(device);
2343
2344         CUdeviceptr d_split_data = device->cuda_device_ptr(split_data.device_pointer);
2345         CUdeviceptr d_ray_state = device->cuda_device_ptr(ray_state.device_pointer);
2346         CUdeviceptr d_queue_index = device->cuda_device_ptr(queue_index.device_pointer);
2347         CUdeviceptr d_use_queues_flag = device->cuda_device_ptr(use_queues_flag.device_pointer);
2348         CUdeviceptr d_work_pool_wgs = device->cuda_device_ptr(work_pool_wgs.device_pointer);
2349
2350         CUdeviceptr d_buffer = device->cuda_device_ptr(rtile.buffer);
2351
2352         int end_sample = rtile.start_sample + rtile.num_samples;
2353         int queue_size = dim.global_size[0] * dim.global_size[1];
2354
2355         struct args_t {
2356                 CUdeviceptr* split_data_buffer;
2357                 int* num_elements;
2358                 CUdeviceptr* ray_state;
2359                 int* start_sample;
2360                 int* end_sample;
2361                 int* sx;
2362                 int* sy;
2363                 int* sw;
2364                 int* sh;
2365                 int* offset;
2366                 int* stride;
2367                 CUdeviceptr* queue_index;
2368                 int* queuesize;
2369                 CUdeviceptr* use_queues_flag;
2370                 CUdeviceptr* work_pool_wgs;
2371                 int* num_samples;
2372                 CUdeviceptr* buffer;
2373         };
2374
2375         args_t args = {
2376                 &d_split_data,
2377                 &num_global_elements,
2378                 &d_ray_state,
2379                 &rtile.start_sample,
2380                 &end_sample,
2381                 &rtile.x,
2382                 &rtile.y,
2383                 &rtile.w,
2384                 &rtile.h,
2385                 &rtile.offset,
2386                 &rtile.stride,
2387                 &d_queue_index,
2388                 &queue_size,
2389                 &d_use_queues_flag,
2390                 &d_work_pool_wgs,
2391                 &rtile.num_samples,
2392                 &d_buffer
2393         };
2394
2395         CUfunction data_init;
2396         cuda_assert(cuModuleGetFunction(&data_init, device->cuModule, "kernel_cuda_path_trace_data_init"));
2397         if(device->have_error()) {
2398                 return false;
2399         }
2400
2401         CUDASplitKernelFunction(device, data_init).enqueue(dim, (void**)&args);
2402
2403         return !device->have_error();
2404 }
2405
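/* Look up a split kernel entry point in the CUDA module. Kernels follow the
 * "kernel_cuda_<name>" naming convention; a missing symbol is reported as an
 * error and NULL is returned. */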
2406 SplitKernelFunction* CUDASplitKernel::get_split_kernel_function(const string& kernel_name,
2407                                                                 const DeviceRequestedFeatures&)
2408 {
2409         CUDAContextScope scope(device);
2410         CUfunction func;
2411
2412         cuda_assert(cuModuleGetFunction(&func, device->cuModule, (string("kernel_cuda_") + kernel_name).data()));
2413         if(device->have_error()) {
2414                 device->cuda_error_message(string_printf("kernel \"kernel_cuda_%s\" not found in module", kernel_name.data()));
2415                 return NULL;
2416         }
2417
2418         return new CUDASplitKernelFunction(device, func);
2419 }
2420
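/* Local work size for the split kernel: one warp (32 threads) wide. */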
2421 int2 CUDASplitKernel::split_kernel_local_size()
2422 {
2423         return make_int2(32, 1);
2424 }
2425
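/* Pick a global work size for the split kernel from the available device
 * memory: at most half of the currently free memory may be used for the
 * split state buffer, and the resulting element count is shaped into a
 * roughly square grid rounded down to multiples of 32 and 16. For example,
 * if max_elements_for_max_buffer_size() returned 2,000,000 elements (a
 * made-up figure), side = round_down(sqrt(2000000), 32) = 1408 and the
 * global size would be 1408 x round_down(2000000 / 1408, 16) = 1408 x 1408. */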
2426 int2 CUDASplitKernel::split_kernel_global_size(device_memory& kg, device_memory& data, DeviceTask * /*task*/)
2427 {
2428         CUDAContextScope scope(device);
2429         size_t free;
2430         size_t total;
2431
2432         cuda_assert(cuMemGetInfo(&free, &total));
2433
2434         VLOG(1) << "Maximum device allocation size: "
2435                 << string_human_readable_number(free) << " bytes. ("
2436                 << string_human_readable_size(free) << ").";
2437
2438         size_t num_elements = max_elements_for_max_buffer_size(kg, data, free / 2);
2439         size_t side = round_down((int)sqrt(num_elements), 32);
2440         int2 global_size = make_int2(side, round_down(num_elements / side, 16));
2441         VLOG(1) << "Global size: " << global_size << ".";
2442         return global_size;
2443 }
2444
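/* One-time CUDA initialization. With dynamic loading (WITH_CUDA_DYNLOAD) the
 * driver API is resolved through CUEW, and CUDA is only reported as usable
 * when either precompiled kernels are present or, outside Windows, a CUDA
 * compiler is available for runtime kernel compilation. The result is cached
 * for subsequent calls. */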
2445 bool device_cuda_init(void)
2446 {
2447 #ifdef WITH_CUDA_DYNLOAD
2448         static bool initialized = false;
2449         static bool result = false;
2450
2451         if(initialized)
2452                 return result;
2453
2454         initialized = true;
2455         int cuew_result = cuewInit();
2456         if(cuew_result == CUEW_SUCCESS) {
2457                 VLOG(1) << "CUEW initialization succeeded";
2458                 if(CUDADevice::have_precompiled_kernels()) {
2459                         VLOG(1) << "Found precompiled kernels";
2460                         result = true;
2461                 }
2462 #ifndef _WIN32
2463                 else if(cuewCompilerPath() != NULL) {
2464                         VLOG(1) << "Found CUDA compiler " << cuewCompilerPath();
2465                         result = true;
2466                 }
2467                 else {
2468                 VLOG(1) << "Neither precompiled kernels nor CUDA compiler was found,"
2469                                 << " unable to use CUDA";
2470                 }
2471 #endif
2472         }
2473         else {
2474                 VLOG(1) << "CUEW initialization failed: "
2475                         << ((cuew_result == CUEW_ERROR_ATEXIT_FAILED)
2476                             ? "Error setting up atexit() handler"
2477                             : "Error opening the library");
2478         }
2479
2480         return result;
2481 #else  /* WITH_CUDA_DYNLOAD */
2482         return true;
2483 #endif /* WITH_CUDA_DYNLOAD */
2484 }
2485
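/* Factory function for creating a CUDA device instance. */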
2486 Device *device_cuda_create(DeviceInfo& info, Stats &stats, bool background)
2487 {
2488         return new CUDADevice(info, stats, background);
2489 }
2490
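/* Wrapper around cuInit() that, on Windows, catches crashes inside broken
 * CUDA driver installations with structured exception handling and reports
 * CUDA_ERROR_NO_DEVICE instead of taking down the application. */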
2491 static CUresult device_cuda_safe_init()
2492 {
2493 #ifdef _WIN32
2494         __try {
2495                 return cuInit(0);
2496         }
2497         __except(EXCEPTION_EXECUTE_HANDLER) {
2498                 /* Ignore crashes inside the CUDA driver and hope we can
2499                  * survive even with corrupted CUDA installs. */
2500                 fprintf(stderr, "Cycles CUDA: driver crashed, continuing without CUDA.\n");
2501         }
2502
2503         return CUDA_ERROR_NO_DEVICE;
2504 #else
2505         return cuInit(0);
2506 #endif
2507 }
2508
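/* Enumerate CUDA devices and fill in their DeviceInfo. Devices with compute
 * capability below 2.0 are skipped, and devices that appear to drive a
 * display (kernel timeout set without compute preemption) are appended after
 * the non-display devices. */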
2509 void device_cuda_info(vector<DeviceInfo>& devices)
2510 {
2511         CUresult result = device_cuda_safe_init();
2512         if(result != CUDA_SUCCESS) {
2513                 if(result != CUDA_ERROR_NO_DEVICE)
2514                         fprintf(stderr, "CUDA cuInit: %s\n", cuewErrorString(result));
2515                 return;
2516         }
2517
2518         int count = 0;
2519         result = cuDeviceGetCount(&count);
2520         if(result != CUDA_SUCCESS) {
2521                 fprintf(stderr, "CUDA cuDeviceGetCount: %s\n", cuewErrorString(result));
2522                 return;
2523         }
2524
2525         vector<DeviceInfo> display_devices;
2526
2527         for(int num = 0; num < count; num++) {
2528                 char name[256];
2529
2530                 result = cuDeviceGetName(name, 256, num);
2531                 if(result != CUDA_SUCCESS) {
2532                         fprintf(stderr, "CUDA cuDeviceGetName: %s\n", cuewErrorString(result));
2533                         continue;
2534                 }
2535
2536                 int major;
2537                 cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, num);
2538                 if(major < 2) {
2539                         VLOG(1) << "Ignoring device \"" << name
2540                                 << "\", compute capability is too low.";
2541                         continue;
2542                 }
2543
2544                 DeviceInfo info;
2545
2546                 info.type = DEVICE_CUDA;
2547                 info.description = string(name);
2548                 info.num = num;
2549
2550                 info.advanced_shading = (major >= 2);
2551                 info.has_fermi_limits = !(major >= 3);
2552                 info.has_half_images = (major >= 3);
2553                 info.has_volume_decoupled = false;
2554                 info.bvh_layout_mask = BVH_LAYOUT_BVH2;
2555
2556                 int pci_location[3] = {0, 0, 0};
2557                 cuDeviceGetAttribute(&pci_location[0], CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID, num);
2558                 cuDeviceGetAttribute(&pci_location[1], CU_DEVICE_ATTRIBUTE_PCI_BUS_ID, num);
2559                 cuDeviceGetAttribute(&pci_location[2], CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID, num);
2560                 info.id = string_printf("CUDA_%s_%04x:%02x:%02x",
2561                                         name,
2562                                         (unsigned int)pci_location[0],
2563                                         (unsigned int)pci_location[1],
2564                                         (unsigned int)pci_location[2]);
2565
2566                 /* If the device has a kernel timeout and no compute preemption, we assume
2567                  * it is connected to a display and will freeze the display while doing
2568                  * computations. */
2569                 int timeout_attr = 0, preempt_attr = 0;
2570                 cuDeviceGetAttribute(&timeout_attr, CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT, num);
2571                 cuDeviceGetAttribute(&preempt_attr, CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED, num);
2572
2573                 if(timeout_attr && !preempt_attr) {
2574                         VLOG(1) << "Device is recognized as display.";
2575                         info.description += " (Display)";
2576                         info.display_device = true;
2577                         display_devices.push_back(info);
2578                 }
2579                 else {
2580                         devices.push_back(info);
2581                 }
2582                 VLOG(1) << "Added device \"" << name << "\" with id \"" << info.id << "\".";
2583         }
2584
2585         if(!display_devices.empty())
2586                 devices.insert(devices.end(), display_devices.begin(), display_devices.end());
2587 }
2588
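/* Build a human readable listing of CUDA device attributes, one block of
 * CU_DEVICE_ATTRIBUTE_* values per detected device. */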
2589 string device_cuda_capabilities(void)
2590 {
2591         CUresult result = device_cuda_safe_init();
2592         if(result != CUDA_SUCCESS) {
2593                 if(result != CUDA_ERROR_NO_DEVICE) {
2594                         return string("Error initializing CUDA: ") + cuewErrorString(result);
2595                 }
2596                 return "No CUDA device found\n";
2597         }
2598
2599         int count;
2600         result = cuDeviceGetCount(&count);
2601         if(result != CUDA_SUCCESS) {
2602                 return string("Error getting devices: ") + cuewErrorString(result);
2603         }
2604
2605         string capabilities = "";
2606         for(int num = 0; num < count; num++) {
2607                 char name[256];
2608                 if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS) {
2609                         continue;
2610                 }
2611                 capabilities += string("\t") + name + "\n";
2612                 int value;
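                /* Query a single device attribute and append it to the report
                 * when the query succeeds; failures are silently skipped. */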
2613 #define GET_ATTR(attr) \
2614                 { \
2615                         if(cuDeviceGetAttribute(&value, \
2616                                                 CU_DEVICE_ATTRIBUTE_##attr, \
2617                                                 num) == CUDA_SUCCESS) \
2618                         { \
2619                                 capabilities += string_printf("\t\tCU_DEVICE_ATTRIBUTE_" #attr "\t\t\t%d\n", \
2620                                                               value); \
2621                         } \
2622                 } (void)0
2623                 /* TODO(sergey): Strip all attributes which are not useful for us
2624                  * or do not depend on the driver.
2625                  */
2626                 GET_ATTR(MAX_THREADS_PER_BLOCK);
2627                 GET_ATTR(MAX_BLOCK_DIM_X);
2628                 GET_ATTR(MAX_BLOCK_DIM_Y);
2629                 GET_ATTR(MAX_BLOCK_DIM_Z);
2630                 GET_ATTR(MAX_GRID_DIM_X);
2631                 GET_ATTR(MAX_GRID_DIM_Y);
2632                 GET_ATTR(MAX_GRID_DIM_Z);
2633                 GET_ATTR(MAX_SHARED_MEMORY_PER_BLOCK);
2634                 GET_ATTR(SHARED_MEMORY_PER_BLOCK);
2635                 GET_ATTR(TOTAL_CONSTANT_MEMORY);
2636                 GET_ATTR(WARP_SIZE);
2637                 GET_ATTR(MAX_PITCH);
2638                 GET_ATTR(MAX_REGISTERS_PER_BLOCK);
2639                 GET_ATTR(REGISTERS_PER_BLOCK);
2640                 GET_ATTR(CLOCK_RATE);
2641                 GET_ATTR(TEXTURE_ALIGNMENT);
2642                 GET_ATTR(GPU_OVERLAP);
2643                 GET_ATTR(MULTIPROCESSOR_COUNT);
2644                 GET_ATTR(KERNEL_EXEC_TIMEOUT);
2645                 GET_ATTR(INTEGRATED);
2646                 GET_ATTR(CAN_MAP_HOST_MEMORY);
2647                 GET_ATTR(COMPUTE_MODE);
2648                 GET_ATTR(MAXIMUM_TEXTURE1D_WIDTH);
2649                 GET_ATTR(MAXIMUM_TEXTURE2D_WIDTH);
2650                 GET_ATTR(MAXIMUM_TEXTURE2D_HEIGHT);
2651                 GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH);
2652                 GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT);
2653                 GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH);
2654                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_WIDTH);
2655                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_HEIGHT);
2656                 GET_ATTR(MAXIMUM_TEXTURE2D_LAYERED_LAYERS);
2657                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_WIDTH);
2658                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_HEIGHT);
2659                 GET_ATTR(MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES);
2660                 GET_ATTR(SURFACE_ALIGNMENT);
2661                 GET_ATTR(CONCURRENT_KERNELS);
2662                 GET_ATTR(ECC_ENABLED);
2663                 GET_ATTR(TCC_DRIVER);
2664                 GET_ATTR(MEMORY_CLOCK_RATE);
2665                 GET_ATTR(GLOBAL_MEMORY_BUS_WIDTH);
2666                 GET_ATTR(L2_CACHE_SIZE);
2667                 GET_ATTR(MAX_THREADS_PER_MULTIPROCESSOR);
2668                 GET_ATTR(ASYNC_ENGINE_COUNT);
2669                 GET_ATTR(UNIFIED_ADDRESSING);
2670                 GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_WIDTH);
2671                 GET_ATTR(MAXIMUM_TEXTURE1D_LAYERED_LAYERS);
2672                 GET_ATTR(CAN_TEX2D_GATHER);
2673                 GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_WIDTH);
2674                 GET_ATTR(MAXIMUM_TEXTURE2D_GATHER_HEIGHT);
2675                 GET_ATTR(MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE);
2676                 GET_ATTR(MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE);
2677                 GET_ATTR(MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE);
2678                 GET_ATTR(TEXTURE_PITCH_ALIGNMENT);
2679                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_WIDTH);
2680                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH);
2681                 GET_ATTR(MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS);
2682                 GET_ATTR(MAXIMUM_SURFACE1D_WIDTH);
2683                 GET_ATTR(MAXIMUM_SURFACE2D_WIDTH);
2684                 GET_ATTR(MAXIMUM_SURFACE2D_HEIGHT);
2685                 GET_ATTR(MAXIMUM_SURFACE3D_WIDTH);
2686                 GET_ATTR(MAXIMUM_SURFACE3D_HEIGHT);
2687                 GET_ATTR(MAXIMUM_SURFACE3D_DEPTH);
2688                 GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_WIDTH);
2689                 GET_ATTR(MAXIMUM_SURFACE1D_LAYERED_LAYERS);
2690                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_WIDTH);
2691                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_HEIGHT);
2692                 GET_ATTR(MAXIMUM_SURFACE2D_LAYERED_LAYERS);
2693                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_WIDTH);
2694                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH);
2695                 GET_ATTR(MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS);
2696                 GET_ATTR(MAXIMUM_TEXTURE1D_LINEAR_WIDTH);
2697                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_WIDTH);
2698                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_HEIGHT);
2699                 GET_ATTR(MAXIMUM_TEXTURE2D_LINEAR_PITCH);
2700                 GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH);
2701                 GET_ATTR(MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT);
2702                 GET_ATTR(COMPUTE_CAPABILITY_MAJOR);
2703                 GET_ATTR(COMPUTE_CAPABILITY_MINOR);
2704                 GET_ATTR(MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH);
2705                 GET_ATTR(STREAM_PRIORITIES_SUPPORTED);
2706                 GET_ATTR(GLOBAL_L1_CACHE_SUPPORTED);
2707                 GET_ATTR(LOCAL_L1_CACHE_SUPPORTED);
2708                 GET_ATTR(MAX_SHARED_MEMORY_PER_MULTIPROCESSOR);
2709                 GET_ATTR(MAX_REGISTERS_PER_MULTIPROCESSOR);
2710                 GET_ATTR(MANAGED_MEMORY);
2711                 GET_ATTR(MULTI_GPU_BOARD);
2712                 GET_ATTR(MULTI_GPU_BOARD_GROUP_ID);
2713 #undef GET_ATTR
2714                 capabilities += "\n";
2715         }
2716
2717         return capabilities;
2718 }
2719
2720 CCL_NAMESPACE_END