Cycles: Improve denoising speed on GPUs with small tile sizes
blender.git: intern/cycles/kernel/kernels/cuda/kernel.cu
/*
 * Copyright 2011-2013 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* CUDA kernel entry points */

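/* __CUDA_ARCH__ is defined by nvcc only while compiling device code, so this
 * guard keeps the kernel bodies out of the host compilation pass. */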
#ifdef __CUDA_ARCH__

#include "kernel/kernel_compat_cuda.h"
#include "kernel_config.h"

#include "util/util_atomic.h"

#include "kernel/kernel_math.h"
#include "kernel/kernel_types.h"
#include "kernel/kernel_globals.h"
#include "kernel/kernels/cuda/kernel_cuda_image.h"
#include "kernel/kernel_film.h"
#include "kernel/kernel_path.h"
#include "kernel/kernel_path_branched.h"
#include "kernel/kernel_bake.h"
#include "kernel/kernel_work_stealing.h"

/* kernels */
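/* Path tracing: one thread per work item. The flat work index is decoded by
 * get_work_pixel() into a pixel coordinate and a sample number, so a small
 * tile can still occupy the whole GPU by rendering several samples of the
 * same pixel in parallel. */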
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_path_trace(WorkTile *tile, uint total_work_size)
{
	int work_index = ccl_global_id(0);

	if(work_index < total_work_size) {
		uint x, y, sample;
		get_work_pixel(tile, work_index, &x, &y, &sample);

		KernelGlobals kg;
		kernel_path_trace(&kg, tile->buffer, sample, x, y, tile->offset, tile->stride);
	}
}

#ifdef __BRANCHED_PATH__
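/* Branched path tracing: scheduled exactly like kernel_cuda_path_trace above,
 * but built with its own register budget (CUDA_KERNEL_BRANCHED_MAX_REGISTERS). */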
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_BRANCHED_MAX_REGISTERS)
kernel_cuda_branched_path_trace(WorkTile *tile, uint total_work_size)
{
	int work_index = ccl_global_id(0);

	if(work_index < total_work_size) {
		uint x, y, sample;
		get_work_pixel(tile, work_index, &x, &y, &sample);

		KernelGlobals kg;
		kernel_branched_path_trace(&kg, tile->buffer, sample, x, y, tile->offset, tile->stride);
	}
}
#endif  /* __BRANCHED_PATH__ */

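/* Film conversion: a 2D launch with one thread per pixel, converting the
 * accumulated float render buffer to 8-bit RGBA for display. */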
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_convert_to_byte(uchar4 *rgba, float *buffer, float sample_scale, int sx, int sy, int sw, int sh, int offset, int stride)
{
	int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
	int y = sy + blockDim.y*blockIdx.y + threadIdx.y;

	if(x < sx + sw && y < sy + sh) {
		kernel_film_convert_to_byte(NULL, rgba, buffer, sample_scale, x, y, offset, stride);
	}
}

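/* Same 2D layout, but producing half-float RGBA; the rgba pointer keeps the
 * uchar4 type and is reinterpreted inside kernel_film_convert_to_half_float(). */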
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_convert_to_half_float(uchar4 *rgba, float *buffer, float sample_scale, int sx, int sy, int sw, int sh, int offset, int stride)
{
	int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
	int y = sy + blockDim.y*blockIdx.y + threadIdx.y;

	if(x < sx + sw && y < sy + sh) {
		kernel_film_convert_to_half_float(NULL, rgba, buffer, sample_scale, x, y, offset, stride);
	}
}

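/* Shader evaluation: a 1D launch over sw points starting at sx. This variant
 * runs displacement shaders; type, offset and sample are unused here but keep
 * the shader evaluation kernels' signatures alike. */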
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_displace(uint4 *input,
                     float4 *output,
                     int type,
                     int sx,
                     int sw,
                     int offset,
                     int sample)
{
	int x = sx + blockDim.x*blockIdx.x + threadIdx.x;

	if(x < sx + sw) {
		KernelGlobals kg;
		kernel_displace_evaluate(&kg, input, output, x);
	}
}

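/* As above, but evaluating background shaders for the given points. */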
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_background(uint4 *input,
                       float4 *output,
                       int type,
                       int sx,
                       int sw,
                       int offset,
                       int sample)
{
	int x = sx + blockDim.x*blockIdx.x + threadIdx.x;

	if(x < sx + sw) {
		KernelGlobals kg;
		kernel_background_evaluate(&kg, input, output, x);
	}
}

#ifdef __BAKING__
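/* Baking: evaluates the pass selected by 'type' (cast to ShaderEvalType) for
 * each bake pixel, with 'filter' choosing which light components are included
 * and 'sample' driving progressive accumulation. */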
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_bake(uint4 *input, float4 *output, int type, int filter, int sx, int sw, int offset, int sample)
{
	int x = sx + blockDim.x*blockIdx.x + threadIdx.x;

	if(x < sx + sw) {
		KernelGlobals kg;
		kernel_bake_evaluate(&kg, input, output, (ShaderEvalType)type, filter, x, offset, sample);
	}
}
#endif  /* __BAKING__ */

#endif  /* __CUDA_ARCH__ */