/*
 * Copyright 2011-2013 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* CUDA kernel entry points */
#ifdef __CUDA_ARCH__

#include "kernel/kernel_compat_cuda.h"
#include "kernel_config.h"

#include "util/util_atomic.h"

#include "kernel/kernel_math.h"
#include "kernel/kernel_types.h"
#include "kernel/kernel_globals.h"
#include "kernel/kernels/cuda/kernel_cuda_image.h"
#include "kernel/kernel_film.h"
#include "kernel/kernel_path.h"
#include "kernel/kernel_path_branched.h"
#include "kernel/kernel_bake.h"
#include "kernel/kernel_work_stealing.h"
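/* kernels */

/* Path tracing: each thread maps its 1D work index to a pixel position and
 * sample number within the tile, then accumulates one path-traced sample
 * into the tile's render buffer. */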
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_path_trace(WorkTile *tile, uint total_work_size)
{
    int work_index = ccl_global_id(0);

    if(work_index < total_work_size) {
        uint x, y, sample;
        get_work_pixel(tile, work_index, &x, &y, &sample);

        KernelGlobals kg;
        kernel_path_trace(&kg, tile->buffer, sample, x, y, tile->offset, tile->stride);
    }
}
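/* Branched path tracing: same work distribution as above, but runs the
 * branched (non-progressive) integrator, which is granted a larger register
 * budget via CUDA_KERNEL_BRANCHED_MAX_REGISTERS. */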
#ifdef __BRANCHED_PATH__
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_BRANCHED_MAX_REGISTERS)
kernel_cuda_branched_path_trace(WorkTile *tile, uint total_work_size)
{
    int work_index = ccl_global_id(0);

    if(work_index < total_work_size) {
        uint x, y, sample;
        get_work_pixel(tile, work_index, &x, &y, &sample);

        KernelGlobals kg;
        kernel_branched_path_trace(&kg, tile->buffer, sample, x, y, tile->offset, tile->stride);
    }
}
#endif  /* __BRANCHED_PATH__ */
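/* Film conversion: a 2D thread grid covers the tile and converts the
 * accumulated float render buffer to display formats (8-bit RGBA here,
 * half float below), scaling pixel values by sample_scale. */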
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_convert_to_byte(uchar4 *rgba, float *buffer, float sample_scale, int sx, int sy, int sw, int sh, int offset, int stride)
{
    int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
    int y = sy + blockDim.y*blockIdx.y + threadIdx.y;

    if(x < sx + sw && y < sy + sh) {
        kernel_film_convert_to_byte(NULL, rgba, buffer, sample_scale, x, y, offset, stride);
    }
}
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_convert_to_half_float(uchar4 *rgba, float *buffer, float sample_scale, int sx, int sy, int sw, int sh, int offset, int stride)
{
    int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
    int y = sy + blockDim.y*blockIdx.y + threadIdx.y;

    if(x < sx + sw && y < sy + sh) {
        kernel_film_convert_to_half_float(NULL, rgba, buffer, sample_scale, x, y, offset, stride);
    }
}
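/* Shader evaluation: one thread per input element. The displace, background
 * and bake kernels below evaluate shaders outside of regular rendering and
 * write their results to the output array. */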
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_displace(uint4 *input,
                     float4 *output,
                     int type,
                     int sx,
                     int sw,
                     int offset,
                     int sample)
{
    int x = sx + blockDim.x*blockIdx.x + threadIdx.x;

    if(x < sx + sw) {
        KernelGlobals kg;
        kernel_displace_evaluate(&kg, input, output, x);
    }
}
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_background(uint4 *input,
                       float4 *output,
                       int type,
                       int sx,
                       int sw,
                       int offset,
                       int sample)
{
    int x = sx + blockDim.x*blockIdx.x + threadIdx.x;

    if(x < sx + sw) {
        KernelGlobals kg;
        kernel_background_evaluate(&kg, input, output, x);
    }
}
#ifdef __BAKING__
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_bake(uint4 *input, float4 *output, int type, int filter, int sx, int sw, int offset, int sample)
{
    int x = sx + blockDim.x*blockIdx.x + threadIdx.x;

    if(x < sx + sw) {
        KernelGlobals kg;
        kernel_bake_evaluate(&kg, input, output, (ShaderEvalType)type, filter, x, offset, sample);
    }
}
#endif  /* __BAKING__ */

#endif  /* __CUDA_ARCH__ */