/*
 * Copyright 2011-2013 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __KERNEL_COMPAT_CUDA_H__
#define __KERNEL_COMPAT_CUDA_H__

#define __KERNEL_GPU__
#define __KERNEL_CUDA__
#define CCL_NAMESPACE_BEGIN
#define CCL_NAMESPACE_END

/* Selective nodes compilation. */
#ifndef __NODES_MAX_GROUP__
# define __NODES_MAX_GROUP__ NODE_GROUP_LEVEL_MAX
#endif
#ifndef __NODES_FEATURES__
# define __NODES_FEATURES__ NODE_FEATURE_ALL
#endif

#include <cuda_fp16.h>

/* Qualifier wrappers for different names on different devices */

#define ccl_device __device__ __inline__
#define ccl_device_forceinline __device__ __forceinline__
#if __CUDA_ARCH__ < 500
# define ccl_device_inline __device__ __forceinline__
#else
# define ccl_device_inline __device__ __inline__
#endif
#define ccl_device_noinline __device__ __noinline__

#define ccl_local __shared__
#define ccl_local_param
#define ccl_addr_space
#define ccl_restrict __restrict__
#define ccl_align(n) __align__(n)
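
/* Illustrative sketch (not part of the original header): a hypothetical device
 * helper declared with the qualifier wrappers above. `squared_length_example`
 * is a made-up name for this example only; the point is that the same source
 * compiles on CPU and GPU backends, with the wrappers expanding to the CUDA
 * keywords defined here.
 *
 *   ccl_device_inline float squared_length_example(float3 a)
 *   {
 *       return a.x * a.x + a.y * a.y + a.z * a.z;
 *   }
 */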

#define CCL_MAX_LOCAL_SIZE (CUDA_THREADS_BLOCK_WIDTH*CUDA_THREADS_BLOCK_WIDTH)

/* No assert supported for CUDA */

#define kernel_assert(cond)

#include "util/util_half.h"
#include "util/util_types.h"

/* Work item functions */

ccl_device_inline uint ccl_local_id(uint d)
{
    switch(d) {
        case 0: return threadIdx.x;
        case 1: return threadIdx.y;
        case 2: return threadIdx.z;
        default: return 0;
    }
}

#define ccl_global_id(d) (ccl_group_id(d) * ccl_local_size(d) + ccl_local_id(d))

ccl_device_inline uint ccl_local_size(uint d)
{
    switch(d) {
        case 0: return blockDim.x;
        case 1: return blockDim.y;
        case 2: return blockDim.z;
        default: return 0;
    }
}

#define ccl_global_size(d) (ccl_num_groups(d) * ccl_local_size(d))

ccl_device_inline uint ccl_group_id(uint d)
{
    switch(d) {
        case 0: return blockIdx.x;
        case 1: return blockIdx.y;
        case 2: return blockIdx.z;
        default: return 0;
    }
}

ccl_device_inline uint ccl_num_groups(uint d)
{
    switch(d) {
        case 0: return gridDim.x;
        case 1: return gridDim.y;
        case 2: return gridDim.z;
        default: return 0;
    }
}
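
/* Illustrative sketch (hypothetical, not part of the original header): how the
 * work item helpers above map CUDA's threadIdx/blockIdx/blockDim/gridDim onto
 * OpenCL-style global IDs. `example_fill`, `buffer`, `sw` and `sh` are made-up
 * names for this example only.
 *
 *   ccl_device void example_fill(float *buffer, uint sw, uint sh)
 *   {
 *       uint x = ccl_global_id(0);  // blockIdx.x * blockDim.x + threadIdx.x
 *       uint y = ccl_global_id(1);  // blockIdx.y * blockDim.y + threadIdx.y
 *       if(x < sw && y < sh)
 *           buffer[y * sw + x] = 1.0f;
 *   }
 */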

typedef texture<float4, 1> texture_float4;
typedef texture<float2, 1> texture_float2;
typedef texture<float, 1> texture_float;
typedef texture<uint, 1> texture_uint;
typedef texture<int, 1> texture_int;
typedef texture<uint4, 1> texture_uint4;
typedef texture<uchar, 1> texture_uchar;
typedef texture<uchar4, 1> texture_uchar4;
typedef texture<float4, 2> texture_image_float4;
typedef texture<float4, 3> texture_image3d_float4;
typedef texture<uchar4, 2, cudaReadModeNormalizedFloat> texture_image_uchar4;
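
/* Illustrative sketch (hypothetical, not part of the original header): on
 * Fermi these typedefs are used to declare the global data textures that
 * kernel_tex_fetch() reads from, roughly:
 *
 *   texture_float4 __example_verts;  // made-up name; real declarations live elsewhere
 */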

/* Macros to handle different memory storage on different devices */

/* On Fermi cards (4xx and 5xx), we use regular textures for both data and images.
 * On Kepler (6xx) and above, we use Bindless Textures for images and arrays for data.
 *
 * Arrays are necessary in order to use the full VRAM on newer cards, and they are
 * slightly faster. Using Arrays on Fermi turned out to be slower. */

#if __CUDA_ARCH__ < 300
# define __KERNEL_CUDA_TEX_STORAGE__
# define kernel_tex_fetch(t, index) tex1Dfetch(t, index)

# define kernel_tex_image_interp(t, x, y) tex2D(t, x, y)
# define kernel_tex_image_interp_3d(t, x, y, z) tex3D(t, x, y, z)
#else
# define kernel_tex_fetch(t, index) t[(index)]

# define kernel_tex_image_interp_float4(t, x, y) tex2D<float4>(t, x, y)
# define kernel_tex_image_interp_float(t, x, y) tex2D<float>(t, x, y)
# define kernel_tex_image_interp_3d_float4(t, x, y, z) tex3D<float4>(t, x, y, z)
# define kernel_tex_image_interp_3d_float(t, x, y, z) tex3D<float>(t, x, y, z)
#endif
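
/* Illustrative sketch (hypothetical, not part of the original header): kernel
 * code reads data through kernel_tex_fetch() rather than tex1Dfetch() or raw
 * arrays, so the same call site works with either storage scheme selected
 * above. `__example_verts` is a made-up texture/array name.
 *
 *   // Fermi:   expands to tex1Dfetch(__example_verts, i)
 *   // Kepler+: expands to __example_verts[(i)]
 *   float4 v = kernel_tex_fetch(__example_verts, i);
 */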

#define kernel_data __data

/* Use fast math functions */

#define cosf(x) __cosf(((float)(x)))
#define sinf(x) __sinf(((float)(x)))
#define powf(x, y) __powf(((float)(x)), ((float)(y)))
#define tanf(x) __tanf(((float)(x)))
#define logf(x) __logf(((float)(x)))
#define expf(x) __expf(((float)(x)))
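
/* Note on the fast math remapping above (illustrative, not from the original
 * header): kernel code calling the standard single precision functions is
 * redirected to the lower precision CUDA intrinsics, trading some accuracy
 * for speed on the GPU, e.g.
 *
 *   float a = expf(x);  // compiles as __expf(((float)(x)))
 */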

#endif /* __KERNEL_COMPAT_CUDA_H__ */