/* Cycles: replace __MAX_CLOSURE__ build option with a runtime integrator variable.
 * File: intern/cycles/kernel/split/kernel_shader_sort.h (blender.git) */
/*
 * Copyright 2011-2017 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

CCL_NAMESPACE_BEGIN

20 ccl_device void kernel_shader_sort(KernelGlobals *kg,
21                                    ccl_local_param ShaderSortLocals *locals)
22 {
23 #ifndef __KERNEL_CUDA__
24         int tid = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
25         uint qsize = kernel_split_params.queue_index[QUEUE_ACTIVE_AND_REGENERATED_RAYS];
26         if(tid == 0) {
27                 kernel_split_params.queue_index[QUEUE_SHADER_SORTED_RAYS] = qsize;
28         }
29
30         uint offset = (tid/SHADER_SORT_LOCAL_SIZE)*SHADER_SORT_BLOCK_SIZE;
31         if(offset >= qsize) {
32                 return;
33         }
34
35         int lid = ccl_local_id(1) * ccl_local_size(0) + ccl_local_id(0);
36         uint input = QUEUE_ACTIVE_AND_REGENERATED_RAYS * (kernel_split_params.queue_size);
37         uint output = QUEUE_SHADER_SORTED_RAYS * (kernel_split_params.queue_size);
38         ccl_local uint *local_value = &locals->local_value[0];
39         ccl_local ushort *local_index = &locals->local_index[0];
40
41         /* copy to local memory */
42         for(uint i = 0; i < SHADER_SORT_BLOCK_SIZE; i += SHADER_SORT_LOCAL_SIZE) {
43                 uint idx = offset + i + lid;
44                 uint add = input + idx;
45                 uint value = (~0);
46                 if(idx < qsize) {
47                         int ray_index = kernel_split_state.queue_data[add];
48                         bool valid = (ray_index != QUEUE_EMPTY_SLOT) && IS_STATE(kernel_split_state.ray_state, ray_index, RAY_ACTIVE);
49                         if(valid) {
50                                 value = kernel_split_sd(sd, ray_index)->shader & SHADER_MASK;
51                         }
52                 }
53                 local_value[i + lid] = value;
54                 local_index[i + lid] = i + lid;
55         }
56         ccl_barrier(CCL_LOCAL_MEM_FENCE);
57
58         /* skip sorting for cpu split kernel */
59 #  ifdef __KERNEL_OPENCL__
60
61         /* bitonic sort */
62         for(uint length = 1; length < SHADER_SORT_BLOCK_SIZE; length <<= 1) {
63                 for(uint inc = length; inc > 0; inc >>= 1) {
64                         for(uint ii = 0; ii < SHADER_SORT_BLOCK_SIZE; ii += SHADER_SORT_LOCAL_SIZE) {
65                                 uint i = lid + ii;
66                                 bool direction = ((i & (length << 1)) != 0);
67                                 uint j = i ^ inc;
68                                 ushort ioff = local_index[i];
69                                 ushort joff = local_index[j];
70                                 uint iKey = local_value[ioff];
71                                 uint jKey = local_value[joff];
72                                 bool smaller = (jKey < iKey) || (jKey == iKey && j < i);
73                                 bool swap = smaller ^ (j < i) ^ direction;
74                                 ccl_barrier(CCL_LOCAL_MEM_FENCE);
75                                 local_index[i] = (swap) ? joff : ioff;
76                                 local_index[j] = (swap) ? ioff : joff;
77                                 ccl_barrier(CCL_LOCAL_MEM_FENCE);
78                         }
79                 }
80         }
81 #  endif /* __KERNEL_OPENCL__ */
82
83         /* copy to destination */
84         for(uint i = 0; i < SHADER_SORT_BLOCK_SIZE; i += SHADER_SORT_LOCAL_SIZE) {
85                 uint idx = offset + i + lid;
86                 uint lidx = local_index[i + lid];
87                 uint outi = output + idx;
88                 uint ini = input + offset + lidx;
89                 uint value = local_value[lidx];
90                 if(idx < qsize) {
91                         kernel_split_state.queue_data[outi] = (value == (~0)) ? QUEUE_EMPTY_SLOT : kernel_split_state.queue_data[ini];
92                 }
93         }
94 #endif /* __KERNEL_CUDA__ */
95 }

CCL_NAMESPACE_END