Cycles: Added Cryptomatte output.
intern/cycles/kernel/split/kernel_buffer_update.h
/*
 * Copyright 2011-2015 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

CCL_NAMESPACE_BEGIN

/* This kernel takes care of rays that hit the background (in the
 * scene_intersect kernel), and for rays in the RAY_UPDATE_BUFFER state it
 * updates the ray's accumulated radiance in the output buffer. This kernel
 * also takes care of rays that have been determined to be regenerated.
 *
 * We will empty the QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS queue in this
 * kernel.
 *
 * Typically, all rays that are in the RAY_HIT_BACKGROUND or RAY_UPDATE_BUFFER
 * state will eventually be set to the RAY_TO_REGENERATE state in this kernel.
 * Finally, all rays with ray_state RAY_TO_REGENERATE will be regenerated and
 * put in the QUEUE_ACTIVE_AND_REGENERATED_RAYS queue.
 *
 * State of queues when this kernel is called:
 * At entry,
 *   - QUEUE_ACTIVE_AND_REGENERATED_RAYS will be filled with RAY_ACTIVE rays.
 *   - QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be filled with
 *     RAY_UPDATE_BUFFER, RAY_HIT_BACKGROUND and RAY_TO_REGENERATE rays.
 * At exit,
 *   - QUEUE_ACTIVE_AND_REGENERATED_RAYS will be filled with RAY_ACTIVE and
 *     RAY_REGENERATED rays.
 *   - QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be empty.
 */
ccl_device void kernel_buffer_update(KernelGlobals *kg,
                                     ccl_local_param unsigned int *local_queue_atomics)
{
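        /* Reset the per-workgroup queue counter once per workgroup; the
         * barrier below makes the zeroed counter visible to every thread
         * before any local enqueue happens. */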
        if(ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
                *local_queue_atomics = 0;
        }
        ccl_barrier(CCL_LOCAL_MEM_FENCE);

        int ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
        if(ray_index == 0) {
                /* We will empty this queue in this kernel. */
                kernel_split_params.queue_index[QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS] = 0;
        }
        char enqueue_flag = 0;
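        /* Fetch the ray index stored in this thread's slot of the queue. The
         * trailing 1 asks get_ray_index() to clear the slot after reading it,
         * which is what actually drains QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS. */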
        ray_index = get_ray_index(kg, ray_index,
                                  QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
                                  kernel_split_state.queue_data,
                                  kernel_split_params.queue_size,
                                  1);

#ifdef __COMPUTE_DEVICE_GPU__
        /* If we are executing on a GPU device, we exit all threads that are
         * not required.
         *
         * If we are executing on a CPU device, then we need to keep all
         * threads active since we have barrier() calls later in the kernel.
         * CPU devices expect all threads to execute the barrier statement.
         */
        if(ray_index == QUEUE_EMPTY_SLOT) {
                return;
        }
#endif

#ifndef __COMPUTE_DEVICE_GPU__
        if(ray_index != QUEUE_EMPTY_SLOT) {
#endif

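        /* Fetch the pieces of split state that belong to this ray; the split
         * kernel keeps them in structure-of-arrays form, indexed by ray_index. */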
        ccl_global char *ray_state = kernel_split_state.ray_state;
        ccl_global PathState *state = &kernel_split_state.path_state[ray_index];
        PathRadiance *L = &kernel_split_state.path_radiance[ray_index];
        ccl_global Ray *ray = &kernel_split_state.ray[ray_index];
        ccl_global float3 *throughput = &kernel_split_state.throughput[ray_index];
        bool ray_was_updated = false;

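        /* A ray that has finished its path (or hit the background) writes its
         * accumulated radiance into the render buffer, then frees its slot by
         * moving to RAY_TO_REGENERATE. */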
        if(IS_STATE(ray_state, ray_index, RAY_UPDATE_BUFFER)) {
                ray_was_updated = true;
                uint sample = state->sample;
                uint buffer_offset = kernel_split_state.buffer_offset[ray_index];
                ccl_global float *buffer = kernel_split_params.tile.buffer + buffer_offset;

                /* Accumulate result in output buffer. */
                kernel_write_result(kg, buffer, sample, L);

                ASSIGN_RAY_STATE(ray_state, ray_index, RAY_TO_REGENERATE);
        }

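        /* Cryptomatte accumulates per-pixel (id, weight) pairs across samples.
         * Once the final sample for a pixel has been written, the pairs are
         * sorted so that the ids covering most of the pixel come first in the
         * pass; the pass holds cryptomatte_depth pairs, i.e. 2 * depth floats. */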
        if(kernel_data.film.cryptomatte_passes) {
                /* Make sure no thread is writing to the buffers. */
                ccl_barrier(CCL_LOCAL_MEM_FENCE);
                if(ray_was_updated && state->sample - 1 == kernel_data.integrator.aa_samples) {
                        uint buffer_offset = kernel_split_state.buffer_offset[ray_index];
                        ccl_global float *buffer = kernel_split_params.tile.buffer + buffer_offset;
                        ccl_global float *cryptomatte_buffer = buffer + kernel_data.film.pass_cryptomatte;
                        kernel_sort_id_slots(cryptomatte_buffer, 2 * kernel_data.film.cryptomatte_depth);
                }
        }

        if(IS_STATE(ray_state, ray_index, RAY_TO_REGENERATE)) {
                /* We have completed the current work; get the next work. */
                ccl_global uint *work_pools = kernel_split_params.work_pools;
                uint total_work_size = kernel_split_params.total_work_size;
                uint work_index;

                if(!get_next_work(kg, work_pools, total_work_size, ray_index, &work_index)) {
                        /* If work is invalid, this means no more work is
                         * available and the thread may exit. */
                        ASSIGN_RAY_STATE(ray_state, ray_index, RAY_INACTIVE);
                }

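                /* The state is re-checked here because the branch above may
                 * just have retired this ray to RAY_INACTIVE. */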
                if(IS_STATE(ray_state, ray_index, RAY_TO_REGENERATE)) {
                        ccl_global WorkTile *tile = &kernel_split_params.tile;
                        uint x, y, sample;
                        get_work_pixel(tile, work_index, &x, &y, &sample);

                        /* Store buffer offset for writing to passes. */
                        uint buffer_offset = (tile->offset + x + y*tile->stride) * kernel_data.film.pass_stride;
                        kernel_split_state.buffer_offset[ray_index] = buffer_offset;

                        /* Initialize random numbers and ray. */
                        uint rng_hash;
                        kernel_path_trace_setup(kg, sample, x, y, &rng_hash, ray);

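                        /* kernel_path_trace_setup() appears to signal an
                         * invalid (degenerate) camera ray with t == 0.0f; such
                         * rays stay in RAY_TO_REGENERATE and fetch new work on
                         * the next round. */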
                        if(ray->t != 0.0f) {
                                /* Initialize throughput, path radiance, Ray
                                 * and PathState; these rays proceed with
                                 * path iteration. */
                                *throughput = make_float3(1.0f, 1.0f, 1.0f);
                                path_radiance_init(L, kernel_data.film.use_light_pass);
                                path_state_init(kg,
                                                AS_SHADER_DATA(&kernel_split_state.sd_DL_shadow[ray_index]),
                                                state,
                                                rng_hash,
                                                sample,
                                                ray);
#ifdef __SUBSURFACE__
                                kernel_path_subsurface_init_indirect(&kernel_split_state.ss_rays[ray_index]);
#endif
                                ASSIGN_RAY_STATE(ray_state, ray_index, RAY_REGENERATED);
                                enqueue_flag = 1;
                        }
                        else {
                                ASSIGN_RAY_STATE(ray_state, ray_index, RAY_TO_REGENERATE);
                        }
                }
        }

#ifndef __COMPUTE_DEVICE_GPU__
        }
#endif


        /* Enqueue RAY_REGENERATED rays into QUEUE_ACTIVE_AND_REGENERATED_RAYS;
         * these rays will be made active during the next scene_intersect
         * kernel. */
        enqueue_ray_index_local(ray_index,
                                QUEUE_ACTIVE_AND_REGENERATED_RAYS,
                                enqueue_flag,
                                kernel_split_params.queue_size,
                                local_queue_atomics,
                                kernel_split_state.queue_data,
                                kernel_split_params.queue_index);
}

CCL_NAMESPACE_END