[blender.git] / intern / cycles / kernel / kernels / opencl / kernel_holdout_emission_blurring_pathtermination_ao.cl
/*
 * Copyright 2011-2015 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "split/kernel_holdout_emission_blurring_pathtermination_ao.h"

__kernel void kernel_ocl_path_trace_holdout_emission_blurring_pathtermination_ao(
        ccl_global char *globals,
        ccl_constant KernelData *data,
        ccl_global char *shader_data,          /* Required throughout the kernel, except for probabilistic path termination and AO */
        ccl_global float *per_sample_output_buffers,
        ccl_global uint *rng_coop,             /* Required for "kernel_write_data_passes" and AO */
        ccl_global float3 *throughput_coop,    /* Required for handling holdout material and AO */
        ccl_global float *L_transparent_coop,  /* Required for handling holdout material */
        PathRadiance *PathRadiance_coop,       /* Required for "kernel_write_data_passes" and indirect primitive emission */
        ccl_global PathState *PathState_coop,  /* Required throughout the kernel and for AO */
        Intersection *Intersection_coop,       /* Required for indirect primitive emission */
        ccl_global float3 *AOAlpha_coop,       /* Required for AO */
        ccl_global float3 *AOBSDF_coop,        /* Required for AO */
        ccl_global Ray *AOLightRay_coop,       /* Required for AO */
        int sw, int sh, int sx, int sy, int stride,
        ccl_global char *ray_state,            /* Denotes the state of each ray */
        ccl_global unsigned int *work_array,   /* Denotes the work that each ray belongs to */
        ccl_global int *Queue_data,            /* Queue memory */
        ccl_global int *Queue_index,           /* Tracks the number of elements in each queue */
        int queuesize,                         /* Size (capacity) of each queue */
#ifdef __WORK_STEALING__
        unsigned int start_sample,
#endif
        int parallel_samples)                  /* Number of samples to be processed in parallel */
{
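        /* Per-work-group counters used by enqueue_ray_index_local() below to
         * coordinate queue insertion within the work-group. The first thread
         * zeroes them and the barrier makes the initialization visible to all
         * threads before the counters are used.
         */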
        ccl_local unsigned int local_queue_atomics_bg;
        ccl_local unsigned int local_queue_atomics_ao;
        if(get_local_id(0) == 0 && get_local_id(1) == 0) {
                local_queue_atomics_bg = 0;
                local_queue_atomics_ao = 0;
        }
        barrier(CLK_LOCAL_MEM_FENCE);

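        /* Per-ray flags set by the device function below: enqueue_flag requests
         * that this ray be queued for buffer update/regeneration, while
         * enqueue_flag_AO_SHADOW_RAY_CAST requests that its AO shadow ray be
         * queued for tracing.
         */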
        char enqueue_flag = 0;
        char enqueue_flag_AO_SHADOW_RAY_CAST = 0;
        int ray_index = get_global_id(1) * get_global_size(0) + get_global_id(0);
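        /* Map the flat global thread index to a ray slot from the queue of
         * active and regenerated rays; QUEUE_EMPTY_SLOT is returned when there
         * is no work for this thread.
         */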
        ray_index = get_ray_index(ray_index,
                                  QUEUE_ACTIVE_AND_REGENERATED_RAYS,
                                  Queue_data,
                                  queuesize,
                                  0);

#ifdef __COMPUTE_DEVICE_GPU__
        /* If we are executing on a GPU device, exit all threads that are not
         * required.
         *
         * If we are executing on a CPU device, we need to keep all threads
         * active, since there are barrier() calls later in the kernel and CPU
         * devices expect every thread of the work-group to execute the barrier
         * statement.
         */
        if(ray_index == QUEUE_EMPTY_SLOT) {
                return;
        }
#endif  /* __COMPUTE_DEVICE_GPU__ */

#ifndef __COMPUTE_DEVICE_GPU__
        if(ray_index != QUEUE_EMPTY_SLOT) {
#endif
                kernel_holdout_emission_blurring_pathtermination_ao(
                        globals,
                        data,
                        shader_data,
                        per_sample_output_buffers,
                        rng_coop,
                        throughput_coop,
                        L_transparent_coop,
                        PathRadiance_coop,
                        PathState_coop,
                        Intersection_coop,
                        AOAlpha_coop,
                        AOBSDF_coop,
                        AOLightRay_coop,
                        sw, sh, sx, sy, stride,
                        ray_state,
                        work_array,
#ifdef __WORK_STEALING__
                        start_sample,
#endif
                        parallel_samples,
                        ray_index,
                        &enqueue_flag,
                        &enqueue_flag_AO_SHADOW_RAY_CAST);
#ifndef __COMPUTE_DEVICE_GPU__
        }
#endif

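        /* enqueue_ray_index_local() below contains the barrier() calls referred
         * to above, so on CPU devices even threads without a ray must reach it.
         */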
        /* Enqueue RAY_UPDATE_BUFFER rays. */
        enqueue_ray_index_local(ray_index,
                                QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
                                enqueue_flag,
                                queuesize,
                                &local_queue_atomics_bg,
                                Queue_data,
                                Queue_index);

#ifdef __AO__
        /* Enqueue rays for which an AO shadow ray must be cast. */
        enqueue_ray_index_local(ray_index,
                                QUEUE_SHADOW_RAY_CAST_AO_RAYS,
                                enqueue_flag_AO_SHADOW_RAY_CAST,
                                queuesize,
                                &local_queue_atomics_ao,
                                Queue_data,
                                Queue_index);
#endif
}