source/blender/blenkernel/intern/subdiv_ccg.c
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2018 by Blender Foundation.
 * All rights reserved.
 */

/** \file
 * \ingroup bke
 */

#include "BKE_subdiv_ccg.h"

#include "DNA_mesh_types.h"
#include "DNA_meshdata_types.h"

#include "MEM_guardedalloc.h"

#include "BLI_math_bits.h"
#include "BLI_math_vector.h"
#include "BLI_task.h"

#include "BKE_DerivedMesh.h"
#include "BKE_ccg.h"
#include "BKE_mesh.h"
#include "BKE_subdiv.h"
#include "BKE_subdiv_eval.h"

#include "opensubdiv_topology_refiner_capi.h"

/* =============================================================================
 * Various forward declarations.
 */

static void subdiv_ccg_average_all_boundaries_and_corners(SubdivCCG *subdiv_ccg, CCGKey *key);

static void subdiv_ccg_average_inner_face_grids(SubdivCCG *subdiv_ccg,
                                                CCGKey *key,
                                                SubdivCCGFace *face);

/* =============================================================================
 * Generally useful internal helpers.
 */

/* Number of floats in per-vertex elements. */
static int num_element_float_get(const SubdivCCG *subdiv_ccg)
{
  /* We always have 3 floats for the coordinate. */
  int num_floats = 3;
  if (subdiv_ccg->has_normal) {
    num_floats += 3;
  }
  if (subdiv_ccg->has_mask) {
    num_floats += 1;
  }
  return num_floats;
}

/* Per-vertex element size in bytes. */
static int element_size_bytes_get(const SubdivCCG *subdiv_ccg)
{
  return sizeof(float) * num_element_float_get(subdiv_ccg);
}

/* =============================================================================
 * Internal helpers for CCG creation.
 */

static void subdiv_ccg_init_layers(SubdivCCG *subdiv_ccg, const SubdivToCCGSettings *settings)
{
  /* CCG always contains coordinates. The rest of the layers come after them. */
  int layer_offset = sizeof(float) * 3;
  /* Mask. */
  if (settings->need_mask) {
    subdiv_ccg->has_mask = true;
    subdiv_ccg->mask_offset = layer_offset;
    layer_offset += sizeof(float);
  }
  else {
    subdiv_ccg->has_mask = false;
    subdiv_ccg->mask_offset = -1;
  }
  /* Normals.
   *
   * NOTE: Keep them at the end, matching old CCGDM. Doesn't really matter
   * here, but some other area might in theory depend on the memory layout. */
  if (settings->need_normal) {
    subdiv_ccg->has_normal = true;
    subdiv_ccg->normal_offset = layer_offset;
    layer_offset += sizeof(float) * 3;
  }
  else {
    subdiv_ccg->has_normal = false;
    subdiv_ccg->normal_offset = -1;
  }
}
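
/* Worked example of the resulting element layout: with both mask and normals
 * enabled, each element is 7 floats (28 bytes):
 *
 *   offset  0: coordinate (3 floats)
 *   offset 12: mask       (1 float)
 *   offset 16: normal     (3 floats)
 *
 * These are the offsets that CCG_elem_co(), CCG_elem_mask() and CCG_elem_no()
 * use via the CCGKey filled in by BKE_subdiv_ccg_key() below. */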

/* TODO(sergey): Make this a more accessible function. */
static int topology_refiner_count_face_corners(OpenSubdiv_TopologyRefiner *topology_refiner)
{
  const int num_faces = topology_refiner->getNumFaces(topology_refiner);
  int num_corners = 0;
  for (int face_index = 0; face_index < num_faces; face_index++) {
    num_corners += topology_refiner->getNumFaceVertices(topology_refiner, face_index);
  }
  return num_corners;
}

/* NOTE: Grid size and layer flags are to be filled in before calling this
 * function. */
static void subdiv_ccg_alloc_elements(SubdivCCG *subdiv_ccg, Subdiv *subdiv)
{
  OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
  const int element_size = element_size_bytes_get(subdiv_ccg);
  /* Allocate memory for surface grids. */
  const int num_faces = topology_refiner->getNumFaces(topology_refiner);
  const int num_grids = topology_refiner_count_face_corners(topology_refiner);
  const int grid_size = BKE_subdiv_grid_size_from_level(subdiv_ccg->level);
  const int grid_area = grid_size * grid_size;
  subdiv_ccg->num_grids = num_grids;
  subdiv_ccg->grids = MEM_calloc_arrayN(num_grids, sizeof(CCGElem *), "subdiv ccg grids");
  subdiv_ccg->grids_storage = MEM_calloc_arrayN(
      num_grids, ((size_t)grid_area) * element_size, "subdiv ccg grids storage");
  const size_t grid_size_in_bytes = (size_t)grid_area * element_size;
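  /* All grids live in one contiguous storage block; each grids[i] is simply a
   * pointer into its slice of grids_storage, so the whole set can be freed
   * with two deallocations (see BKE_subdiv_ccg_destroy()). */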
  for (int grid_index = 0; grid_index < num_grids; grid_index++) {
    const size_t grid_offset = grid_size_in_bytes * grid_index;
    subdiv_ccg->grids[grid_index] = (CCGElem *)&subdiv_ccg->grids_storage[grid_offset];
  }
  /* Grid material flags. */
  subdiv_ccg->grid_flag_mats = MEM_calloc_arrayN(
      num_grids, sizeof(DMFlagMat), "ccg grid material flags");
  /* Grid hidden flags. */
  subdiv_ccg->grid_hidden = MEM_calloc_arrayN(
      num_grids, sizeof(BLI_bitmap *), "ccg grid hidden flags");
  for (int grid_index = 0; grid_index < num_grids; grid_index++) {
    subdiv_ccg->grid_hidden[grid_index] = BLI_BITMAP_NEW(grid_area, "ccg grid hidden");
  }
  /* TODO(sergey): Allocate memory for loose elements. */
  /* Allocate memory for faces. */
  subdiv_ccg->num_faces = num_faces;
  if (num_faces) {
    subdiv_ccg->faces = MEM_calloc_arrayN(num_faces, sizeof(SubdivCCGFace), "Subdiv CCG faces");
    subdiv_ccg->grid_faces = MEM_calloc_arrayN(
        num_grids, sizeof(SubdivCCGFace *), "Subdiv CCG grid faces");
  }
}

/* =============================================================================
 * Grids evaluation.
 */

typedef struct CCGEvalGridsData {
  SubdivCCG *subdiv_ccg;
  Subdiv *subdiv;
  int *face_ptex_offset;
  SubdivCCGMaskEvaluator *mask_evaluator;
  SubdivCCGMaterialFlagsEvaluator *material_flags_evaluator;
} CCGEvalGridsData;

static void subdiv_ccg_eval_grid_element(CCGEvalGridsData *data,
                                         const int ptex_face_index,
                                         const float u,
                                         const float v,
                                         unsigned char *element)
{
  Subdiv *subdiv = data->subdiv;
  SubdivCCG *subdiv_ccg = data->subdiv_ccg;
  if (subdiv->displacement_evaluator != NULL) {
    BKE_subdiv_eval_final_point(subdiv, ptex_face_index, u, v, (float *)element);
  }
  else if (subdiv_ccg->has_normal) {
    BKE_subdiv_eval_limit_point_and_normal(subdiv,
                                           ptex_face_index,
                                           u,
                                           v,
                                           (float *)element,
                                           (float *)(element + subdiv_ccg->normal_offset));
  }
  else {
    BKE_subdiv_eval_limit_point(subdiv, ptex_face_index, u, v, (float *)element);
  }
  if (subdiv_ccg->has_mask) {
    float *mask_value_ptr = (float *)(element + subdiv_ccg->mask_offset);
    if (data->mask_evaluator != NULL) {
      *mask_value_ptr = data->mask_evaluator->eval_mask(
          data->mask_evaluator, ptex_face_index, u, v);
    }
    else {
      *mask_value_ptr = 0.0f;
    }
  }
}

static void subdiv_ccg_eval_regular_grid(CCGEvalGridsData *data, const int face_index)
{
  SubdivCCG *subdiv_ccg = data->subdiv_ccg;
  const int ptex_face_index = data->face_ptex_offset[face_index];
  const int grid_size = subdiv_ccg->grid_size;
  const float grid_size_1_inv = 1.0f / (float)(grid_size - 1);
  const int element_size = element_size_bytes_get(subdiv_ccg);
  SubdivCCGFace *faces = subdiv_ccg->faces;
  SubdivCCGFace **grid_faces = subdiv_ccg->grid_faces;
  const SubdivCCGFace *face = &faces[face_index];
  for (int corner = 0; corner < face->num_grids; corner++) {
    const int grid_index = face->start_grid_index + corner;
    unsigned char *grid = (unsigned char *)subdiv_ccg->grids[grid_index];
    for (int y = 0; y < grid_size; y++) {
      const float grid_v = (float)y * grid_size_1_inv;
      for (int x = 0; x < grid_size; x++) {
        const float grid_u = (float)x * grid_size_1_inv;
        float u, v;
        BKE_subdiv_rotate_grid_to_quad(corner, grid_u, grid_v, &u, &v);
        const size_t grid_element_index = (size_t)y * grid_size + x;
        const size_t grid_element_offset = grid_element_index * element_size;
        subdiv_ccg_eval_grid_element(data, ptex_face_index, u, v, &grid[grid_element_offset]);
      }
    }
    /* Assign grid's face. */
    grid_faces[grid_index] = &faces[face_index];
    /* Assign material flags. */
    subdiv_ccg->grid_flag_mats[grid_index] = data->material_flags_evaluator->eval_material_flags(
        data->material_flags_evaluator, face_index);
  }
}

static void subdiv_ccg_eval_special_grid(CCGEvalGridsData *data, const int face_index)
{
  SubdivCCG *subdiv_ccg = data->subdiv_ccg;
  const int grid_size = subdiv_ccg->grid_size;
  const float grid_size_1_inv = 1.0f / (float)(grid_size - 1);
  const int element_size = element_size_bytes_get(subdiv_ccg);
  SubdivCCGFace *faces = subdiv_ccg->faces;
  SubdivCCGFace **grid_faces = subdiv_ccg->grid_faces;
  const SubdivCCGFace *face = &faces[face_index];
  for (int corner = 0; corner < face->num_grids; corner++) {
    const int grid_index = face->start_grid_index + corner;
    const int ptex_face_index = data->face_ptex_offset[face_index] + corner;
    unsigned char *grid = (unsigned char *)subdiv_ccg->grids[grid_index];
    for (int y = 0; y < grid_size; y++) {
      const float u = 1.0f - ((float)y * grid_size_1_inv);
      for (int x = 0; x < grid_size; x++) {
        const float v = 1.0f - ((float)x * grid_size_1_inv);
        const size_t grid_element_index = (size_t)y * grid_size + x;
        const size_t grid_element_offset = grid_element_index * element_size;
        subdiv_ccg_eval_grid_element(data, ptex_face_index, u, v, &grid[grid_element_offset]);
      }
    }
    /* Assign grid's face. */
    grid_faces[grid_index] = &faces[face_index];
    /* Assign material flags. */
    subdiv_ccg->grid_flag_mats[grid_index] = data->material_flags_evaluator->eval_material_flags(
        data->material_flags_evaluator, face_index);
  }
}

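/* Quad faces are evaluated as a single ptex face with all four grids rotated
 * into it via BKE_subdiv_rotate_grid_to_quad() (the "regular" path); faces
 * with any other number of corners get one ptex sub-face per corner (the
 * "special" path). */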
static void subdiv_ccg_eval_grids_task(void *__restrict userdata_v,
                                       const int face_index,
                                       const ParallelRangeTLS *__restrict UNUSED(tls))
{
  CCGEvalGridsData *data = userdata_v;
  SubdivCCG *subdiv_ccg = data->subdiv_ccg;
  SubdivCCGFace *face = &subdiv_ccg->faces[face_index];
  if (face->num_grids == 4) {
    subdiv_ccg_eval_regular_grid(data, face_index);
  }
  else {
    subdiv_ccg_eval_special_grid(data, face_index);
  }
}

static bool subdiv_ccg_evaluate_grids(SubdivCCG *subdiv_ccg,
                                      Subdiv *subdiv,
                                      SubdivCCGMaskEvaluator *mask_evaluator,
                                      SubdivCCGMaterialFlagsEvaluator *material_flags_evaluator)
{
  OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
  const int num_faces = topology_refiner->getNumFaces(topology_refiner);
  /* Initialize data passed to all the tasks. */
  CCGEvalGridsData data;
  data.subdiv_ccg = subdiv_ccg;
  data.subdiv = subdiv;
  data.face_ptex_offset = BKE_subdiv_face_ptex_offset_get(subdiv);
  data.mask_evaluator = mask_evaluator;
  data.material_flags_evaluator = material_flags_evaluator;
  /* Threaded grids evaluation. */
  ParallelRangeSettings parallel_range_settings;
  BLI_parallel_range_settings_defaults(&parallel_range_settings);
  BLI_task_parallel_range(
      0, num_faces, &data, subdiv_ccg_eval_grids_task, &parallel_range_settings);
  /* If displacement is used, we need to calculate normals after all final
   * coordinates are known. */
  if (subdiv->displacement_evaluator != NULL) {
    BKE_subdiv_ccg_recalc_normals(subdiv_ccg);
  }
  return true;
}

/* Initialize face descriptors, assuming memory for them was already
 * allocated. */
static void subdiv_ccg_init_faces(SubdivCCG *subdiv_ccg)
{
  Subdiv *subdiv = subdiv_ccg->subdiv;
  OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
  const int num_faces = subdiv_ccg->num_faces;
  int corner_index = 0;
  for (int face_index = 0; face_index < num_faces; face_index++) {
    const int num_corners = topology_refiner->getNumFaceVertices(topology_refiner, face_index);
    subdiv_ccg->faces[face_index].num_grids = num_corners;
    subdiv_ccg->faces[face_index].start_grid_index = corner_index;
    corner_index += num_corners;
  }
}

/* TODO(sergey): Consider making it generic enough to fit into BLI. */
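/* The static buffer covers faces with up to 64 corners without touching the
 * heap; larger faces (rare) fall back to a lazily grown heap allocation which
 * is reused across calls and freed once at the end of the traversal. */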
typedef struct StaticOrHeapIntStorage {
  int static_storage[64];
  int static_storage_size;
  int *heap_storage;
  int heap_storage_size;
} StaticOrHeapIntStorage;

static void static_or_heap_storage_init(StaticOrHeapIntStorage *storage)
{
  storage->static_storage_size = sizeof(storage->static_storage) /
                                 sizeof(*storage->static_storage);
  storage->heap_storage = NULL;
  storage->heap_storage_size = 0;
}

static int *static_or_heap_storage_get(StaticOrHeapIntStorage *storage, int size)
{
  /* Requested size is small enough to fit into stack-allocated memory. */
  if (size <= storage->static_storage_size) {
    return storage->static_storage;
  }
  /* Make sure the heap is big enough. */
  if (size > storage->heap_storage_size) {
    MEM_SAFE_FREE(storage->heap_storage);
    storage->heap_storage = MEM_malloc_arrayN(size, sizeof(int), "int storage");
    storage->heap_storage_size = size;
  }
  return storage->heap_storage;
}

static void static_or_heap_storage_free(StaticOrHeapIntStorage *storage)
{
  MEM_SAFE_FREE(storage->heap_storage);
}

static void subdiv_ccg_allocate_adjacent_edges(SubdivCCG *subdiv_ccg, const int num_edges)
{
  subdiv_ccg->num_adjacent_edges = num_edges;
  subdiv_ccg->adjacent_edges = MEM_calloc_arrayN(
      subdiv_ccg->num_adjacent_edges, sizeof(*subdiv_ccg->adjacent_edges), "ccg adjacent edges");
}

/* Returns storage where boundary elements are to be stored. */
static CCGElem **subdiv_ccg_adjacent_edge_add_face(SubdivCCG *subdiv_ccg,
                                                   SubdivCCGAdjacentEdge *adjacent_edge,
                                                   SubdivCCGFace *face)
{
  /* The boundary has 2 * grid_size elements: one half-edge from each of the
   * two grids which are adjacent to the edge within this face. */
  const int num_boundary_elements = subdiv_ccg->grid_size * 2;
  const int adjacent_face_index = adjacent_edge->num_adjacent_faces;
  ++adjacent_edge->num_adjacent_faces;
  /* Store new adjacent face. */
  adjacent_edge->faces = MEM_reallocN(
      adjacent_edge->faces, adjacent_edge->num_adjacent_faces * sizeof(*adjacent_edge->faces));
  adjacent_edge->faces[adjacent_face_index] = face;
  /* Allocate memory for the boundary elements. */
  adjacent_edge->boundary_elements = MEM_reallocN(adjacent_edge->boundary_elements,
                                                  adjacent_edge->num_adjacent_faces *
                                                      sizeof(*adjacent_edge->boundary_elements));
  adjacent_edge->boundary_elements[adjacent_face_index] = MEM_malloc_arrayN(
      num_boundary_elements, sizeof(CCGElem *), "ccg adjacent boundary");
  return adjacent_edge->boundary_elements[adjacent_face_index];
}

static void subdiv_ccg_init_faces_edge_neighborhood(SubdivCCG *subdiv_ccg)
{
  Subdiv *subdiv = subdiv_ccg->subdiv;
  SubdivCCGFace *faces = subdiv_ccg->faces;
  OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
  const int num_edges = topology_refiner->getNumEdges(topology_refiner);
  const int grid_size = subdiv_ccg->grid_size;
  if (num_edges == 0) {
    /* Early output, nothing to do in this case. */
    return;
  }
  subdiv_ccg_allocate_adjacent_edges(subdiv_ccg, num_edges);
  /* Initialize storage. */
  StaticOrHeapIntStorage face_vertices_storage;
  StaticOrHeapIntStorage face_edges_storage;
  static_or_heap_storage_init(&face_vertices_storage);
  static_or_heap_storage_init(&face_edges_storage);
  /* Key to access elements. */
  CCGKey key;
  BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
  /* Store adjacency for all faces. */
  const int num_faces = subdiv_ccg->num_faces;
  for (int face_index = 0; face_index < num_faces; face_index++) {
    SubdivCCGFace *face = &faces[face_index];
    const int num_face_grids = face->num_grids;
    const int num_face_edges = num_face_grids;
    int *face_vertices = static_or_heap_storage_get(&face_vertices_storage, num_face_edges);
    topology_refiner->getFaceVertices(topology_refiner, face_index, face_vertices);
    /* Note that the order of edges is the same as the order of MLoops, which
     * also means it's the same as the order of grids. */
    int *face_edges = static_or_heap_storage_get(&face_edges_storage, num_face_edges);
    topology_refiner->getFaceEdges(topology_refiner, face_index, face_edges);
    /* Store grid adjacency for each of the face's edges. */
    for (int corner = 0; corner < num_face_edges; corner++) {
      const int vertex_index = face_vertices[corner];
      const int edge_index = face_edges[corner];
      int edge_vertices[2];
      topology_refiner->getEdgeVertices(topology_refiner, edge_index, edge_vertices);
      const bool is_edge_flipped = (edge_vertices[0] != vertex_index);
      /* Grid which is adjacent to the current corner. */
      const int current_grid_index = face->start_grid_index + corner;
      CCGElem *current_grid = subdiv_ccg->grids[current_grid_index];
      /* Grid which is adjacent to the next corner. */
      const int next_grid_index = face->start_grid_index + (corner + 1) % num_face_grids;
      CCGElem *next_grid = subdiv_ccg->grids[next_grid_index];
      /* Add new face to the adjacent edge. */
      SubdivCCGAdjacentEdge *adjacent_edge = &subdiv_ccg->adjacent_edges[edge_index];
      CCGElem **boundary_elements = subdiv_ccg_adjacent_edge_add_face(
          subdiv_ccg, adjacent_edge, face);
      /* Fill CCG elements along the edge. */
      int boundary_element_index = 0;
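      /* Elements are stored following the edge's own direction, from
       * edge_vertices[0] to edge_vertices[1]: first along the boundary of one
       * adjacent grid from the coarse vertex towards the edge midpoint, then
       * along the boundary of the other grid from the midpoint towards the
       * opposite vertex; 2 * grid_size elements in total. */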
      if (is_edge_flipped) {
        for (int i = 0; i < grid_size; i++) {
          boundary_elements[boundary_element_index++] = CCG_grid_elem(
              &key, next_grid, grid_size - i - 1, grid_size - 1);
        }
        for (int i = 0; i < grid_size; i++) {
          boundary_elements[boundary_element_index++] = CCG_grid_elem(
              &key, current_grid, grid_size - 1, i);
        }
      }
      else {
        for (int i = 0; i < grid_size; i++) {
          boundary_elements[boundary_element_index++] = CCG_grid_elem(
              &key, current_grid, grid_size - 1, grid_size - i - 1);
        }
        for (int i = 0; i < grid_size; i++) {
          boundary_elements[boundary_element_index++] = CCG_grid_elem(
              &key, next_grid, i, grid_size - 1);
        }
      }
    }
  }
  /* Free possibly heap-allocated storage. */
  static_or_heap_storage_free(&face_vertices_storage);
  static_or_heap_storage_free(&face_edges_storage);
}

static void subdiv_ccg_allocate_adjacent_vertices(SubdivCCG *subdiv_ccg, const int num_vertices)
{
  subdiv_ccg->num_adjacent_vertices = num_vertices;
  subdiv_ccg->adjacent_vertices = MEM_calloc_arrayN(subdiv_ccg->num_adjacent_vertices,
                                                    sizeof(*subdiv_ccg->adjacent_vertices),
                                                    "ccg adjacent vertices");
}

/* Returns storage where corner elements are to be stored. This is a pointer
 * to the actual storage. */
static CCGElem **subdiv_ccg_adjacent_vertex_add_face(SubdivCCGAdjacentVertex *adjacent_vertex,
                                                     SubdivCCGFace *face)
{
  const int adjacent_face_index = adjacent_vertex->num_adjacent_faces;
  ++adjacent_vertex->num_adjacent_faces;
  /* Store new adjacent face. */
  adjacent_vertex->faces = MEM_reallocN(adjacent_vertex->faces,
                                        adjacent_vertex->num_adjacent_faces *
                                            sizeof(*adjacent_vertex->faces));
  adjacent_vertex->faces[adjacent_face_index] = face;
  /* Allocate memory for the corner elements. */
  adjacent_vertex->corner_elements = MEM_reallocN(adjacent_vertex->corner_elements,
                                                  adjacent_vertex->num_adjacent_faces *
                                                      sizeof(*adjacent_vertex->corner_elements));
  return &adjacent_vertex->corner_elements[adjacent_face_index];
}

static void subdiv_ccg_init_faces_vertex_neighborhood(SubdivCCG *subdiv_ccg)
{
  Subdiv *subdiv = subdiv_ccg->subdiv;
  SubdivCCGFace *faces = subdiv_ccg->faces;
  OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
  const int num_vertices = topology_refiner->getNumVertices(topology_refiner);
  const int grid_size = subdiv_ccg->grid_size;
  if (num_vertices == 0) {
    /* Early output, nothing to do in this case. */
    return;
  }
  subdiv_ccg_allocate_adjacent_vertices(subdiv_ccg, num_vertices);
  /* Initialize storage. */
  StaticOrHeapIntStorage face_vertices_storage;
  static_or_heap_storage_init(&face_vertices_storage);
  /* Key to access elements. */
  CCGKey key;
  BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
  /* Store adjacency for all faces. */
  const int num_faces = subdiv_ccg->num_faces;
  for (int face_index = 0; face_index < num_faces; face_index++) {
    SubdivCCGFace *face = &faces[face_index];
    const int num_face_grids = face->num_grids;
    const int num_face_edges = num_face_grids;
    int *face_vertices = static_or_heap_storage_get(&face_vertices_storage, num_face_edges);
    topology_refiner->getFaceVertices(topology_refiner, face_index, face_vertices);
    for (int corner = 0; corner < num_face_edges; corner++) {
      const int vertex_index = face_vertices[corner];
      /* Grid which is adjacent to the current corner. */
      const int grid_index = face->start_grid_index + corner;
      CCGElem *grid = subdiv_ccg->grids[grid_index];
      /* Add new face to the adjacent vertex. */
      SubdivCCGAdjacentVertex *adjacent_vertex = &subdiv_ccg->adjacent_vertices[vertex_index];
      CCGElem **corner_element = subdiv_ccg_adjacent_vertex_add_face(adjacent_vertex, face);
      *corner_element = CCG_grid_elem(&key, grid, grid_size - 1, grid_size - 1);
    }
  }
  /* Free possibly heap-allocated storage. */
  static_or_heap_storage_free(&face_vertices_storage);
}

static void subdiv_ccg_init_faces_neighborhood(SubdivCCG *subdiv_ccg)
{
  subdiv_ccg_init_faces_edge_neighborhood(subdiv_ccg);
  subdiv_ccg_init_faces_vertex_neighborhood(subdiv_ccg);
}

/* =============================================================================
 * Creation / evaluation.
 */

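/* A worked example of the resolution -> level -> grid size mapping used below,
 * assuming BKE_subdiv_grid_size_from_level() computes (1 << (level - 1)) + 1:
 * a resolution of 33 gives level = bitscan_forward_i(32) = 5 and grid_size of
 * 17, so every face corner receives a 17x17 grid of elements. */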
SubdivCCG *BKE_subdiv_to_ccg(Subdiv *subdiv,
                             const SubdivToCCGSettings *settings,
                             SubdivCCGMaskEvaluator *mask_evaluator,
                             SubdivCCGMaterialFlagsEvaluator *material_flags_evaluator)
{
  BKE_subdiv_stats_begin(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_CCG);
  SubdivCCG *subdiv_ccg = MEM_callocN(sizeof(SubdivCCG), "subdiv ccg");
  subdiv_ccg->subdiv = subdiv;
  subdiv_ccg->level = bitscan_forward_i(settings->resolution - 1);
  subdiv_ccg->grid_size = BKE_subdiv_grid_size_from_level(subdiv_ccg->level);
  subdiv_ccg_init_layers(subdiv_ccg, settings);
  subdiv_ccg_alloc_elements(subdiv_ccg, subdiv);
  subdiv_ccg_init_faces(subdiv_ccg);
  subdiv_ccg_init_faces_neighborhood(subdiv_ccg);
  if (!subdiv_ccg_evaluate_grids(subdiv_ccg, subdiv, mask_evaluator, material_flags_evaluator)) {
    BKE_subdiv_ccg_destroy(subdiv_ccg);
    BKE_subdiv_stats_end(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_CCG);
    return NULL;
  }
  BKE_subdiv_stats_end(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_CCG);
  return subdiv_ccg;
}

Mesh *BKE_subdiv_to_ccg_mesh(Subdiv *subdiv,
                             const SubdivToCCGSettings *settings,
                             const Mesh *coarse_mesh)
{
  /* Make sure evaluator is ready. */
  BKE_subdiv_stats_begin(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_CCG);
  if (!BKE_subdiv_eval_update_from_mesh(subdiv, coarse_mesh)) {
    if (coarse_mesh->totpoly) {
      return NULL;
    }
  }
  BKE_subdiv_stats_end(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_CCG);
  SubdivCCGMaskEvaluator mask_evaluator;
  bool has_mask = BKE_subdiv_ccg_mask_init_from_paint(&mask_evaluator, coarse_mesh);
  SubdivCCGMaterialFlagsEvaluator material_flags_evaluator;
  BKE_subdiv_ccg_material_flags_init_from_mesh(&material_flags_evaluator, coarse_mesh);
  SubdivCCG *subdiv_ccg = BKE_subdiv_to_ccg(
      subdiv, settings, has_mask ? &mask_evaluator : NULL, &material_flags_evaluator);
  if (has_mask) {
    mask_evaluator.free(&mask_evaluator);
  }
  material_flags_evaluator.free(&material_flags_evaluator);
  if (subdiv_ccg == NULL) {
    return NULL;
  }
  Mesh *result = BKE_mesh_new_nomain_from_template(coarse_mesh, 0, 0, 0, 0, 0);
  result->runtime.subdiv_ccg = subdiv_ccg;
  return result;
}

void BKE_subdiv_ccg_destroy(SubdivCCG *subdiv_ccg)
{
  const int num_grids = subdiv_ccg->num_grids;
  MEM_SAFE_FREE(subdiv_ccg->grids);
  MEM_SAFE_FREE(subdiv_ccg->grids_storage);
  MEM_SAFE_FREE(subdiv_ccg->edges);
  MEM_SAFE_FREE(subdiv_ccg->vertices);
  MEM_SAFE_FREE(subdiv_ccg->grid_flag_mats);
  if (subdiv_ccg->grid_hidden != NULL) {
    for (int grid_index = 0; grid_index < num_grids; grid_index++) {
      MEM_freeN(subdiv_ccg->grid_hidden[grid_index]);
    }
    MEM_freeN(subdiv_ccg->grid_hidden);
  }
  if (subdiv_ccg->subdiv != NULL) {
    BKE_subdiv_free(subdiv_ccg->subdiv);
  }
  MEM_SAFE_FREE(subdiv_ccg->faces);
  MEM_SAFE_FREE(subdiv_ccg->grid_faces);
  /* Free map of adjacent edges. */
  for (int i = 0; i < subdiv_ccg->num_adjacent_edges; i++) {
    SubdivCCGAdjacentEdge *adjacent_edge = &subdiv_ccg->adjacent_edges[i];
    for (int face_index = 0; face_index < adjacent_edge->num_adjacent_faces; face_index++) {
      MEM_SAFE_FREE(adjacent_edge->boundary_elements[face_index]);
    }
    MEM_SAFE_FREE(adjacent_edge->faces);
    MEM_SAFE_FREE(adjacent_edge->boundary_elements);
  }
  MEM_SAFE_FREE(subdiv_ccg->adjacent_edges);
  /* Free map of adjacent vertices. */
  for (int i = 0; i < subdiv_ccg->num_adjacent_vertices; i++) {
    SubdivCCGAdjacentVertex *adjacent_vertex = &subdiv_ccg->adjacent_vertices[i];
    MEM_SAFE_FREE(adjacent_vertex->faces);
    MEM_SAFE_FREE(adjacent_vertex->corner_elements);
  }
  MEM_SAFE_FREE(subdiv_ccg->adjacent_vertices);
  MEM_freeN(subdiv_ccg);
}

void BKE_subdiv_ccg_key(CCGKey *key, const SubdivCCG *subdiv_ccg, int level)
{
  key->level = level;
  key->elem_size = element_size_bytes_get(subdiv_ccg);
  key->grid_size = BKE_subdiv_grid_size_from_level(level);
  key->grid_area = key->grid_size * key->grid_size;
  key->grid_bytes = key->elem_size * key->grid_area;

  key->normal_offset = subdiv_ccg->normal_offset;
  key->mask_offset = subdiv_ccg->mask_offset;

  key->has_normals = subdiv_ccg->has_normal;
  key->has_mask = subdiv_ccg->has_mask;
}

void BKE_subdiv_ccg_key_top_level(CCGKey *key, const SubdivCCG *subdiv_ccg)
{
  BKE_subdiv_ccg_key(key, subdiv_ccg, subdiv_ccg->level);
}
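
/* Typical usage of the key, as seen throughout this file:
 *
 *   CCGKey key;
 *   BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
 *   CCGElem *elem = CCG_grid_elem(&key, grid, x, y);
 *   float *co = CCG_elem_co(&key, elem);
 */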

/* =============================================================================
 * Normals.
 */

typedef struct RecalcInnerNormalsData {
  SubdivCCG *subdiv_ccg;
  CCGKey *key;
} RecalcInnerNormalsData;

typedef struct RecalcInnerNormalsTLSData {
  float (*face_normals)[3];
} RecalcInnerNormalsTLSData;

/* Evaluate high-res face normals, for faces which correspond to grid elements
 *
 *   {(x, y), (x + 1, y), (x + 1, y + 1), (x, y + 1)}
 *
 * The result is stored in the normals storage from TLS. */
static void subdiv_ccg_recalc_inner_face_normals(SubdivCCG *subdiv_ccg,
                                                 CCGKey *key,
                                                 RecalcInnerNormalsTLSData *tls,
                                                 const int grid_index)
{
  const int grid_size = subdiv_ccg->grid_size;
  const int grid_size_1 = grid_size - 1;
  CCGElem *grid = subdiv_ccg->grids[grid_index];
  if (tls->face_normals == NULL) {
    tls->face_normals = MEM_malloc_arrayN(
        grid_size_1 * grid_size_1, 3 * sizeof(float), "CCG TLS normals");
  }
  for (int y = 0; y < grid_size - 1; y++) {
    for (int x = 0; x < grid_size - 1; x++) {
      CCGElem *grid_elements[4] = {
          CCG_grid_elem(key, grid, x, y + 1),
          CCG_grid_elem(key, grid, x + 1, y + 1),
          CCG_grid_elem(key, grid, x + 1, y),
          CCG_grid_elem(key, grid, x, y),
      };
      float *co[4] = {
          CCG_elem_co(key, grid_elements[0]),
          CCG_elem_co(key, grid_elements[1]),
          CCG_elem_co(key, grid_elements[2]),
          CCG_elem_co(key, grid_elements[3]),
      };
      const int face_index = y * grid_size_1 + x;
      float *face_normal = tls->face_normals[face_index];
      normal_quad_v3(face_normal, co[0], co[1], co[2], co[3]);
    }
  }
}

/* Average normals at every grid element, using adjacent face normals. */
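/* Interior elements average up to four adjacent face normals, edge elements
 * two, and corner elements one. Note that the result is divided by the number
 * of contributing faces but not re-normalized to unit length here. */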
static void subdiv_ccg_average_inner_face_normals(SubdivCCG *subdiv_ccg,
                                                  CCGKey *key,
                                                  RecalcInnerNormalsTLSData *tls,
                                                  const int grid_index)
{
  const int grid_size = subdiv_ccg->grid_size;
  const int grid_size_1 = grid_size - 1;
  CCGElem *grid = subdiv_ccg->grids[grid_index];
  const float(*face_normals)[3] = tls->face_normals;
  for (int y = 0; y < grid_size; y++) {
    for (int x = 0; x < grid_size; x++) {
      float normal_acc[3] = {0.0f, 0.0f, 0.0f};
      int counter = 0;
      /* Accumulate normals of all adjacent faces. */
      if (x < grid_size_1 && y < grid_size_1) {
        add_v3_v3(normal_acc, face_normals[y * grid_size_1 + x]);
        counter++;
      }
      if (x >= 1) {
        if (y < grid_size_1) {
          add_v3_v3(normal_acc, face_normals[y * grid_size_1 + (x - 1)]);
          counter++;
        }
        if (y >= 1) {
          add_v3_v3(normal_acc, face_normals[(y - 1) * grid_size_1 + (x - 1)]);
          counter++;
        }
      }
      if (y >= 1 && x < grid_size_1) {
        add_v3_v3(normal_acc, face_normals[(y - 1) * grid_size_1 + x]);
        counter++;
      }
      /* Average and store. */
      mul_v3_v3fl(CCG_grid_elem_no(key, grid, x, y), normal_acc, 1.0f / (float)counter);
    }
  }
}

static void subdiv_ccg_recalc_inner_normal_task(void *__restrict userdata_v,
                                                const int grid_index,
                                                const ParallelRangeTLS *__restrict tls_v)
{
  RecalcInnerNormalsData *data = userdata_v;
  RecalcInnerNormalsTLSData *tls = tls_v->userdata_chunk;
  subdiv_ccg_recalc_inner_face_normals(data->subdiv_ccg, data->key, tls, grid_index);
  subdiv_ccg_average_inner_face_normals(data->subdiv_ccg, data->key, tls, grid_index);
}

static void subdiv_ccg_recalc_inner_normal_finalize(void *__restrict UNUSED(userdata),
                                                    void *__restrict tls_v)
{
  RecalcInnerNormalsTLSData *tls = tls_v;
  MEM_SAFE_FREE(tls->face_normals);
}

/* Recalculate normals which correspond to non-boundary elements of grids. */
static void subdiv_ccg_recalc_inner_grid_normals(SubdivCCG *subdiv_ccg)
{
  CCGKey key;
  BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
  RecalcInnerNormalsData data = {
      .subdiv_ccg = subdiv_ccg,
      .key = &key,
  };
  RecalcInnerNormalsTLSData tls_data = {NULL};
  ParallelRangeSettings parallel_range_settings;
  BLI_parallel_range_settings_defaults(&parallel_range_settings);
  parallel_range_settings.userdata_chunk = &tls_data;
  parallel_range_settings.userdata_chunk_size = sizeof(tls_data);
  parallel_range_settings.func_finalize = subdiv_ccg_recalc_inner_normal_finalize;
  BLI_task_parallel_range(0,
                          subdiv_ccg->num_grids,
                          &data,
                          subdiv_ccg_recalc_inner_normal_task,
                          &parallel_range_settings);
}

void BKE_subdiv_ccg_recalc_normals(SubdivCCG *subdiv_ccg)
{
  if (!subdiv_ccg->has_normal) {
    /* Grids don't have normals, so we can return early. */
    return;
  }
  subdiv_ccg_recalc_inner_grid_normals(subdiv_ccg);
  BKE_subdiv_ccg_average_grids(subdiv_ccg);
}

typedef struct RecalcModifiedInnerNormalsData {
  SubdivCCG *subdiv_ccg;
  CCGKey *key;
  SubdivCCGFace **effected_ccg_faces;
} RecalcModifiedInnerNormalsData;

static void subdiv_ccg_recalc_modified_inner_normal_task(void *__restrict userdata_v,
                                                         const int face_index,
                                                         const ParallelRangeTLS *__restrict tls_v)
{
  RecalcModifiedInnerNormalsData *data = userdata_v;
  SubdivCCG *subdiv_ccg = data->subdiv_ccg;
  CCGKey *key = data->key;
  RecalcInnerNormalsTLSData *tls = tls_v->userdata_chunk;
  SubdivCCGFace **faces = data->effected_ccg_faces;
  SubdivCCGFace *face = faces[face_index];
  const int num_face_grids = face->num_grids;
  for (int i = 0; i < num_face_grids; i++) {
    const int grid_index = face->start_grid_index + i;
    subdiv_ccg_recalc_inner_face_normals(data->subdiv_ccg, data->key, tls, grid_index);
    subdiv_ccg_average_inner_face_normals(data->subdiv_ccg, data->key, tls, grid_index);
  }
  subdiv_ccg_average_inner_face_grids(subdiv_ccg, key, face);
}

static void subdiv_ccg_recalc_modified_inner_normal_finalize(void *__restrict UNUSED(userdata),
                                                             void *__restrict tls_v)
{
  RecalcInnerNormalsTLSData *tls = tls_v;
  MEM_SAFE_FREE(tls->face_normals);
}

static void subdiv_ccg_recalc_modified_inner_grid_normals(SubdivCCG *subdiv_ccg,
                                                          struct CCGFace **effected_faces,
                                                          int num_effected_faces)
{
  CCGKey key;
  BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
  RecalcModifiedInnerNormalsData data = {
      .subdiv_ccg = subdiv_ccg,
      .key = &key,
      .effected_ccg_faces = (SubdivCCGFace **)effected_faces,
  };
  RecalcInnerNormalsTLSData tls_data = {NULL};
  ParallelRangeSettings parallel_range_settings;
  BLI_parallel_range_settings_defaults(&parallel_range_settings);
  parallel_range_settings.userdata_chunk = &tls_data;
  parallel_range_settings.userdata_chunk_size = sizeof(tls_data);
  parallel_range_settings.func_finalize = subdiv_ccg_recalc_modified_inner_normal_finalize;
  BLI_task_parallel_range(0,
                          num_effected_faces,
                          &data,
                          subdiv_ccg_recalc_modified_inner_normal_task,
                          &parallel_range_settings);
}

void BKE_subdiv_ccg_update_normals(SubdivCCG *subdiv_ccg,
                                   struct CCGFace **effected_faces,
                                   int num_effected_faces)
{
  if (!subdiv_ccg->has_normal) {
    /* Grids don't have normals, so we can return early. */
    return;
  }
  if (num_effected_faces == 0) {
    /* No faces changed, so nothing to do here. */
    return;
  }
  subdiv_ccg_recalc_modified_inner_grid_normals(subdiv_ccg, effected_faces, num_effected_faces);
  /* TODO(sergey): Only average elements which are adjacent to modified
   * faces. */
  CCGKey key;
  BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
  subdiv_ccg_average_all_boundaries_and_corners(subdiv_ccg, &key);
}

/* =============================================================================
 * Boundary averaging/stitching.
 */

typedef struct AverageInnerGridsData {
  SubdivCCG *subdiv_ccg;
  CCGKey *key;
} AverageInnerGridsData;

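/* Average two values in place: both a and b end up holding their mean. */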
static void average_grid_element_value_v3(float a[3], float b[3])
{
  add_v3_v3(a, b);
  mul_v3_fl(a, 0.5f);
  copy_v3_v3(b, a);
}

static void average_grid_element(SubdivCCG *subdiv_ccg,
                                 CCGKey *key,
                                 CCGElem *grid_element_a,
                                 CCGElem *grid_element_b)
{
  average_grid_element_value_v3(CCG_elem_co(key, grid_element_a),
                                CCG_elem_co(key, grid_element_b));
  if (subdiv_ccg->has_normal) {
    average_grid_element_value_v3(CCG_elem_no(key, grid_element_a),
                                  CCG_elem_no(key, grid_element_b));
  }
  if (subdiv_ccg->has_mask) {
    float mask = (*CCG_elem_mask(key, grid_element_a) + *CCG_elem_mask(key, grid_element_b)) *
                 0.5f;
    *CCG_elem_mask(key, grid_element_a) = mask;
    *CCG_elem_mask(key, grid_element_b) = mask;
  }
}

/* Accumulator to hold data during averaging. */
typedef struct GridElementAccumulator {
  float co[3];
  float no[3];
  float mask;
} GridElementAccumulator;

static void element_accumulator_init(GridElementAccumulator *accumulator)
{
  zero_v3(accumulator->co);
  zero_v3(accumulator->no);
  accumulator->mask = 0.0f;
}

static void element_accumulator_add(GridElementAccumulator *accumulator,
                                    const SubdivCCG *subdiv_ccg,
                                    CCGKey *key,
                                    /*const*/ CCGElem *grid_element)
{
  add_v3_v3(accumulator->co, CCG_elem_co(key, grid_element));
  if (subdiv_ccg->has_normal) {
    add_v3_v3(accumulator->no, CCG_elem_no(key, grid_element));
  }
  if (subdiv_ccg->has_mask) {
    accumulator->mask += *CCG_elem_mask(key, grid_element);
  }
}

static void element_accumulator_mul_fl(GridElementAccumulator *accumulator, const float f)
{
  mul_v3_fl(accumulator->co, f);
  mul_v3_fl(accumulator->no, f);
  accumulator->mask *= f;
}

static void element_accumulator_copy(SubdivCCG *subdiv_ccg,
                                     CCGKey *key,
                                     CCGElem *destination,
                                     const GridElementAccumulator *accumulator)
{
  copy_v3_v3(CCG_elem_co(key, destination), accumulator->co);
  if (subdiv_ccg->has_normal) {
    copy_v3_v3(CCG_elem_no(key, destination), accumulator->no);
  }
  if (subdiv_ccg->has_mask) {
    *CCG_elem_mask(key, destination) = accumulator->mask;
  }
}

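/* Stitch the interior boundaries between the grids of a single face: each
 * grid's column x == 0 is shared with the previous grid's row y == 0, and the
 * two runs of elements are averaged pairwise so the seam stays watertight. */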
static void subdiv_ccg_average_inner_face_grids(SubdivCCG *subdiv_ccg,
                                                CCGKey *key,
                                                SubdivCCGFace *face)
{
  CCGElem **grids = subdiv_ccg->grids;
  const int num_face_grids = face->num_grids;
  const int grid_size = subdiv_ccg->grid_size;
  CCGElem *prev_grid = grids[face->start_grid_index + num_face_grids - 1];
  for (int corner = 0; corner < num_face_grids; corner++) {
    CCGElem *grid = grids[face->start_grid_index + corner];
    for (int i = 0; i < grid_size; i++) {
      CCGElem *prev_grid_element = CCG_grid_elem(key, prev_grid, i, 0);
      CCGElem *grid_element = CCG_grid_elem(key, grid, 0, i);
      average_grid_element(subdiv_ccg, key, prev_grid_element, grid_element);
    }
    prev_grid = grid;
  }
}

static void subdiv_ccg_average_inner_grids_task(void *__restrict userdata_v,
                                                const int face_index,
                                                const ParallelRangeTLS *__restrict UNUSED(tls_v))
{
  AverageInnerGridsData *data = userdata_v;
  SubdivCCG *subdiv_ccg = data->subdiv_ccg;
  CCGKey *key = data->key;
  SubdivCCGFace *faces = subdiv_ccg->faces;
  SubdivCCGFace *face = &faces[face_index];
  subdiv_ccg_average_inner_face_grids(subdiv_ccg, key, face);
}

typedef struct AverageGridsBoundariesData {
  SubdivCCG *subdiv_ccg;
  CCGKey *key;
} AverageGridsBoundariesData;

typedef struct AverageGridsBoundariesTLSData {
  GridElementAccumulator *accumulators;
} AverageGridsBoundariesTLSData;

static void subdiv_ccg_average_grids_boundary(SubdivCCG *subdiv_ccg,
                                              CCGKey *key,
                                              SubdivCCGAdjacentEdge *adjacent_edge,
                                              AverageGridsBoundariesTLSData *tls)
{
  const int num_adjacent_faces = adjacent_edge->num_adjacent_faces;
  const int grid_size2 = subdiv_ccg->grid_size * 2;
  if (num_adjacent_faces == 1) {
    /* Nothing to average with. */
    return;
  }
  if (tls->accumulators == NULL) {
    tls->accumulators = MEM_calloc_arrayN(
        grid_size2, sizeof(GridElementAccumulator), "average accumulators");
  }
  else {
    for (int i = 1; i < grid_size2 - 1; i++) {
      element_accumulator_init(&tls->accumulators[i]);
    }
  }
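  /* Accumulate the boundary elements of every adjacent face. The endpoints
   * (i == 0 and i == grid_size2 - 1) are skipped: those are the edge's
   * vertices, which are averaged separately by the corner pass. */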
  for (int face_index = 0; face_index < num_adjacent_faces; face_index++) {
    for (int i = 1; i < grid_size2 - 1; i++) {
      CCGElem *grid_element = adjacent_edge->boundary_elements[face_index][i];
      element_accumulator_add(&tls->accumulators[i], subdiv_ccg, key, grid_element);
    }
  }
  for (int i = 1; i < grid_size2 - 1; i++) {
    element_accumulator_mul_fl(&tls->accumulators[i], 1.0f / (float)num_adjacent_faces);
  }
  /* Copy the averaged values back to all adjacent faces. */
  for (int face_index = 0; face_index < num_adjacent_faces; face_index++) {
    for (int i = 1; i < grid_size2 - 1; i++) {
      CCGElem *grid_element = adjacent_edge->boundary_elements[face_index][i];
      element_accumulator_copy(subdiv_ccg, key, grid_element, &tls->accumulators[i]);
    }
  }
}

static void subdiv_ccg_average_grids_boundaries_task(void *__restrict userdata_v,
                                                     const int adjacent_edge_index,
                                                     const ParallelRangeTLS *__restrict tls_v)
{
  AverageGridsBoundariesData *data = userdata_v;
  AverageGridsBoundariesTLSData *tls = tls_v->userdata_chunk;
  SubdivCCG *subdiv_ccg = data->subdiv_ccg;
  CCGKey *key = data->key;
  SubdivCCGAdjacentEdge *adjacent_edge = &subdiv_ccg->adjacent_edges[adjacent_edge_index];
  subdiv_ccg_average_grids_boundary(subdiv_ccg, key, adjacent_edge, tls);
}

static void subdiv_ccg_average_grids_boundaries_finalize(void *__restrict UNUSED(userdata),
                                                         void *__restrict tls_v)
{
  AverageGridsBoundariesTLSData *tls = tls_v;
  MEM_SAFE_FREE(tls->accumulators);
}

typedef struct AverageGridsCornerData {
  SubdivCCG *subdiv_ccg;
  CCGKey *key;
} AverageGridsCornerData;

static void subdiv_ccg_average_grids_corners(SubdivCCG *subdiv_ccg,
                                             CCGKey *key,
                                             SubdivCCGAdjacentVertex *adjacent_vertex)
{
  const int num_adjacent_faces = adjacent_vertex->num_adjacent_faces;
  if (num_adjacent_faces == 1) {
    /* Nothing to average with. */
    return;
  }
  GridElementAccumulator accumulator;
  element_accumulator_init(&accumulator);
  for (int face_index = 0; face_index < num_adjacent_faces; face_index++) {
    CCGElem *grid_element = adjacent_vertex->corner_elements[face_index];
    element_accumulator_add(&accumulator, subdiv_ccg, key, grid_element);
  }
  element_accumulator_mul_fl(&accumulator, 1.0f / (float)num_adjacent_faces);
  /* Copy the averaged value back to all adjacent faces. */
  for (int face_index = 0; face_index < num_adjacent_faces; face_index++) {
    CCGElem *grid_element = adjacent_vertex->corner_elements[face_index];
    element_accumulator_copy(subdiv_ccg, key, grid_element, &accumulator);
  }
}

static void subdiv_ccg_average_grids_corners_task(void *__restrict userdata_v,
                                                  const int adjacent_vertex_index,
                                                  const ParallelRangeTLS *__restrict UNUSED(tls_v))
{
  AverageGridsCornerData *data = userdata_v;
  SubdivCCG *subdiv_ccg = data->subdiv_ccg;
  CCGKey *key = data->key;
  SubdivCCGAdjacentVertex *adjacent_vertex = &subdiv_ccg->adjacent_vertices[adjacent_vertex_index];
  subdiv_ccg_average_grids_corners(subdiv_ccg, key, adjacent_vertex);
}

static void subdiv_ccg_average_all_boundaries(SubdivCCG *subdiv_ccg, CCGKey *key)
{
  ParallelRangeSettings parallel_range_settings;
  BLI_parallel_range_settings_defaults(&parallel_range_settings);
  AverageGridsBoundariesData boundaries_data = {
      .subdiv_ccg = subdiv_ccg,
      .key = key,
  };
  AverageGridsBoundariesTLSData tls_data = {NULL};
  parallel_range_settings.userdata_chunk = &tls_data;
  parallel_range_settings.userdata_chunk_size = sizeof(tls_data);
  parallel_range_settings.func_finalize = subdiv_ccg_average_grids_boundaries_finalize;
  BLI_task_parallel_range(0,
                          subdiv_ccg->num_adjacent_edges,
                          &boundaries_data,
                          subdiv_ccg_average_grids_boundaries_task,
                          &parallel_range_settings);
}

static void subdiv_ccg_average_all_corners(SubdivCCG *subdiv_ccg, CCGKey *key)
{
  ParallelRangeSettings parallel_range_settings;
  BLI_parallel_range_settings_defaults(&parallel_range_settings);
  AverageGridsCornerData corner_data = {
      .subdiv_ccg = subdiv_ccg,
      .key = key,
  };
  BLI_task_parallel_range(0,
                          subdiv_ccg->num_adjacent_vertices,
                          &corner_data,
                          subdiv_ccg_average_grids_corners_task,
                          &parallel_range_settings);
}

static void subdiv_ccg_average_all_boundaries_and_corners(SubdivCCG *subdiv_ccg, CCGKey *key)
{
  subdiv_ccg_average_all_boundaries(subdiv_ccg, key);
  subdiv_ccg_average_all_corners(subdiv_ccg, key);
}

void BKE_subdiv_ccg_average_grids(SubdivCCG *subdiv_ccg)
{
  CCGKey key;
  BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
  ParallelRangeSettings parallel_range_settings;
  BLI_parallel_range_settings_defaults(&parallel_range_settings);
  /* Average inner boundaries of grids (within one face), across faces
   * from different face-corners. */
  AverageInnerGridsData inner_data = {
      .subdiv_ccg = subdiv_ccg,
      .key = &key,
  };
  BLI_task_parallel_range(0,
                          subdiv_ccg->num_faces,
                          &inner_data,
                          subdiv_ccg_average_inner_grids_task,
                          &parallel_range_settings);
  subdiv_ccg_average_all_boundaries_and_corners(subdiv_ccg, &key);
}

typedef struct StitchFacesInnerGridsData {
  SubdivCCG *subdiv_ccg;
  CCGKey *key;
  struct CCGFace **effected_ccg_faces;
} StitchFacesInnerGridsData;

static void subdiv_ccg_stitch_face_inner_grids_task(
    void *__restrict userdata_v,
    const int face_index,
    const ParallelRangeTLS *__restrict UNUSED(tls_v))
{
  StitchFacesInnerGridsData *data = userdata_v;
  SubdivCCG *subdiv_ccg = data->subdiv_ccg;
  CCGKey *key = data->key;
  struct CCGFace **effected_ccg_faces = data->effected_ccg_faces;
  struct CCGFace *effected_ccg_face = effected_ccg_faces[face_index];
  SubdivCCGFace *face = (SubdivCCGFace *)effected_ccg_face;
  subdiv_ccg_average_inner_face_grids(subdiv_ccg, key, face);
}

void BKE_subdiv_ccg_average_stitch_faces(SubdivCCG *subdiv_ccg,
                                         struct CCGFace **effected_faces,
                                         int num_effected_faces)
{
  CCGKey key;
  BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
  StitchFacesInnerGridsData data = {
      .subdiv_ccg = subdiv_ccg,
      .key = &key,
      .effected_ccg_faces = effected_faces,
  };
  ParallelRangeSettings parallel_range_settings;
  BLI_parallel_range_settings_defaults(&parallel_range_settings);
  BLI_task_parallel_range(0,
                          num_effected_faces,
                          &data,
                          subdiv_ccg_stitch_face_inner_grids_task,
                          &parallel_range_settings);
  /* TODO(sergey): Only average elements which are adjacent to modified
   * faces. */
  subdiv_ccg_average_all_boundaries_and_corners(subdiv_ccg, &key);
}

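/* Per-grid contribution to the counters below: a grid of size S has S * S
 * vertices, 2 * S * (S - 1) edges and (S - 1) * (S - 1) quad faces with four
 * loops each. E.g. for grid_size = 3 every grid contributes 9 vertices,
 * 12 edges, 4 faces and 16 loops. */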
void BKE_subdiv_ccg_topology_counters(const SubdivCCG *subdiv_ccg,
                                      int *r_num_vertices,
                                      int *r_num_edges,
                                      int *r_num_faces,
                                      int *r_num_loops)
{
  const int num_grids = subdiv_ccg->num_grids;
  const int grid_size = subdiv_ccg->grid_size;
  const int grid_area = grid_size * grid_size;
  const int num_edges_per_grid = 2 * (grid_size * (grid_size - 1));
  *r_num_vertices = num_grids * grid_area;
  *r_num_edges = num_grids * num_edges_per_grid;
  *r_num_faces = num_grids * (grid_size - 1) * (grid_size - 1);
  *r_num_loops = *r_num_faces * 4;
}