Cleanup: remove redundant doxygen \file argument
[blender.git] / source / blender / blenkernel / intern / subdiv_ccg.c
1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  *
16  * The Original Code is Copyright (C) 2018 by Blender Foundation.
17  * All rights reserved.
18  */
19
20 /** \file \ingroup bke
21  */
22
23 #include "BKE_subdiv_ccg.h"
24
25 #include "DNA_mesh_types.h"
26 #include "DNA_meshdata_types.h"
27
28 #include "MEM_guardedalloc.h"
29
30 #include "BLI_math_bits.h"
31 #include "BLI_math_vector.h"
32 #include "BLI_task.h"
33
34 #include "BKE_DerivedMesh.h"
35 #include "BKE_ccg.h"
36 #include "BKE_mesh.h"
37 #include "BKE_subdiv.h"
38 #include "BKE_subdiv_eval.h"
39
40 #include "opensubdiv_topology_refiner_capi.h"
41
42 /* =============================================================================
43  * Generally useful internal helpers.
44  */
45
46 /* Number of floats in per-vertex elements.  */
47 static int num_element_float_get(const SubdivCCG *subdiv_ccg)
48 {
49         /* We always have 3 floats for coordinate. */
50         int num_floats = 3;
51         if (subdiv_ccg->has_normal) {
52                 num_floats += 3;
53         }
54         if (subdiv_ccg->has_mask) {
55                 num_floats += 1;
56         }
57         return num_floats;
58 }
59
60 /* Per-vertex element size in bytes. */
61 static int element_size_bytes_get(const SubdivCCG *subdiv_ccg)
62 {
63         return sizeof(float) * num_element_float_get(subdiv_ccg);
64 }
65
66 /* =============================================================================
67  * Internal helpers for CCG creation.
68  */
69
70 static void subdiv_ccg_init_layers(SubdivCCG *subdiv_ccg,
71                                    const SubdivToCCGSettings *settings)
72 {
73         /* CCG always contains coordinates. Rest of layers are coming after them. */
74         int layer_offset = sizeof(float) * 3;
75         /* Mask. */
76         if (settings->need_mask) {
77                 subdiv_ccg->has_mask = true;
78                 subdiv_ccg->mask_offset = layer_offset;
79                 layer_offset += sizeof(float);
80         }
81         else {
82                 subdiv_ccg->has_mask = false;
83                 subdiv_ccg->mask_offset = -1;
84         }
85         /* Normals.
86          *
87          * NOTE: Keep them at the end, matching old CCGDM. Doesn't really matter
88          * here, but some other area might in theory depend memory layout. */
89         if (settings->need_normal) {
90                 subdiv_ccg->has_normal = true;
91                 subdiv_ccg->normal_offset = layer_offset;
92                 layer_offset += sizeof(float) * 3;
93         }
94         else {
95                 subdiv_ccg->has_normal = false;
96                 subdiv_ccg->normal_offset = -1;
97         }
98 }
99
100 /* TODO(sergey): Make it more accessible function. */
101 static int topology_refiner_count_face_corners(
102         OpenSubdiv_TopologyRefiner *topology_refiner)
103 {
104         const int num_faces = topology_refiner->getNumFaces(topology_refiner);
105         int num_corners = 0;
106         for (int face_index = 0; face_index < num_faces; face_index++) {
107                 num_corners += topology_refiner->getNumFaceVertices(
108                         topology_refiner, face_index);
109         }
110         return num_corners;
111 }
112
113 /* NOTE: Grid size and layer flags are to be filled in before calling this
114  * function. */
115 static void subdiv_ccg_alloc_elements(SubdivCCG *subdiv_ccg, Subdiv *subdiv)
116 {
117         OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
118         const int element_size = element_size_bytes_get(subdiv_ccg);
119         /* Allocate memory for surface grids. */
120         const int num_faces = topology_refiner->getNumFaces(topology_refiner);
121         const int num_grids = topology_refiner_count_face_corners(topology_refiner);
122         const int grid_size = BKE_subdiv_grid_size_from_level(subdiv_ccg->level);
123         const int grid_area = grid_size * grid_size;
124         subdiv_ccg->num_grids = num_grids;
125         subdiv_ccg->grids =
126                 MEM_calloc_arrayN(num_grids, sizeof(CCGElem *), "subdiv ccg grids");
127         subdiv_ccg->grids_storage = MEM_calloc_arrayN(
128                 num_grids, ((size_t)grid_area) * element_size,
129                 "subdiv ccg grids storage");
130         const size_t grid_size_in_bytes = (size_t)grid_area * element_size;
131         for (int grid_index = 0; grid_index < num_grids; grid_index++) {
132                 const size_t grid_offset = grid_size_in_bytes * grid_index;
133                 subdiv_ccg->grids[grid_index] =
134                         (CCGElem *)&subdiv_ccg->grids_storage[grid_offset];
135         }
136         /* Grid material flags. */
137         subdiv_ccg->grid_flag_mats = MEM_calloc_arrayN(
138                 num_grids, sizeof(DMFlagMat), "ccg grid material flags");
139         /* Grid hidden flags. */
140         subdiv_ccg->grid_hidden = MEM_calloc_arrayN(
141                 num_grids, sizeof(BLI_bitmap *), "ccg grid material flags");
142         for (int grid_index = 0; grid_index < num_grids; grid_index++) {
143                 subdiv_ccg->grid_hidden[grid_index] =
144                         BLI_BITMAP_NEW(grid_area, "ccg grid hidden");
145         }
146         /* TODO(sergey): Allocate memory for loose elements. */
147         /* Allocate memory for faces. */
148         subdiv_ccg->num_faces = num_faces;
149         if (num_faces) {
150                 subdiv_ccg->faces = MEM_calloc_arrayN(
151                         num_faces, sizeof(SubdivCCGFace), "Subdiv CCG faces");
152                 subdiv_ccg->grid_faces = MEM_calloc_arrayN(
153                         num_grids, sizeof(SubdivCCGFace *), "Subdiv CCG grid faces");
154         }
155 }
156
157 /* =============================================================================
158  * Grids evaluation.
159  */
160
/* Data shared between all tasks which evaluate CCG grids of one SubdivCCG. */
typedef struct CCGEvalGridsData {
        SubdivCCG *subdiv_ccg;
        Subdiv *subdiv;
        /* Per-coarse-face offsets into the ptex face array. */
        int *face_ptex_offset;
        /* Evaluator of the optional mask layer; may be NULL. */
        SubdivCCGMask *mask_evaluator;
} CCGEvalGridsData;
167
168 static void subdiv_ccg_eval_grid_element(
169         CCGEvalGridsData *data,
170         const int ptex_face_index,
171         const float u, const float v,
172         unsigned char *element)
173 {
174         Subdiv *subdiv = data->subdiv;
175         SubdivCCG *subdiv_ccg = data->subdiv_ccg;
176         if (subdiv->displacement_evaluator != NULL) {
177                 BKE_subdiv_eval_final_point(
178                         subdiv, ptex_face_index, u, v, (float *)element);
179         }
180         else if (subdiv_ccg->has_normal) {
181                 BKE_subdiv_eval_limit_point_and_normal(
182                         subdiv, ptex_face_index, u, v,
183                         (float *)element,
184                         (float *)(element + subdiv_ccg->normal_offset));
185         }
186         else {
187                 BKE_subdiv_eval_limit_point(
188                         subdiv, ptex_face_index, u, v, (float *)element);
189         }
190         if (subdiv_ccg->has_mask) {
191                 float *mask_value_ptr = (float *)(element + subdiv_ccg->mask_offset);
192                 if (data->mask_evaluator != NULL) {
193                         *mask_value_ptr = data->mask_evaluator->eval_mask(
194                                 data->mask_evaluator, ptex_face_index, u, v);
195                 }
196                 else {
197                         *mask_value_ptr = 0.0f;
198                 }
199         }
200 }
201
202 static void subdiv_ccg_eval_regular_grid(CCGEvalGridsData *data,
203                                          const int face_index)
204 {
205         SubdivCCG *subdiv_ccg = data->subdiv_ccg;
206         const int ptex_face_index = data->face_ptex_offset[face_index];
207         const int grid_size = subdiv_ccg->grid_size;
208         const float grid_size_1_inv = 1.0f / (float)(grid_size - 1);
209         const int element_size = element_size_bytes_get(subdiv_ccg);
210         SubdivCCGFace *faces = subdiv_ccg->faces;
211         SubdivCCGFace **grid_faces = subdiv_ccg->grid_faces;
212         const SubdivCCGFace *face = &faces[face_index];
213         for (int corner = 0; corner < face->num_grids; corner++) {
214                 const int grid_index = face->start_grid_index + corner;
215                 unsigned char *grid = (unsigned char *)subdiv_ccg->grids[grid_index];
216                 for (int y = 0; y < grid_size; y++) {
217                         const float grid_v = (float)y * grid_size_1_inv;
218                         for (int x = 0; x < grid_size; x++) {
219                                 const float grid_u = (float)x * grid_size_1_inv;
220                                 float u, v;
221                                 BKE_subdiv_rotate_grid_to_quad(
222                                         corner, grid_u, grid_v, &u, &v);
223                                 const size_t grid_element_index = (size_t)y * grid_size + x;
224                                 const size_t grid_element_offset =
225                                         grid_element_index * element_size;
226                                 subdiv_ccg_eval_grid_element(
227                                         data,
228                                         ptex_face_index, u, v,
229                                         &grid[grid_element_offset]);
230                         }
231                 }
232                 /* Assign grid's face. */
233                 grid_faces[grid_index] = &faces[face_index];
234         }
235 }
236
237 static void subdiv_ccg_eval_special_grid(CCGEvalGridsData *data,
238                                          const int face_index)
239 {
240         SubdivCCG *subdiv_ccg = data->subdiv_ccg;
241         const int grid_size = subdiv_ccg->grid_size;
242         const float grid_size_1_inv = 1.0f / (float)(grid_size - 1);
243         const int element_size = element_size_bytes_get(subdiv_ccg);
244         SubdivCCGFace *faces = subdiv_ccg->faces;
245         SubdivCCGFace **grid_faces = subdiv_ccg->grid_faces;
246         const SubdivCCGFace *face = &faces[face_index];
247         for (int corner = 0; corner < face->num_grids; corner++) {
248                 const int grid_index = face->start_grid_index + corner;
249                 unsigned char *grid = (unsigned char *)subdiv_ccg->grids[grid_index];
250                 for (int y = 0; y < grid_size; y++) {
251                         const float u = 1.0f - ((float)y * grid_size_1_inv);
252                         for (int x = 0; x < grid_size; x++) {
253                                 const float v = 1.0f - ((float)x * grid_size_1_inv);
254                                 const int ptex_face_index =
255                                         data->face_ptex_offset[face_index] + corner;
256                                 const size_t grid_element_index = (size_t)y * grid_size + x;
257                                 const size_t grid_element_offset =
258                                         grid_element_index * element_size;
259                                 subdiv_ccg_eval_grid_element(
260                                         data,
261                                         ptex_face_index, u, v,
262                                         &grid[grid_element_offset]);
263                         }
264                 }
265                 /* Assign grid's face. */
266                 grid_faces[grid_index] = &faces[face_index];
267         }
268 }
269
270 static void subdiv_ccg_eval_grids_task(
271         void *__restrict userdata_v,
272         const int face_index,
273         const ParallelRangeTLS *__restrict UNUSED(tls))
274 {
275         CCGEvalGridsData *data = userdata_v;
276         SubdivCCG *subdiv_ccg = data->subdiv_ccg;
277         SubdivCCGFace *face = &subdiv_ccg->faces[face_index];
278         if (face->num_grids == 4) {
279                 subdiv_ccg_eval_regular_grid(data, face_index);
280         }
281         else {
282                 subdiv_ccg_eval_special_grid(data, face_index);
283         }
284 }
285
286 static bool subdiv_ccg_evaluate_grids(
287         SubdivCCG *subdiv_ccg,
288         Subdiv *subdiv,
289         SubdivCCGMask *mask_evaluator)
290 {
291         OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
292         const int num_faces = topology_refiner->getNumFaces(topology_refiner);
293         /* Initialize data passed to all the tasks. */
294         CCGEvalGridsData data;
295         data.subdiv_ccg = subdiv_ccg;
296         data.subdiv = subdiv;
297         data.face_ptex_offset = BKE_subdiv_face_ptex_offset_get(subdiv);
298         data.mask_evaluator = mask_evaluator;
299         /* Threaded grids evaluation. */
300         ParallelRangeSettings parallel_range_settings;
301         BLI_parallel_range_settings_defaults(&parallel_range_settings);
302         BLI_task_parallel_range(0, num_faces,
303                                 &data,
304                                 subdiv_ccg_eval_grids_task,
305                                 &parallel_range_settings);
306         /* If displacement is used, need to calculate normals after all final
307          * coordinates are known. */
308         if (subdiv->displacement_evaluator != NULL) {
309                 BKE_subdiv_ccg_recalc_normals(subdiv_ccg);
310         }
311         return true;
312 }
313
314 /* Initialize face descriptors, assuming memory for them was already
315  * allocated. */
316 static void subdiv_ccg_init_faces(SubdivCCG *subdiv_ccg)
317 {
318         Subdiv *subdiv = subdiv_ccg->subdiv;
319         OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
320         const int num_faces = subdiv_ccg->num_faces;
321         int corner_index = 0;
322         for (int face_index = 0; face_index < num_faces; face_index++) {
323                 const int num_corners = topology_refiner->getNumFaceVertices(
324                         topology_refiner, face_index);
325                 subdiv_ccg->faces[face_index].num_grids = num_corners;
326                 subdiv_ccg->faces[face_index].start_grid_index = corner_index;
327                 corner_index += num_corners;
328         }
329 }
330
/* Scratch integer buffer which only touches the heap for sizes which do not
 * fit into its small stack-allocated array.
 *
 * TODO(sergey): Consider making it generic enough to be fit into BLI. */
typedef struct StaticOrHeapIntStorage {
        /* Small fixed buffer, used whenever the requested size fits. */
        int static_storage[64];
        int static_storage_size;
        /* Heap fallback; NULL until a request exceeds the static buffer. */
        int *heap_storage;
        int heap_storage_size;
} StaticOrHeapIntStorage;
338
339 static void static_or_heap_storage_init(StaticOrHeapIntStorage *storage)
340 {
341         storage->static_storage_size =
342                 sizeof(storage->static_storage) / sizeof(*storage->static_storage);
343         storage->heap_storage = NULL;
344         storage->heap_storage_size = 0;
345 }
346
347 static int *static_or_heap_storage_get(StaticOrHeapIntStorage *storage,
348                                        int size)
349 {
350         /* Requested size small enough to be fit into stack allocated memory. */
351         if (size <= storage->static_storage_size) {
352                 return storage->static_storage;
353         }
354         /* Make sure heap ius big enough. */
355         if (size > storage->heap_storage_size) {
356                 MEM_SAFE_FREE(storage->heap_storage);
357                 storage->heap_storage = MEM_malloc_arrayN(
358                         size, sizeof(int), "int storage");
359                 storage->heap_storage_size = size;
360         }
361         return storage->heap_storage;
362 }
363
/* Free heap memory (if any) used by the storage. Safe to call when the heap
 * buffer was never allocated. */
static void static_or_heap_storage_free(StaticOrHeapIntStorage *storage)
{
        MEM_SAFE_FREE(storage->heap_storage);
}
368
369 static void subdiv_ccg_allocate_adjacent_edges(SubdivCCG *subdiv_ccg,
370                                                const int num_edges)
371 {
372         subdiv_ccg->num_adjacent_edges = num_edges;
373         subdiv_ccg->adjacent_edges = MEM_calloc_arrayN(
374                 subdiv_ccg->num_adjacent_edges,
375                 sizeof(*subdiv_ccg->adjacent_edges),
376                 "ccg adjacent edges");
377 }
378
379 /* Returns storage where boundary elements are to be stored. */
380 static CCGElem **subdiv_ccg_adjacent_edge_add_face(
381         SubdivCCG *subdiv_ccg,
382         SubdivCCGAdjacentEdge *adjacent_edge,
383         SubdivCCGFace *face)
384 {
385         const int grid_size = subdiv_ccg->grid_size * 2;
386         const int adjacent_face_index = adjacent_edge->num_adjacent_faces;
387         ++adjacent_edge->num_adjacent_faces;
388         /* Store new adjacent face. */
389         adjacent_edge->faces = MEM_reallocN(
390                 adjacent_edge->faces,
391                 adjacent_edge->num_adjacent_faces * sizeof(*adjacent_edge->faces));
392         adjacent_edge->faces[adjacent_face_index] = face;
393         /* Allocate memory for the boundary elements. */
394         adjacent_edge->boundary_elements = MEM_reallocN(
395                 adjacent_edge->boundary_elements,
396                 adjacent_edge->num_adjacent_faces *
397                         sizeof(*adjacent_edge->boundary_elements));
398         adjacent_edge->boundary_elements[adjacent_face_index] =
399                 MEM_malloc_arrayN(
400                         grid_size * 2, sizeof(CCGElem *), "ccg adjacent boundary");
401         return adjacent_edge->boundary_elements[adjacent_face_index];
402 }
403
/* Initialize per-edge adjacency: for every coarse edge, record which faces
 * use it and collect pointers to the CCG elements lying along the edge (taken
 * from the two grids which meet at the edge inside each face). */
static void subdiv_ccg_init_faces_edge_neighborhood(SubdivCCG *subdiv_ccg)
{
        Subdiv *subdiv = subdiv_ccg->subdiv;
        SubdivCCGFace *faces = subdiv_ccg->faces;
        OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
        const int num_edges = topology_refiner->getNumEdges(topology_refiner);
        const int grid_size = subdiv_ccg->grid_size;
        if (num_edges == 0) {
                /* Early output, nothing to do in this case. */
                return;
        }
        subdiv_ccg_allocate_adjacent_edges(subdiv_ccg, num_edges);
        /* Initialize storage. */
        StaticOrHeapIntStorage face_vertices_storage;
        StaticOrHeapIntStorage face_edges_storage;
        static_or_heap_storage_init(&face_vertices_storage);
        static_or_heap_storage_init(&face_edges_storage);
        /* Key to access elements. */
        CCGKey key;
        BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
        /* Store adjacency for all faces. */
        const int num_faces = subdiv_ccg->num_faces;
        for (int face_index = 0; face_index < num_faces; face_index++) {
                SubdivCCGFace *face = &faces[face_index];
                /* A face has as many edges (and grids) as it has corners. */
                const int num_face_grids = face->num_grids;
                const int num_face_edges = num_face_grids;
                int *face_vertices = static_or_heap_storage_get(
                        &face_vertices_storage, num_face_edges);
                topology_refiner->getFaceVertices(
                        topology_refiner, face_index, face_vertices);
                /* Note that order of edges is same as order of MLoops, which also
                 * means it's the same as order of grids. */
                int *face_edges = static_or_heap_storage_get(
                        &face_edges_storage, num_face_edges);
                topology_refiner->getFaceEdges(
                        topology_refiner, face_index, face_edges);
                /* Store grids adjacency for this edge. */
                for (int corner = 0; corner < num_face_edges; corner++) {
                        const int vertex_index = face_vertices[corner];
                        const int edge_index = face_edges[corner];
                        int edge_vertices[2];
                        topology_refiner->getEdgeVertices(
                                topology_refiner, edge_index, edge_vertices);
                        /* Edge is "flipped" when its first vertex is not the
                         * corner's vertex: the boundary is then walked in the
                         * opposite direction. */
                        const bool is_edge_flipped = (edge_vertices[0] != vertex_index);
                        /* Grid which is adjacent to the current corner. */
                        const int current_grid_index = face->start_grid_index + corner;
                        CCGElem *current_grid = subdiv_ccg->grids[current_grid_index];
                        /* Grid which is adjacent to the next corner. */
                        const int next_grid_index =
                                face->start_grid_index + (corner + 1) % num_face_grids;
                        CCGElem *next_grid = subdiv_ccg->grids[next_grid_index];
                        /* Add new face to the adjacent edge. */
                        SubdivCCGAdjacentEdge *adjacent_edge =
                                &subdiv_ccg->adjacent_edges[edge_index];
                        CCGElem **boundary_elements = subdiv_ccg_adjacent_edge_add_face(
                                subdiv_ccg, adjacent_edge, face);
                        /* Fill CCG elements along the edge: the boundary runs
                         * along the last row/column of the two grids which meet
                         * at this edge, 2 * grid_size elements in total. */
                        int boundary_element_index = 0;
                        if (is_edge_flipped) {
                                for (int i = 0; i < grid_size; i++) {
                                        boundary_elements[boundary_element_index++] =
                                                CCG_grid_elem(&key,
                                                              next_grid,
                                                              grid_size - i - 1,
                                                              grid_size - 1);
                                }
                                for (int i = 0; i < grid_size; i++) {
                                        boundary_elements[boundary_element_index++] =
                                                CCG_grid_elem(&key,
                                                              current_grid,
                                                              grid_size - 1,
                                                              i);
                                }
                        }
                        else {
                                for (int i = 0; i < grid_size; i++) {
                                        boundary_elements[boundary_element_index++] =
                                                CCG_grid_elem(&key,
                                                              current_grid,
                                                              grid_size - 1,
                                                              grid_size - i - 1);
                                }
                                for (int i = 0; i < grid_size; i++) {
                                        boundary_elements[boundary_element_index++] =
                                                CCG_grid_elem(&key,
                                                              next_grid,
                                                              i,
                                                              grid_size - 1);
                                }
                        }
                }
        }
        /* Free possibly heap-allocated storage. */
        static_or_heap_storage_free(&face_vertices_storage);
        static_or_heap_storage_free(&face_edges_storage);
}
500
501 static void subdiv_ccg_allocate_adjacent_vertices(SubdivCCG *subdiv_ccg,
502                                                   const int num_vertices)
503 {
504         subdiv_ccg->num_adjacent_vertices = num_vertices;
505         subdiv_ccg->adjacent_vertices = MEM_calloc_arrayN(
506                 subdiv_ccg->num_adjacent_vertices,
507                 sizeof(*subdiv_ccg->adjacent_vertices),
508                 "ccg adjacent vertices");
509 }
510
511 /* Returns storage where corner elements are to be stored. This is a pointer
512  * to the actual storage. */
513 static CCGElem **subdiv_ccg_adjacent_vertex_add_face(
514         SubdivCCGAdjacentVertex *adjacent_vertex,
515         SubdivCCGFace *face)
516 {
517         const int adjacent_face_index = adjacent_vertex->num_adjacent_faces;
518         ++adjacent_vertex->num_adjacent_faces;
519         /* Store new adjacent face. */
520         adjacent_vertex->faces = MEM_reallocN(
521                 adjacent_vertex->faces,
522                 adjacent_vertex->num_adjacent_faces *
523                         sizeof(*adjacent_vertex->faces));
524         adjacent_vertex->faces[adjacent_face_index] = face;
525         /* Allocate memory for the boundary elements. */
526         adjacent_vertex->corner_elements = MEM_reallocN(
527                 adjacent_vertex->corner_elements,
528                 adjacent_vertex->num_adjacent_faces *
529                         sizeof(*adjacent_vertex->corner_elements));
530         return &adjacent_vertex->corner_elements[adjacent_face_index];
531 }
532
/* Initialize per-vertex adjacency: for every coarse vertex, record which
 * faces use it and the CCG element which coincides with the vertex (the
 * far corner of the face's grid for that vertex). */
static void subdiv_ccg_init_faces_vertex_neighborhood(SubdivCCG *subdiv_ccg)
{
        Subdiv *subdiv = subdiv_ccg->subdiv;
        SubdivCCGFace *faces = subdiv_ccg->faces;
        OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
        const int num_vertices =
                topology_refiner->getNumVertices(topology_refiner);
        const int grid_size = subdiv_ccg->grid_size;
        if (num_vertices == 0) {
                /* Early output, nothing to do in this case. */
                return;
        }
        subdiv_ccg_allocate_adjacent_vertices(subdiv_ccg, num_vertices);
        /* Initialize storage. */
        StaticOrHeapIntStorage face_vertices_storage;
        static_or_heap_storage_init(&face_vertices_storage);
        /* Key to access elements. */
        CCGKey key;
        BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
        /* Store adjacency for all faces. */
        const int num_faces = subdiv_ccg->num_faces;
        for (int face_index = 0; face_index < num_faces; face_index++) {
                SubdivCCGFace *face = &faces[face_index];
                /* A face has as many grids as it has corners. */
                const int num_face_grids = face->num_grids;
                const int num_face_edges = num_face_grids;
                int *face_vertices = static_or_heap_storage_get(
                        &face_vertices_storage, num_face_edges);
                topology_refiner->getFaceVertices(
                        topology_refiner, face_index, face_vertices);
                for (int corner = 0; corner < num_face_edges; corner++) {
                        const int vertex_index = face_vertices[corner];
                        /* Grid which is adjacent to the current corner. */
                        const int grid_index = face->start_grid_index + corner;
                        CCGElem *grid = subdiv_ccg->grids[grid_index];
                        /* Add new face to the adjacent edge. */
                        SubdivCCGAdjacentVertex *adjacent_vertex =
                                &subdiv_ccg->adjacent_vertices[vertex_index];
                        CCGElem **corner_element = subdiv_ccg_adjacent_vertex_add_face(
                                adjacent_vertex, face);
                        /* The grid element at the far corner coincides with
                         * the coarse vertex. */
                        *corner_element = CCG_grid_elem(
                                &key, grid, grid_size - 1, grid_size - 1);
                }
        }
        /* Free possibly heap-allocated storage. */
        static_or_heap_storage_free(&face_vertices_storage);
}
579
/* Initialize all edge and vertex adjacency information of the faces. */
static void subdiv_ccg_init_faces_neighborhood(SubdivCCG *subdiv_ccg)
{
        subdiv_ccg_init_faces_edge_neighborhood(subdiv_ccg);
        subdiv_ccg_init_faces_vertex_neighborhood(subdiv_ccg);
}
585
586 /* =============================================================================
587  * Creation / evaluation.
588  */
589
590 SubdivCCG *BKE_subdiv_to_ccg(
591         Subdiv *subdiv,
592         const SubdivToCCGSettings *settings,
593         SubdivCCGMask *mask_evaluator)
594 {
595         BKE_subdiv_stats_begin(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_CCG);
596         SubdivCCG *subdiv_ccg = MEM_callocN(sizeof(SubdivCCG), "subdiv ccg");
597         subdiv_ccg->subdiv = subdiv;
598         subdiv_ccg->level = bitscan_forward_i(settings->resolution - 1);
599         subdiv_ccg->grid_size = BKE_subdiv_grid_size_from_level(subdiv_ccg->level);
600         subdiv_ccg_init_layers(subdiv_ccg, settings);
601         subdiv_ccg_alloc_elements(subdiv_ccg, subdiv);
602         subdiv_ccg_init_faces(subdiv_ccg);
603         subdiv_ccg_init_faces_neighborhood(subdiv_ccg);
604         if (!subdiv_ccg_evaluate_grids(subdiv_ccg, subdiv, mask_evaluator)) {
605                 BKE_subdiv_ccg_destroy(subdiv_ccg);
606                 BKE_subdiv_stats_end(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_CCG);
607                 return NULL;
608         }
609         BKE_subdiv_stats_end(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_CCG);
610         return subdiv_ccg;
611 }
612
613 Mesh *BKE_subdiv_to_ccg_mesh(
614         Subdiv *subdiv,
615         const SubdivToCCGSettings *settings,
616         const Mesh *coarse_mesh)
617 {
618         /* Make sure evaluator is ready. */
619         BKE_subdiv_stats_begin(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_CCG);
620         if (!BKE_subdiv_eval_update_from_mesh(subdiv, coarse_mesh)) {
621                 if (coarse_mesh->totpoly) {
622                         return false;
623                 }
624         }
625         BKE_subdiv_stats_end(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_CCG);
626         SubdivCCGMask mask_evaluator;
627         bool has_mask = BKE_subdiv_ccg_mask_init_from_paint(
628         &mask_evaluator, coarse_mesh);
629         SubdivCCG *subdiv_ccg = BKE_subdiv_to_ccg(
630             subdiv, settings, has_mask ? &mask_evaluator : NULL);
631         if (has_mask) {
632                 mask_evaluator.free(&mask_evaluator);
633         }
634         if (subdiv_ccg == NULL) {
635                 return NULL;
636         }
637         Mesh *result = BKE_mesh_new_nomain_from_template(
638                 coarse_mesh, 0, 0, 0, 0, 0);
639         result->runtime.subdiv_ccg = subdiv_ccg;
640         return result;
641 }
642
/* Free the CCG with all its grids and adjacency maps. Also frees the wrapped
 * Subdiv descriptor (the CCG owns it, see BKE_subdiv_free call below). */
void BKE_subdiv_ccg_destroy(SubdivCCG *subdiv_ccg)
{
        const int num_grids = subdiv_ccg->num_grids;
        MEM_SAFE_FREE(subdiv_ccg->grids);
        MEM_SAFE_FREE(subdiv_ccg->grids_storage);
        MEM_SAFE_FREE(subdiv_ccg->edges);
        MEM_SAFE_FREE(subdiv_ccg->vertices);
        MEM_SAFE_FREE(subdiv_ccg->grid_flag_mats);
        if (subdiv_ccg->grid_hidden != NULL) {
                /* Hidden flags are allocated as one bitmap per grid. */
                for (int grid_index = 0; grid_index < num_grids; grid_index++) {
                        MEM_freeN(subdiv_ccg->grid_hidden[grid_index]);
                }
                MEM_freeN(subdiv_ccg->grid_hidden);
        }
        if (subdiv_ccg->subdiv != NULL) {
                BKE_subdiv_free(subdiv_ccg->subdiv);
        }
        MEM_SAFE_FREE(subdiv_ccg->faces);
        MEM_SAFE_FREE(subdiv_ccg->grid_faces);
        /* Free map of adjacent edges. */
        for (int i = 0; i < subdiv_ccg->num_adjacent_edges; i++) {
                SubdivCCGAdjacentEdge *adjacent_edge = &subdiv_ccg->adjacent_edges[i];
                /* Per-face boundary element arrays are allocated individually. */
                for (int face_index = 0;
                     face_index < adjacent_edge->num_adjacent_faces;
                     face_index++)
                {
                        MEM_SAFE_FREE(adjacent_edge->boundary_elements[face_index]);
                }
                MEM_SAFE_FREE(adjacent_edge->faces);
                MEM_SAFE_FREE(adjacent_edge->boundary_elements);
        }
        MEM_SAFE_FREE(subdiv_ccg->adjacent_edges);
        /* Free map of adjacent vertices. */
        for (int i = 0; i < subdiv_ccg->num_adjacent_vertices; i++) {
                SubdivCCGAdjacentVertex *adjacent_vertex =
                        &subdiv_ccg->adjacent_vertices[i];
                MEM_SAFE_FREE(adjacent_vertex->faces);
                MEM_SAFE_FREE(adjacent_vertex->corner_elements);
        }
        MEM_SAFE_FREE(subdiv_ccg->adjacent_vertices);
        MEM_freeN(subdiv_ccg);
}
685
686 void BKE_subdiv_ccg_key(CCGKey *key, const SubdivCCG *subdiv_ccg, int level)
687 {
688         key->level = level;
689         key->elem_size = element_size_bytes_get(subdiv_ccg);
690         key->grid_size = BKE_subdiv_grid_size_from_level(level);
691         key->grid_area = key->grid_size * key->grid_size;
692         key->grid_bytes = key->elem_size * key->grid_area;
693
694         key->normal_offset = subdiv_ccg->normal_offset;
695         key->mask_offset = subdiv_ccg->mask_offset;
696
697         key->has_normals = subdiv_ccg->has_normal;
698         key->has_mask = subdiv_ccg->has_mask;
699 }
700
/* Fill in a CCG key for the topmost (finest) level of the SubdivCCG. */
void BKE_subdiv_ccg_key_top_level(CCGKey *key, const SubdivCCG *subdiv_ccg)
{
	BKE_subdiv_ccg_key(key, subdiv_ccg, subdiv_ccg->level);
}
705
706 /* =============================================================================
707  * Normals.
708  */
709
/* Userdata which is shared by all workers of the inner-normals tasks. */
typedef struct RecalcInnerNormalsData {
	SubdivCCG *subdiv_ccg;
	CCGKey *key;
} RecalcInnerNormalsData;

/* Per-thread storage: face normals of a single grid, lazily allocated. */
typedef struct RecalcInnerNormalsTLSData {
	float (*face_normals)[3];
} RecalcInnerNormalsTLSData;
718
719 /* Evaluate high-res face normals, for faces which corresponds to grid elements
720  *
721  *   {(x, y), {x + 1, y}, {x + 1, y + 1}, {x, y + 1}}
722  *
723  * The result is stored in normals storage from TLS. */
724 static void subdiv_ccg_recalc_inner_face_normals(
725         RecalcInnerNormalsData *data,
726         RecalcInnerNormalsTLSData *tls,
727         const int grid_index)
728 {
729         SubdivCCG *subdiv_ccg = data->subdiv_ccg;
730         CCGKey *key = data->key;
731         const int grid_size = subdiv_ccg->grid_size;
732         const int grid_size_1 = grid_size - 1;
733         CCGElem *grid = subdiv_ccg->grids[grid_index];
734         if (tls->face_normals == NULL) {
735                 tls->face_normals = MEM_malloc_arrayN(
736                         grid_size_1 * grid_size_1,
737                         3 * sizeof(float),
738                         "CCG TLS normals");
739         }
740         for (int y = 0; y < grid_size -1; y++) {
741                 for (int x = 0; x < grid_size - 1; x++) {
742                         CCGElem *grid_elements[4] = {
743                                 CCG_grid_elem(key, grid, x, y + 1),
744                                 CCG_grid_elem(key, grid, x + 1, y + 1),
745                                 CCG_grid_elem(key, grid, x + 1, y),
746                                 CCG_grid_elem(key, grid, x, y),
747                         };
748                         float *co[4] = {
749                             CCG_elem_co(key, grid_elements[0]),
750                             CCG_elem_co(key, grid_elements[1]),
751                             CCG_elem_co(key, grid_elements[2]),
752                             CCG_elem_co(key, grid_elements[3]),
753                         };
754                         const int face_index = y * grid_size_1 + x;
755                         float *face_normal = tls->face_normals[face_index];
756                         normal_quad_v3(face_normal, co[0], co[1], co[2], co[3]);
757                 }
758         }
759 }
760
/* Average normals at every grid element, using adjacent faces normals.
 *
 * Element (x, y) touches up to four quads of the grid (face (fx, fy) spans
 * elements (fx, fy)..(fx + 1, fy + 1)); their normals were computed by
 * subdiv_ccg_recalc_inner_face_normals() into TLS storage. */
static void subdiv_ccg_average_inner_face_normals(
	RecalcInnerNormalsData *data,
	RecalcInnerNormalsTLSData *tls,
	const int grid_index)
{
	SubdivCCG *subdiv_ccg = data->subdiv_ccg;
	CCGKey *key = data->key;
	const int grid_size = subdiv_ccg->grid_size;
	const int grid_size_1 = grid_size - 1;
	CCGElem *grid = subdiv_ccg->grids[grid_index];
	const float (*face_normals)[3] = tls->face_normals;
	for (int y = 0; y < grid_size; y++) {
		for (int x = 0; x < grid_size; x++) {
			float normal_acc[3] = {0.0f, 0.0f, 0.0f};
			int counter = 0;
			/* Accumulate normals of all adjacent faces. */
			/* Face to the upper-right of the element. */
			if (x < grid_size_1 && y < grid_size_1) {
				add_v3_v3(normal_acc, face_normals[y * grid_size_1 + x]);
				counter++;
			}
			/* Faces to the left of the element. */
			if (x >= 1) {
				if (y < grid_size_1) {
					add_v3_v3(normal_acc,
					          face_normals[y * grid_size_1 + (x - 1)]);
					counter++;
				}
				if (y >= 1) {
					add_v3_v3(normal_acc,
					          face_normals[(y - 1) * grid_size_1 + (x - 1)]);
					counter++;
				}
			}
			/* Face below the element. */
			if (y >= 1 && x < grid_size_1) {
				add_v3_v3(normal_acc, face_normals[(y - 1) * grid_size_1 + x]);
				counter++;
			}
			/* Average the accumulated normals and store. NOTE: the result is
			 * only divided by the number of contributing faces, it is not
			 * re-normalized to unit length here. */
			mul_v3_v3fl(CCG_grid_elem_no(key, grid, x, y),
			            normal_acc,
			            1.0f / (float)counter);
		}
	}
}
805
806 static void subdiv_ccg_recalc_inner_normal_task(
807         void *__restrict userdata_v,
808         const int grid_index,
809         const ParallelRangeTLS *__restrict tls_v)
810 {
811         RecalcInnerNormalsData *data = userdata_v;
812         RecalcInnerNormalsTLSData *tls = tls_v->userdata_chunk;
813         subdiv_ccg_recalc_inner_face_normals(data, tls, grid_index);
814         subdiv_ccg_average_inner_face_normals(data, tls, grid_index);
815 }
816
817 static void subdiv_ccg_recalc_inner_normal_finalize(
818         void *__restrict UNUSED(userdata),
819         void *__restrict tls_v)
820 {
821         RecalcInnerNormalsTLSData *tls = tls_v;
822         MEM_SAFE_FREE(tls->face_normals);
823 }
824
825 /* Recalculate normals which corresponds to non-boundaries elements of grids. */
826 static void subdiv_ccg_recalc_inner_grid_normals(SubdivCCG *subdiv_ccg)
827 {
828         CCGKey key;
829         BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
830         RecalcInnerNormalsData data = {
831                 .subdiv_ccg = subdiv_ccg,
832                 .key = &key,
833         };
834         RecalcInnerNormalsTLSData tls_data = {NULL};
835         ParallelRangeSettings parallel_range_settings;
836         BLI_parallel_range_settings_defaults(&parallel_range_settings);
837         parallel_range_settings.userdata_chunk = &tls_data;
838         parallel_range_settings.userdata_chunk_size = sizeof(tls_data);
839         parallel_range_settings.func_finalize =
840                 subdiv_ccg_recalc_inner_normal_finalize;
841         BLI_task_parallel_range(0, subdiv_ccg->num_grids,
842                                 &data,
843                                 subdiv_ccg_recalc_inner_normal_task,
844                                 &parallel_range_settings);
845 }
846
847 void BKE_subdiv_ccg_recalc_normals(SubdivCCG *subdiv_ccg)
848 {
849         if (!subdiv_ccg->has_normal) {
850                 /* Grids don't have normals, can do early output. */
851                 return;
852         }
853         subdiv_ccg_recalc_inner_grid_normals(subdiv_ccg);
854         BKE_subdiv_ccg_average_grids(subdiv_ccg);
855 }
856
857 /* =============================================================================
858  * Boundary averaging/stitching.
859  */
860
/* Userdata for averaging boundaries between grids of a single face. */
typedef struct AverageInnerGridsData {
	SubdivCCG *subdiv_ccg;
	CCGKey *key;
} AverageInnerGridsData;
865
/* Average two 3D vectors in-place: both a and b end up holding (a + b) / 2.
 * NOTE: the operation order (add, scale, copy) is kept as-is so the float
 * rounding stays identical for both outputs. */
static void average_grid_element_value_v3(float a[3], float b[3])
{
	add_v3_v3(a, b);
	mul_v3_fl(a, 0.5f);
	copy_v3_v3(b, a);
}
872
873 static void average_grid_element(SubdivCCG *subdiv_ccg,
874                                  CCGKey *key,
875                                  CCGElem *grid_element_a,
876                                  CCGElem *grid_element_b)
877 {
878         average_grid_element_value_v3(CCG_elem_co(key, grid_element_a),
879                                       CCG_elem_co(key, grid_element_b));
880         if (subdiv_ccg->has_normal) {
881                 average_grid_element_value_v3(CCG_elem_no(key, grid_element_a),
882                                               CCG_elem_no(key, grid_element_b));
883         }
884         if (subdiv_ccg->has_mask) {
885                 float mask =
886                         (*CCG_elem_mask(key, grid_element_a) +
887                          *CCG_elem_mask(key, grid_element_b)) * 0.5f;
888                 *CCG_elem_mask(key, grid_element_a) = mask;
889                 *CCG_elem_mask(key, grid_element_b) = mask;
890         }
891 }
892
893 static void copy_grid_element(SubdivCCG *subdiv_ccg,
894                               CCGKey *key,
895                               CCGElem *destination,
896                               CCGElem *source)
897 {
898         copy_v3_v3(CCG_elem_co(key, destination), CCG_elem_co(key, source));
899         if (subdiv_ccg->has_normal) {
900                 copy_v3_v3(CCG_elem_no(key, destination), CCG_elem_no(key, source));
901         }
902         if (subdiv_ccg->has_mask) {
903                 *CCG_elem_mask(key, destination) = *CCG_elem_mask(key, source);
904         }
905 }
906
907 static void subdiv_ccg_average_inner_face_grids(
908         SubdivCCG *subdiv_ccg,
909         CCGKey *key,
910         SubdivCCGFace *face)
911 {
912         CCGElem **grids = subdiv_ccg->grids;
913         const int num_face_grids = face->num_grids;
914         const int grid_size = subdiv_ccg->grid_size;
915         CCGElem *prev_grid = grids[face->start_grid_index + num_face_grids - 1];
916         for (int corner = 0; corner < num_face_grids; corner++) {
917                 CCGElem *grid = grids[face->start_grid_index + corner];
918                 for (int i = 0; i < grid_size; i++) {
919                         CCGElem *prev_grid_element = CCG_grid_elem(key, prev_grid, i, 0);
920                         CCGElem *grid_element = CCG_grid_elem(key, grid, 0, i);
921                         average_grid_element(
922                                 subdiv_ccg, key, prev_grid_element, grid_element);
923                 }
924                 prev_grid = grid;
925         }
926
927 }
928
929 static void subdiv_ccg_average_inner_grids_task(
930         void *__restrict userdata_v,
931         const int face_index,
932         const ParallelRangeTLS *__restrict UNUSED(tls_v))
933 {
934         AverageInnerGridsData *data = userdata_v;
935         SubdivCCG *subdiv_ccg = data->subdiv_ccg;
936         CCGKey *key = data->key;
937         SubdivCCGFace *faces = subdiv_ccg->faces;
938         SubdivCCGFace *face = &faces[face_index];
939         subdiv_ccg_average_inner_face_grids(subdiv_ccg, key, face);
940 }
941
/* Userdata for averaging grid elements across coarse edges. */
typedef struct AverageGridsBoundariesData {
	SubdivCCG *subdiv_ccg;
	CCGKey *key;
} AverageGridsBoundariesData;
946
/* Average boundary elements of all faces adjacent to a single coarse edge,
 * so grids from different faces match exactly along that edge. */
static void subdiv_ccg_average_grids_boundary(
	SubdivCCG *subdiv_ccg,
	CCGKey *key,
	SubdivCCGAdjacentEdge *adjacent_edge)
{
	const int num_adjacent_faces = adjacent_edge->num_adjacent_faces;
	/* Number of elements along the edge boundary of a face (two grids). */
	const int grid_size2 = subdiv_ccg->grid_size * 2;
	if (num_adjacent_faces == 1) {
		/* Nothing to average with. */
		return;
	}
	/* Incrementally average result to elements of a first adjacent face.
	 *
	 * Arguably, this is less precise than accumulating and then dividing
	 * once, but on another hand this is more stable when coordinates are
	 * big. */
	for (int face_index = 1; face_index < num_adjacent_faces; face_index++) {
		/* NOTE: We ignore very first and very last elements, they correspond
		 * to corner vertices, and they can belong to multiple edges.
		 * The fact, that they can belong to multiple edges means we can't
		 * safely average them.
		 * The fact, that they correspond to a corner elements, means they will
		 * be handled at the upcoming pass over corner elements. */
		for (int i = 1; i < grid_size2 - 1; i++) {
			CCGElem *grid_element_0 =
				adjacent_edge->boundary_elements[0][i];
			CCGElem *grid_element_face_index =
				adjacent_edge->boundary_elements[face_index][i];
			average_grid_element(subdiv_ccg,
			                     key,
			                     grid_element_0,
			                     grid_element_face_index);
		}
	}
	/* Copy averaged value to all the other faces. */
	for (int face_index = 1; face_index < num_adjacent_faces; face_index++) {
		for (int i = 1; i < grid_size2 -1; i++) {
			CCGElem *grid_element_0 =
				adjacent_edge->boundary_elements[0][i];
			CCGElem *grid_element_face_index =
				adjacent_edge->boundary_elements[face_index][i];
			copy_grid_element(subdiv_ccg,
			                  key,
			                  grid_element_face_index,
			                  grid_element_0);
		}
	}
}
994
995 static void subdiv_ccg_average_grids_boundaries_task(
996         void *__restrict userdata_v,
997         const int adjacent_edge_index,
998         const ParallelRangeTLS *__restrict UNUSED(tls_v))
999 {
1000         AverageGridsBoundariesData *data = userdata_v;
1001         SubdivCCG *subdiv_ccg = data->subdiv_ccg;
1002         CCGKey *key = data->key;
1003         SubdivCCGAdjacentEdge *adjacent_edge =
1004                 &subdiv_ccg->adjacent_edges[adjacent_edge_index];
1005         subdiv_ccg_average_grids_boundary(subdiv_ccg, key, adjacent_edge);
1006 }
1007
/* Userdata for averaging grid elements at coarse vertices. */
typedef struct AverageGridsCornerData {
	SubdivCCG *subdiv_ccg;
	CCGKey *key;
} AverageGridsCornerData;
1012
1013 static void subdiv_ccg_average_grids_corners(
1014         SubdivCCG *subdiv_ccg,
1015         CCGKey *key,
1016         SubdivCCGAdjacentVertex *adjacent_vertex)
1017 {
1018         const int num_adjacent_faces = adjacent_vertex->num_adjacent_faces;
1019         if (num_adjacent_faces == 1) {
1020                 /* Nothing to average with. */
1021                 return;
1022         }
1023         /* Incrementall average result to elements of a first adjacent face.
1024          * See comment to the boundary averaging. */
1025         for (int face_index = 1; face_index < num_adjacent_faces; face_index++) {
1026                 CCGElem *grid_element_0 =
1027                         adjacent_vertex->corner_elements[0];
1028                 CCGElem *grid_element_face_index =
1029                         adjacent_vertex->corner_elements[face_index];
1030                 average_grid_element(subdiv_ccg,
1031                                      key,
1032                                      grid_element_0,
1033                                      grid_element_face_index);
1034         }
1035         /* Copy averaged value to all the other faces. */
1036         for (int face_index = 1; face_index < num_adjacent_faces; face_index++) {
1037                 CCGElem *grid_element_0 =
1038                         adjacent_vertex->corner_elements[0];
1039                 CCGElem *grid_element_face_index =
1040                         adjacent_vertex->corner_elements[face_index];
1041                 copy_grid_element(subdiv_ccg,
1042                                   key,
1043                                   grid_element_face_index,
1044                                   grid_element_0);
1045         }
1046 }
1047
1048 static void subdiv_ccg_average_grids_corners_task(
1049         void *__restrict userdata_v,
1050         const int adjacent_vertex_index,
1051         const ParallelRangeTLS *__restrict UNUSED(tls_v))
1052 {
1053         AverageGridsCornerData *data = userdata_v;
1054         SubdivCCG *subdiv_ccg = data->subdiv_ccg;
1055         CCGKey *key = data->key;
1056         SubdivCCGAdjacentVertex *adjacent_vertex =
1057                 &subdiv_ccg->adjacent_vertices[adjacent_vertex_index];
1058         subdiv_ccg_average_grids_corners(subdiv_ccg, key, adjacent_vertex);
1059 }
1060
1061 static void subdiv_ccg_average_all_boundaries_and_corners(
1062         SubdivCCG *subdiv_ccg,
1063         CCGKey *key)
1064 {
1065         ParallelRangeSettings parallel_range_settings;
1066         BLI_parallel_range_settings_defaults(&parallel_range_settings);
1067         /* Average grids across coarse edges. */
1068         AverageGridsBoundariesData boundaries_data = {
1069                 .subdiv_ccg = subdiv_ccg,
1070                 .key = key,
1071         };
1072         BLI_task_parallel_range(0, subdiv_ccg->num_adjacent_edges,
1073                                 &boundaries_data,
1074                                 subdiv_ccg_average_grids_boundaries_task,
1075                                 &parallel_range_settings);
1076         /* Average grids at coarse vertices. */
1077         AverageGridsCornerData corner_data = {
1078                 .subdiv_ccg = subdiv_ccg,
1079                 .key = key,
1080         };
1081         BLI_task_parallel_range(0, subdiv_ccg->num_adjacent_vertices,
1082                                 &corner_data,
1083                                 subdiv_ccg_average_grids_corners_task,
1084                                 &parallel_range_settings);
1085 }
1086
1087 void BKE_subdiv_ccg_average_grids(SubdivCCG *subdiv_ccg)
1088 {
1089         CCGKey key;
1090         BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
1091         ParallelRangeSettings parallel_range_settings;
1092         BLI_parallel_range_settings_defaults(&parallel_range_settings);
1093         /* Average inner boundaries of grids (within one face), across faces
1094          * from different face-corners. */
1095         AverageInnerGridsData inner_data = {
1096                 .subdiv_ccg = subdiv_ccg,
1097                 .key = &key,
1098         };
1099         BLI_task_parallel_range(0, subdiv_ccg->num_faces,
1100                                 &inner_data,
1101                                 subdiv_ccg_average_inner_grids_task,
1102                                 &parallel_range_settings);
1103         subdiv_ccg_average_all_boundaries_and_corners(subdiv_ccg, &key);
1104 }
1105
/* Userdata for stitching inner grids of a given set of effected faces. */
typedef struct StitchFacesInnerGridsData {
	SubdivCCG *subdiv_ccg;
	CCGKey *key;
	struct CCGFace **effected_ccg_faces;
} StitchFacesInnerGridsData;
1111
1112 static void subdiv_ccg_stitch_face_inner_grids_task(
1113         void *__restrict userdata_v,
1114         const int face_index,
1115         const ParallelRangeTLS *__restrict UNUSED(tls_v))
1116 {
1117         StitchFacesInnerGridsData *data = userdata_v;
1118         SubdivCCG *subdiv_ccg = data->subdiv_ccg;
1119         CCGKey *key = data->key;
1120         struct CCGFace **effected_ccg_faces = data->effected_ccg_faces;
1121         struct CCGFace *effected_ccg_face = effected_ccg_faces[face_index];
1122         SubdivCCGFace *face = (SubdivCCGFace *)effected_ccg_face;
1123         subdiv_ccg_average_inner_face_grids(subdiv_ccg, key, face);
1124 }
1125
1126 void BKE_subdiv_ccg_average_stitch_faces(SubdivCCG *subdiv_ccg,
1127                                          struct CCGFace **effected_faces,
1128                                          int num_effected_faces)
1129 {
1130         CCGKey key;
1131         BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
1132         StitchFacesInnerGridsData data = {
1133                 .subdiv_ccg = subdiv_ccg,
1134                 .key = &key,
1135                 .effected_ccg_faces = effected_faces,
1136         };
1137         ParallelRangeSettings parallel_range_settings;
1138         BLI_parallel_range_settings_defaults(&parallel_range_settings);
1139         BLI_task_parallel_range(0, num_effected_faces,
1140                                 &data,
1141                                 subdiv_ccg_stitch_face_inner_grids_task,
1142                                 &parallel_range_settings);
1143         /* TODO(sergey): Only average elements which are adjacent to modified
1144          * faces. */
1145         subdiv_ccg_average_all_boundaries_and_corners(subdiv_ccg, &key);
1146 }
1147
1148 void BKE_subdiv_ccg_topology_counters(
1149         const SubdivCCG *subdiv_ccg,
1150         int *r_num_vertices, int *r_num_edges,
1151         int *r_num_faces, int *r_num_loops)
1152 {
1153         const int num_grids = subdiv_ccg->num_grids;
1154         const int grid_size = subdiv_ccg->grid_size;
1155         const int grid_area = grid_size * grid_size;
1156         const int num_edges_per_grid = 2 * (grid_size * (grid_size - 1));
1157         *r_num_vertices = num_grids * grid_area;
1158         *r_num_edges = num_grids * num_edges_per_grid;
1159         *r_num_faces = num_grids * (grid_size - 1) * (grid_size - 1);
1160         *r_num_loops = *r_num_faces * 4;
1161 }