doxygen: add newline after \file
[blender.git] / source / blender / blenkernel / intern / subdiv_ccg.c
1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  *
16  * The Original Code is Copyright (C) 2018 by Blender Foundation.
17  * All rights reserved.
18  */
19
20 /** \file
21  * \ingroup bke
22  */
23
24 #include "BKE_subdiv_ccg.h"
25
26 #include "DNA_mesh_types.h"
27 #include "DNA_meshdata_types.h"
28
29 #include "MEM_guardedalloc.h"
30
31 #include "BLI_math_bits.h"
32 #include "BLI_math_vector.h"
33 #include "BLI_task.h"
34
35 #include "BKE_DerivedMesh.h"
36 #include "BKE_ccg.h"
37 #include "BKE_mesh.h"
38 #include "BKE_subdiv.h"
39 #include "BKE_subdiv_eval.h"
40
41 #include "opensubdiv_topology_refiner_capi.h"
42
43 /* =============================================================================
44  * Generally useful internal helpers.
45  */
46
47 /* Number of floats in per-vertex elements.  */
48 static int num_element_float_get(const SubdivCCG *subdiv_ccg)
49 {
50         /* We always have 3 floats for coordinate. */
51         int num_floats = 3;
52         if (subdiv_ccg->has_normal) {
53                 num_floats += 3;
54         }
55         if (subdiv_ccg->has_mask) {
56                 num_floats += 1;
57         }
58         return num_floats;
59 }
60
61 /* Per-vertex element size in bytes. */
62 static int element_size_bytes_get(const SubdivCCG *subdiv_ccg)
63 {
64         return sizeof(float) * num_element_float_get(subdiv_ccg);
65 }
66
67 /* =============================================================================
68  * Internal helpers for CCG creation.
69  */
70
71 static void subdiv_ccg_init_layers(SubdivCCG *subdiv_ccg,
72                                    const SubdivToCCGSettings *settings)
73 {
74         /* CCG always contains coordinates. Rest of layers are coming after them. */
75         int layer_offset = sizeof(float) * 3;
76         /* Mask. */
77         if (settings->need_mask) {
78                 subdiv_ccg->has_mask = true;
79                 subdiv_ccg->mask_offset = layer_offset;
80                 layer_offset += sizeof(float);
81         }
82         else {
83                 subdiv_ccg->has_mask = false;
84                 subdiv_ccg->mask_offset = -1;
85         }
86         /* Normals.
87          *
88          * NOTE: Keep them at the end, matching old CCGDM. Doesn't really matter
89          * here, but some other area might in theory depend memory layout. */
90         if (settings->need_normal) {
91                 subdiv_ccg->has_normal = true;
92                 subdiv_ccg->normal_offset = layer_offset;
93                 layer_offset += sizeof(float) * 3;
94         }
95         else {
96                 subdiv_ccg->has_normal = false;
97                 subdiv_ccg->normal_offset = -1;
98         }
99 }
100
101 /* TODO(sergey): Make it more accessible function. */
102 static int topology_refiner_count_face_corners(
103         OpenSubdiv_TopologyRefiner *topology_refiner)
104 {
105         const int num_faces = topology_refiner->getNumFaces(topology_refiner);
106         int num_corners = 0;
107         for (int face_index = 0; face_index < num_faces; face_index++) {
108                 num_corners += topology_refiner->getNumFaceVertices(
109                         topology_refiner, face_index);
110         }
111         return num_corners;
112 }
113
114 /* NOTE: Grid size and layer flags are to be filled in before calling this
115  * function. */
116 static void subdiv_ccg_alloc_elements(SubdivCCG *subdiv_ccg, Subdiv *subdiv)
117 {
118         OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
119         const int element_size = element_size_bytes_get(subdiv_ccg);
120         /* Allocate memory for surface grids. */
121         const int num_faces = topology_refiner->getNumFaces(topology_refiner);
122         const int num_grids = topology_refiner_count_face_corners(topology_refiner);
123         const int grid_size = BKE_subdiv_grid_size_from_level(subdiv_ccg->level);
124         const int grid_area = grid_size * grid_size;
125         subdiv_ccg->num_grids = num_grids;
126         subdiv_ccg->grids =
127                 MEM_calloc_arrayN(num_grids, sizeof(CCGElem *), "subdiv ccg grids");
128         subdiv_ccg->grids_storage = MEM_calloc_arrayN(
129                 num_grids, ((size_t)grid_area) * element_size,
130                 "subdiv ccg grids storage");
131         const size_t grid_size_in_bytes = (size_t)grid_area * element_size;
132         for (int grid_index = 0; grid_index < num_grids; grid_index++) {
133                 const size_t grid_offset = grid_size_in_bytes * grid_index;
134                 subdiv_ccg->grids[grid_index] =
135                         (CCGElem *)&subdiv_ccg->grids_storage[grid_offset];
136         }
137         /* Grid material flags. */
138         subdiv_ccg->grid_flag_mats = MEM_calloc_arrayN(
139                 num_grids, sizeof(DMFlagMat), "ccg grid material flags");
140         /* Grid hidden flags. */
141         subdiv_ccg->grid_hidden = MEM_calloc_arrayN(
142                 num_grids, sizeof(BLI_bitmap *), "ccg grid material flags");
143         for (int grid_index = 0; grid_index < num_grids; grid_index++) {
144                 subdiv_ccg->grid_hidden[grid_index] =
145                         BLI_BITMAP_NEW(grid_area, "ccg grid hidden");
146         }
147         /* TODO(sergey): Allocate memory for loose elements. */
148         /* Allocate memory for faces. */
149         subdiv_ccg->num_faces = num_faces;
150         if (num_faces) {
151                 subdiv_ccg->faces = MEM_calloc_arrayN(
152                         num_faces, sizeof(SubdivCCGFace), "Subdiv CCG faces");
153                 subdiv_ccg->grid_faces = MEM_calloc_arrayN(
154                         num_grids, sizeof(SubdivCCGFace *), "Subdiv CCG grid faces");
155         }
156 }
157
158 /* =============================================================================
159  * Grids evaluation.
160  */
161
/* Data shared by all the tasks of the threaded grids evaluation. */
typedef struct CCGEvalGridsData {
        SubdivCCG *subdiv_ccg;
        Subdiv *subdiv;
        /* Per-coarse-face offset into the ptex face index space. */
        int *face_ptex_offset;
        /* Optional mask evaluator; NULL when no mask layer is evaluated. */
        SubdivCCGMask *mask_evaluator;
} CCGEvalGridsData;
168
169 static void subdiv_ccg_eval_grid_element(
170         CCGEvalGridsData *data,
171         const int ptex_face_index,
172         const float u, const float v,
173         unsigned char *element)
174 {
175         Subdiv *subdiv = data->subdiv;
176         SubdivCCG *subdiv_ccg = data->subdiv_ccg;
177         if (subdiv->displacement_evaluator != NULL) {
178                 BKE_subdiv_eval_final_point(
179                         subdiv, ptex_face_index, u, v, (float *)element);
180         }
181         else if (subdiv_ccg->has_normal) {
182                 BKE_subdiv_eval_limit_point_and_normal(
183                         subdiv, ptex_face_index, u, v,
184                         (float *)element,
185                         (float *)(element + subdiv_ccg->normal_offset));
186         }
187         else {
188                 BKE_subdiv_eval_limit_point(
189                         subdiv, ptex_face_index, u, v, (float *)element);
190         }
191         if (subdiv_ccg->has_mask) {
192                 float *mask_value_ptr = (float *)(element + subdiv_ccg->mask_offset);
193                 if (data->mask_evaluator != NULL) {
194                         *mask_value_ptr = data->mask_evaluator->eval_mask(
195                                 data->mask_evaluator, ptex_face_index, u, v);
196                 }
197                 else {
198                         *mask_value_ptr = 0.0f;
199                 }
200         }
201 }
202
203 static void subdiv_ccg_eval_regular_grid(CCGEvalGridsData *data,
204                                          const int face_index)
205 {
206         SubdivCCG *subdiv_ccg = data->subdiv_ccg;
207         const int ptex_face_index = data->face_ptex_offset[face_index];
208         const int grid_size = subdiv_ccg->grid_size;
209         const float grid_size_1_inv = 1.0f / (float)(grid_size - 1);
210         const int element_size = element_size_bytes_get(subdiv_ccg);
211         SubdivCCGFace *faces = subdiv_ccg->faces;
212         SubdivCCGFace **grid_faces = subdiv_ccg->grid_faces;
213         const SubdivCCGFace *face = &faces[face_index];
214         for (int corner = 0; corner < face->num_grids; corner++) {
215                 const int grid_index = face->start_grid_index + corner;
216                 unsigned char *grid = (unsigned char *)subdiv_ccg->grids[grid_index];
217                 for (int y = 0; y < grid_size; y++) {
218                         const float grid_v = (float)y * grid_size_1_inv;
219                         for (int x = 0; x < grid_size; x++) {
220                                 const float grid_u = (float)x * grid_size_1_inv;
221                                 float u, v;
222                                 BKE_subdiv_rotate_grid_to_quad(
223                                         corner, grid_u, grid_v, &u, &v);
224                                 const size_t grid_element_index = (size_t)y * grid_size + x;
225                                 const size_t grid_element_offset =
226                                         grid_element_index * element_size;
227                                 subdiv_ccg_eval_grid_element(
228                                         data,
229                                         ptex_face_index, u, v,
230                                         &grid[grid_element_offset]);
231                         }
232                 }
233                 /* Assign grid's face. */
234                 grid_faces[grid_index] = &faces[face_index];
235         }
236 }
237
238 static void subdiv_ccg_eval_special_grid(CCGEvalGridsData *data,
239                                          const int face_index)
240 {
241         SubdivCCG *subdiv_ccg = data->subdiv_ccg;
242         const int grid_size = subdiv_ccg->grid_size;
243         const float grid_size_1_inv = 1.0f / (float)(grid_size - 1);
244         const int element_size = element_size_bytes_get(subdiv_ccg);
245         SubdivCCGFace *faces = subdiv_ccg->faces;
246         SubdivCCGFace **grid_faces = subdiv_ccg->grid_faces;
247         const SubdivCCGFace *face = &faces[face_index];
248         for (int corner = 0; corner < face->num_grids; corner++) {
249                 const int grid_index = face->start_grid_index + corner;
250                 unsigned char *grid = (unsigned char *)subdiv_ccg->grids[grid_index];
251                 for (int y = 0; y < grid_size; y++) {
252                         const float u = 1.0f - ((float)y * grid_size_1_inv);
253                         for (int x = 0; x < grid_size; x++) {
254                                 const float v = 1.0f - ((float)x * grid_size_1_inv);
255                                 const int ptex_face_index =
256                                         data->face_ptex_offset[face_index] + corner;
257                                 const size_t grid_element_index = (size_t)y * grid_size + x;
258                                 const size_t grid_element_offset =
259                                         grid_element_index * element_size;
260                                 subdiv_ccg_eval_grid_element(
261                                         data,
262                                         ptex_face_index, u, v,
263                                         &grid[grid_element_offset]);
264                         }
265                 }
266                 /* Assign grid's face. */
267                 grid_faces[grid_index] = &faces[face_index];
268         }
269 }
270
271 static void subdiv_ccg_eval_grids_task(
272         void *__restrict userdata_v,
273         const int face_index,
274         const ParallelRangeTLS *__restrict UNUSED(tls))
275 {
276         CCGEvalGridsData *data = userdata_v;
277         SubdivCCG *subdiv_ccg = data->subdiv_ccg;
278         SubdivCCGFace *face = &subdiv_ccg->faces[face_index];
279         if (face->num_grids == 4) {
280                 subdiv_ccg_eval_regular_grid(data, face_index);
281         }
282         else {
283                 subdiv_ccg_eval_special_grid(data, face_index);
284         }
285 }
286
287 static bool subdiv_ccg_evaluate_grids(
288         SubdivCCG *subdiv_ccg,
289         Subdiv *subdiv,
290         SubdivCCGMask *mask_evaluator)
291 {
292         OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
293         const int num_faces = topology_refiner->getNumFaces(topology_refiner);
294         /* Initialize data passed to all the tasks. */
295         CCGEvalGridsData data;
296         data.subdiv_ccg = subdiv_ccg;
297         data.subdiv = subdiv;
298         data.face_ptex_offset = BKE_subdiv_face_ptex_offset_get(subdiv);
299         data.mask_evaluator = mask_evaluator;
300         /* Threaded grids evaluation. */
301         ParallelRangeSettings parallel_range_settings;
302         BLI_parallel_range_settings_defaults(&parallel_range_settings);
303         BLI_task_parallel_range(0, num_faces,
304                                 &data,
305                                 subdiv_ccg_eval_grids_task,
306                                 &parallel_range_settings);
307         /* If displacement is used, need to calculate normals after all final
308          * coordinates are known. */
309         if (subdiv->displacement_evaluator != NULL) {
310                 BKE_subdiv_ccg_recalc_normals(subdiv_ccg);
311         }
312         return true;
313 }
314
315 /* Initialize face descriptors, assuming memory for them was already
316  * allocated. */
317 static void subdiv_ccg_init_faces(SubdivCCG *subdiv_ccg)
318 {
319         Subdiv *subdiv = subdiv_ccg->subdiv;
320         OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
321         const int num_faces = subdiv_ccg->num_faces;
322         int corner_index = 0;
323         for (int face_index = 0; face_index < num_faces; face_index++) {
324                 const int num_corners = topology_refiner->getNumFaceVertices(
325                         topology_refiner, face_index);
326                 subdiv_ccg->faces[face_index].num_grids = num_corners;
327                 subdiv_ccg->faces[face_index].start_grid_index = corner_index;
328                 corner_index += num_corners;
329         }
330 }
331
/* Integer buffer which lives on the stack for small sizes and falls back to
 * a heap allocation for bigger ones.
 *
 * TODO(sergey): Consider making it generic enough to be fit into BLI. */
typedef struct StaticOrHeapIntStorage {
        /* Stack-allocated buffer, used whenever the requested size fits. */
        int static_storage[64];
        /* Capacity of static_storage, in elements. */
        int static_storage_size;
        /* Heap fallback; NULL until a larger size is first requested. */
        int *heap_storage;
        /* Capacity of heap_storage, in elements. */
        int heap_storage_size;
} StaticOrHeapIntStorage;
339
340 static void static_or_heap_storage_init(StaticOrHeapIntStorage *storage)
341 {
342         storage->static_storage_size =
343                 sizeof(storage->static_storage) / sizeof(*storage->static_storage);
344         storage->heap_storage = NULL;
345         storage->heap_storage_size = 0;
346 }
347
348 static int *static_or_heap_storage_get(StaticOrHeapIntStorage *storage,
349                                        int size)
350 {
351         /* Requested size small enough to be fit into stack allocated memory. */
352         if (size <= storage->static_storage_size) {
353                 return storage->static_storage;
354         }
355         /* Make sure heap ius big enough. */
356         if (size > storage->heap_storage_size) {
357                 MEM_SAFE_FREE(storage->heap_storage);
358                 storage->heap_storage = MEM_malloc_arrayN(
359                         size, sizeof(int), "int storage");
360                 storage->heap_storage_size = size;
361         }
362         return storage->heap_storage;
363 }
364
365 static void static_or_heap_storage_free(StaticOrHeapIntStorage *storage)
366 {
367         MEM_SAFE_FREE(storage->heap_storage);
368 }
369
370 static void subdiv_ccg_allocate_adjacent_edges(SubdivCCG *subdiv_ccg,
371                                                const int num_edges)
372 {
373         subdiv_ccg->num_adjacent_edges = num_edges;
374         subdiv_ccg->adjacent_edges = MEM_calloc_arrayN(
375                 subdiv_ccg->num_adjacent_edges,
376                 sizeof(*subdiv_ccg->adjacent_edges),
377                 "ccg adjacent edges");
378 }
379
/* Append `face` to the edge's adjacent-face list (grow-by-one realloc) and
 * allocate the array which will hold that face's CCG elements along the
 * edge.
 *
 * Returns storage where boundary elements are to be stored. */
static CCGElem **subdiv_ccg_adjacent_edge_add_face(
        SubdivCCG *subdiv_ccg,
        SubdivCCGAdjacentEdge *adjacent_edge,
        SubdivCCGFace *face)
{
        /* NOTE(review): this local is already twice the real grid size, and
         * the allocation below multiplies by 2 again (4 * grid_size slots),
         * while subdiv_ccg_init_faces_edge_neighborhood() only fills
         * 2 * grid_size entries per face. Looks over-allocated by 2x —
         * confirm before changing. */
        const int grid_size = subdiv_ccg->grid_size * 2;
        const int adjacent_face_index = adjacent_edge->num_adjacent_faces;
        ++adjacent_edge->num_adjacent_faces;
        /* Store new adjacent face. */
        adjacent_edge->faces = MEM_reallocN(
                adjacent_edge->faces,
                adjacent_edge->num_adjacent_faces * sizeof(*adjacent_edge->faces));
        adjacent_edge->faces[adjacent_face_index] = face;
        /* Allocate memory for the boundary elements. */
        adjacent_edge->boundary_elements = MEM_reallocN(
                adjacent_edge->boundary_elements,
                adjacent_edge->num_adjacent_faces *
                        sizeof(*adjacent_edge->boundary_elements));
        adjacent_edge->boundary_elements[adjacent_face_index] =
                MEM_malloc_arrayN(
                        grid_size * 2, sizeof(CCGElem *), "ccg adjacent boundary");
        return adjacent_edge->boundary_elements[adjacent_face_index];
}
404
/* For every coarse edge, record which faces are adjacent to it and collect
 * pointers to the CCG elements running along the edge (taken from the two
 * grids that meet at it), in a consistent direction per edge. */
static void subdiv_ccg_init_faces_edge_neighborhood(SubdivCCG *subdiv_ccg)
{
        Subdiv *subdiv = subdiv_ccg->subdiv;
        SubdivCCGFace *faces = subdiv_ccg->faces;
        OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
        const int num_edges = topology_refiner->getNumEdges(topology_refiner);
        const int grid_size = subdiv_ccg->grid_size;
        if (num_edges == 0) {
                /* Early output, nothing to do in this case. */
                return;
        }
        subdiv_ccg_allocate_adjacent_edges(subdiv_ccg, num_edges);
        /* Initialize storage. */
        StaticOrHeapIntStorage face_vertices_storage;
        StaticOrHeapIntStorage face_edges_storage;
        static_or_heap_storage_init(&face_vertices_storage);
        static_or_heap_storage_init(&face_edges_storage);
        /* Key to access elements. */
        CCGKey key;
        BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
        /* Store adjacency for all faces. */
        const int num_faces = subdiv_ccg->num_faces;
        for (int face_index = 0; face_index < num_faces; face_index++) {
                SubdivCCGFace *face = &faces[face_index];
                const int num_face_grids = face->num_grids;
                const int num_face_edges = num_face_grids;
                int *face_vertices = static_or_heap_storage_get(
                        &face_vertices_storage, num_face_edges);
                topology_refiner->getFaceVertices(
                        topology_refiner, face_index, face_vertices);
                /* Note that order of edges is same as order of MLoops, which also
                 * means it's the same as order of grids. */
                int *face_edges = static_or_heap_storage_get(
                        &face_edges_storage, num_face_edges);
                topology_refiner->getFaceEdges(
                        topology_refiner, face_index, face_edges);
                /* Store grids adjacency for this edge. */
                for (int corner = 0; corner < num_face_edges; corner++) {
                        const int vertex_index = face_vertices[corner];
                        const int edge_index = face_edges[corner];
                        int edge_vertices[2];
                        topology_refiner->getEdgeVertices(
                                topology_refiner, edge_index, edge_vertices);
                        /* Edge direction relative to this face's loop order:
                         * flipped when the edge does not start at this
                         * corner's vertex. */
                        const bool is_edge_flipped = (edge_vertices[0] != vertex_index);
                        /* Grid which is adjacent to the current corner. */
                        const int current_grid_index = face->start_grid_index + corner;
                        CCGElem *current_grid = subdiv_ccg->grids[current_grid_index];
                        /* Grid which is adjacent to the next corner. */
                        const int next_grid_index =
                                face->start_grid_index + (corner + 1) % num_face_grids;
                        CCGElem *next_grid = subdiv_ccg->grids[next_grid_index];
                        /* Add new face to the adjacent edge. */
                        SubdivCCGAdjacentEdge *adjacent_edge =
                                &subdiv_ccg->adjacent_edges[edge_index];
                        CCGElem **boundary_elements = subdiv_ccg_adjacent_edge_add_face(
                                subdiv_ccg, adjacent_edge, face);
                        /* Fill CCG elements along the edge. The two halves of
                         * the edge come from the two grids meeting at it; the
                         * traversal direction is chosen so every face stores
                         * the same edge in the same element order. */
                        int boundary_element_index = 0;
                        if (is_edge_flipped) {
                                for (int i = 0; i < grid_size; i++) {
                                        boundary_elements[boundary_element_index++] =
                                                CCG_grid_elem(&key,
                                                              next_grid,
                                                              grid_size - i - 1,
                                                              grid_size - 1);
                                }
                                for (int i = 0; i < grid_size; i++) {
                                        boundary_elements[boundary_element_index++] =
                                                CCG_grid_elem(&key,
                                                              current_grid,
                                                              grid_size - 1,
                                                              i);
                                }
                        }
                        else {
                                for (int i = 0; i < grid_size; i++) {
                                        boundary_elements[boundary_element_index++] =
                                                CCG_grid_elem(&key,
                                                              current_grid,
                                                              grid_size - 1,
                                                              grid_size - i - 1);
                                }
                                for (int i = 0; i < grid_size; i++) {
                                        boundary_elements[boundary_element_index++] =
                                                CCG_grid_elem(&key,
                                                              next_grid,
                                                              i,
                                                              grid_size - 1);
                                }
                        }
                }
        }
        /* Free possibly heap-allocated storage. */
        static_or_heap_storage_free(&face_vertices_storage);
        static_or_heap_storage_free(&face_edges_storage);
}
501
502 static void subdiv_ccg_allocate_adjacent_vertices(SubdivCCG *subdiv_ccg,
503                                                   const int num_vertices)
504 {
505         subdiv_ccg->num_adjacent_vertices = num_vertices;
506         subdiv_ccg->adjacent_vertices = MEM_calloc_arrayN(
507                 subdiv_ccg->num_adjacent_vertices,
508                 sizeof(*subdiv_ccg->adjacent_vertices),
509                 "ccg adjacent vertices");
510 }
511
512 /* Returns storage where corner elements are to be stored. This is a pointer
513  * to the actual storage. */
514 static CCGElem **subdiv_ccg_adjacent_vertex_add_face(
515         SubdivCCGAdjacentVertex *adjacent_vertex,
516         SubdivCCGFace *face)
517 {
518         const int adjacent_face_index = adjacent_vertex->num_adjacent_faces;
519         ++adjacent_vertex->num_adjacent_faces;
520         /* Store new adjacent face. */
521         adjacent_vertex->faces = MEM_reallocN(
522                 adjacent_vertex->faces,
523                 adjacent_vertex->num_adjacent_faces *
524                         sizeof(*adjacent_vertex->faces));
525         adjacent_vertex->faces[adjacent_face_index] = face;
526         /* Allocate memory for the boundary elements. */
527         adjacent_vertex->corner_elements = MEM_reallocN(
528                 adjacent_vertex->corner_elements,
529                 adjacent_vertex->num_adjacent_faces *
530                         sizeof(*adjacent_vertex->corner_elements));
531         return &adjacent_vertex->corner_elements[adjacent_face_index];
532 }
533
/* For every coarse vertex, record which faces are adjacent to it and a
 * pointer to the CCG element sitting exactly at that vertex (the far
 * corner of the corresponding grid). */
static void subdiv_ccg_init_faces_vertex_neighborhood(SubdivCCG *subdiv_ccg)
{
        Subdiv *subdiv = subdiv_ccg->subdiv;
        SubdivCCGFace *faces = subdiv_ccg->faces;
        OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
        const int num_vertices =
                topology_refiner->getNumVertices(topology_refiner);
        const int grid_size = subdiv_ccg->grid_size;
        if (num_vertices == 0) {
                /* Early output, nothing to do in this case. */
                return;
        }
        subdiv_ccg_allocate_adjacent_vertices(subdiv_ccg, num_vertices);
        /* Initialize storage. */
        StaticOrHeapIntStorage face_vertices_storage;
        static_or_heap_storage_init(&face_vertices_storage);
        /* Key to access elements. */
        CCGKey key;
        BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
        /* Store adjacency for all faces. */
        const int num_faces = subdiv_ccg->num_faces;
        for (int face_index = 0; face_index < num_faces; face_index++) {
                SubdivCCGFace *face = &faces[face_index];
                const int num_face_grids = face->num_grids;
                const int num_face_edges = num_face_grids;
                int *face_vertices = static_or_heap_storage_get(
                        &face_vertices_storage, num_face_edges);
                topology_refiner->getFaceVertices(
                        topology_refiner, face_index, face_vertices);
                for (int corner = 0; corner < num_face_edges; corner++) {
                        const int vertex_index = face_vertices[corner];
                        /* Grid which is adjacent to the current corner. */
                        const int grid_index = face->start_grid_index + corner;
                        CCGElem *grid = subdiv_ccg->grids[grid_index];
                        /* Add new face to the adjacent vertex. */
                        SubdivCCGAdjacentVertex *adjacent_vertex =
                                &subdiv_ccg->adjacent_vertices[vertex_index];
                        CCGElem **corner_element = subdiv_ccg_adjacent_vertex_add_face(
                                adjacent_vertex, face);
                        /* The coarse vertex corresponds to the far corner of
                         * the grid. */
                        *corner_element = CCG_grid_elem(
                                &key, grid, grid_size - 1, grid_size - 1);
                }
        }
        /* Free possibly heap-allocated storage. */
        static_or_heap_storage_free(&face_vertices_storage);
}
580
/* Initialize edge and vertex adjacency information for all faces. */
static void subdiv_ccg_init_faces_neighborhood(SubdivCCG *subdiv_ccg)
{
        subdiv_ccg_init_faces_edge_neighborhood(subdiv_ccg);
        subdiv_ccg_init_faces_vertex_neighborhood(subdiv_ccg);
}
586
587 /* =============================================================================
588  * Creation / evaluation.
589  */
590
591 SubdivCCG *BKE_subdiv_to_ccg(
592         Subdiv *subdiv,
593         const SubdivToCCGSettings *settings,
594         SubdivCCGMask *mask_evaluator)
595 {
596         BKE_subdiv_stats_begin(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_CCG);
597         SubdivCCG *subdiv_ccg = MEM_callocN(sizeof(SubdivCCG), "subdiv ccg");
598         subdiv_ccg->subdiv = subdiv;
599         subdiv_ccg->level = bitscan_forward_i(settings->resolution - 1);
600         subdiv_ccg->grid_size = BKE_subdiv_grid_size_from_level(subdiv_ccg->level);
601         subdiv_ccg_init_layers(subdiv_ccg, settings);
602         subdiv_ccg_alloc_elements(subdiv_ccg, subdiv);
603         subdiv_ccg_init_faces(subdiv_ccg);
604         subdiv_ccg_init_faces_neighborhood(subdiv_ccg);
605         if (!subdiv_ccg_evaluate_grids(subdiv_ccg, subdiv, mask_evaluator)) {
606                 BKE_subdiv_ccg_destroy(subdiv_ccg);
607                 BKE_subdiv_stats_end(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_CCG);
608                 return NULL;
609         }
610         BKE_subdiv_stats_end(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_CCG);
611         return subdiv_ccg;
612 }
613
614 Mesh *BKE_subdiv_to_ccg_mesh(
615         Subdiv *subdiv,
616         const SubdivToCCGSettings *settings,
617         const Mesh *coarse_mesh)
618 {
619         /* Make sure evaluator is ready. */
620         BKE_subdiv_stats_begin(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_CCG);
621         if (!BKE_subdiv_eval_update_from_mesh(subdiv, coarse_mesh)) {
622                 if (coarse_mesh->totpoly) {
623                         return false;
624                 }
625         }
626         BKE_subdiv_stats_end(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_CCG);
627         SubdivCCGMask mask_evaluator;
628         bool has_mask = BKE_subdiv_ccg_mask_init_from_paint(
629         &mask_evaluator, coarse_mesh);
630         SubdivCCG *subdiv_ccg = BKE_subdiv_to_ccg(
631             subdiv, settings, has_mask ? &mask_evaluator : NULL);
632         if (has_mask) {
633                 mask_evaluator.free(&mask_evaluator);
634         }
635         if (subdiv_ccg == NULL) {
636                 return NULL;
637         }
638         Mesh *result = BKE_mesh_new_nomain_from_template(
639                 coarse_mesh, 0, 0, 0, 0, 0);
640         result->runtime.subdiv_ccg = subdiv_ccg;
641         return result;
642 }
643
/* Free all memory owned by the SubdivCCG, including its Subdiv (freed via
 * BKE_subdiv_free below) and the SubdivCCG struct itself. */
void BKE_subdiv_ccg_destroy(SubdivCCG *subdiv_ccg)
{
        const int num_grids = subdiv_ccg->num_grids;
        MEM_SAFE_FREE(subdiv_ccg->grids);
        MEM_SAFE_FREE(subdiv_ccg->grids_storage);
        MEM_SAFE_FREE(subdiv_ccg->edges);
        MEM_SAFE_FREE(subdiv_ccg->vertices);
        MEM_SAFE_FREE(subdiv_ccg->grid_flag_mats);
        /* Each grid owns its own hidden bitmap. */
        if (subdiv_ccg->grid_hidden != NULL) {
                for (int grid_index = 0; grid_index < num_grids; grid_index++) {
                        MEM_freeN(subdiv_ccg->grid_hidden[grid_index]);
                }
                MEM_freeN(subdiv_ccg->grid_hidden);
        }
        /* The CCG owns the Subdiv it was created from. */
        if (subdiv_ccg->subdiv != NULL) {
                BKE_subdiv_free(subdiv_ccg->subdiv);
        }
        MEM_SAFE_FREE(subdiv_ccg->faces);
        MEM_SAFE_FREE(subdiv_ccg->grid_faces);
        /* Free map of adjacent edges. */
        for (int i = 0; i < subdiv_ccg->num_adjacent_edges; i++) {
                SubdivCCGAdjacentEdge *adjacent_edge = &subdiv_ccg->adjacent_edges[i];
                for (int face_index = 0;
                     face_index < adjacent_edge->num_adjacent_faces;
                     face_index++)
                {
                        MEM_SAFE_FREE(adjacent_edge->boundary_elements[face_index]);
                }
                MEM_SAFE_FREE(adjacent_edge->faces);
                MEM_SAFE_FREE(adjacent_edge->boundary_elements);
        }
        MEM_SAFE_FREE(subdiv_ccg->adjacent_edges);
        /* Free map of adjacent vertices. */
        for (int i = 0; i < subdiv_ccg->num_adjacent_vertices; i++) {
                SubdivCCGAdjacentVertex *adjacent_vertex =
                        &subdiv_ccg->adjacent_vertices[i];
                MEM_SAFE_FREE(adjacent_vertex->faces);
                MEM_SAFE_FREE(adjacent_vertex->corner_elements);
        }
        MEM_SAFE_FREE(subdiv_ccg->adjacent_vertices);
        MEM_freeN(subdiv_ccg);
}
686
687 void BKE_subdiv_ccg_key(CCGKey *key, const SubdivCCG *subdiv_ccg, int level)
688 {
689         key->level = level;
690         key->elem_size = element_size_bytes_get(subdiv_ccg);
691         key->grid_size = BKE_subdiv_grid_size_from_level(level);
692         key->grid_area = key->grid_size * key->grid_size;
693         key->grid_bytes = key->elem_size * key->grid_area;
694
695         key->normal_offset = subdiv_ccg->normal_offset;
696         key->mask_offset = subdiv_ccg->mask_offset;
697
698         key->has_normals = subdiv_ccg->has_normal;
699         key->has_mask = subdiv_ccg->has_mask;
700 }
701
/* Fill in a CCGKey for the CCG's topmost (finest) subdivision level. */
void BKE_subdiv_ccg_key_top_level(CCGKey *key, const SubdivCCG *subdiv_ccg)
{
	BKE_subdiv_ccg_key(key, subdiv_ccg, subdiv_ccg->level);
}
706
707 /* =============================================================================
708  * Normals.
709  */
710
/* Userdata shared by all parallel inner-normal recalculation tasks. */
typedef struct RecalcInnerNormalsData {
	SubdivCCG *subdiv_ccg;
	CCGKey *key;
} RecalcInnerNormalsData;

/* Thread-local storage: per-quad face normals buffer, lazily allocated by
 * the task body and freed by the finalize callback. */
typedef struct RecalcInnerNormalsTLSData {
	float (*face_normals)[3];
} RecalcInnerNormalsTLSData;
719
720 /* Evaluate high-res face normals, for faces which corresponds to grid elements
721  *
722  *   {(x, y), {x + 1, y}, {x + 1, y + 1}, {x, y + 1}}
723  *
724  * The result is stored in normals storage from TLS. */
725 static void subdiv_ccg_recalc_inner_face_normals(
726         RecalcInnerNormalsData *data,
727         RecalcInnerNormalsTLSData *tls,
728         const int grid_index)
729 {
730         SubdivCCG *subdiv_ccg = data->subdiv_ccg;
731         CCGKey *key = data->key;
732         const int grid_size = subdiv_ccg->grid_size;
733         const int grid_size_1 = grid_size - 1;
734         CCGElem *grid = subdiv_ccg->grids[grid_index];
735         if (tls->face_normals == NULL) {
736                 tls->face_normals = MEM_malloc_arrayN(
737                         grid_size_1 * grid_size_1,
738                         3 * sizeof(float),
739                         "CCG TLS normals");
740         }
741         for (int y = 0; y < grid_size -1; y++) {
742                 for (int x = 0; x < grid_size - 1; x++) {
743                         CCGElem *grid_elements[4] = {
744                                 CCG_grid_elem(key, grid, x, y + 1),
745                                 CCG_grid_elem(key, grid, x + 1, y + 1),
746                                 CCG_grid_elem(key, grid, x + 1, y),
747                                 CCG_grid_elem(key, grid, x, y),
748                         };
749                         float *co[4] = {
750                             CCG_elem_co(key, grid_elements[0]),
751                             CCG_elem_co(key, grid_elements[1]),
752                             CCG_elem_co(key, grid_elements[2]),
753                             CCG_elem_co(key, grid_elements[3]),
754                         };
755                         const int face_index = y * grid_size_1 + x;
756                         float *face_normal = tls->face_normals[face_index];
757                         normal_quad_v3(face_normal, co[0], co[1], co[2], co[3]);
758                 }
759         }
760 }
761
/* Average normals at every grid element, using adjacent faces normals.
 *
 * Relies on tls->face_normals having been filled by
 * subdiv_ccg_recalc_inner_face_normals() for the same grid_index. */
static void subdiv_ccg_average_inner_face_normals(
        RecalcInnerNormalsData *data,
        RecalcInnerNormalsTLSData *tls,
        const int grid_index)
{
	SubdivCCG *subdiv_ccg = data->subdiv_ccg;
	CCGKey *key = data->key;
	const int grid_size = subdiv_ccg->grid_size;
	const int grid_size_1 = grid_size - 1;
	CCGElem *grid = subdiv_ccg->grids[grid_index];
	const float (*face_normals)[3] = tls->face_normals;
	for (int y = 0; y < grid_size; y++) {
		for (int x = 0; x < grid_size; x++) {
			float normal_acc[3] = {0.0f, 0.0f, 0.0f};
			int counter = 0;
			/* Accumulate normals of all adjacent faces.
			 * An element at (x, y) touches up to 4 quads of the
			 * grid_size_1 x grid_size_1 face lattice; each bounds check
			 * below guards one of those quads. */
			if (x < grid_size_1 && y < grid_size_1) {
				/* Quad with (x, y) at its lower-left corner. */
				add_v3_v3(normal_acc, face_normals[y * grid_size_1 + x]);
				counter++;
			}
			if (x >= 1) {
				if (y < grid_size_1) {
					/* Quad to the left. */
					add_v3_v3(normal_acc,
					          face_normals[y * grid_size_1 + (x - 1)]);
					counter++;
				}
				if (y >= 1) {
					/* Quad to the lower-left. */
					add_v3_v3(normal_acc,
					          face_normals[(y - 1) * grid_size_1 + (x - 1)]);
					counter++;
				}
			}
			if (y >= 1 && x < grid_size_1) {
				/* Quad below. */
				add_v3_v3(normal_acc, face_normals[(y - 1) * grid_size_1 + x]);
				counter++;
			}
			/* Normalize and store.
			 * NOTE: This divides by the number of contributing quads (an
			 * average); the result is not re-normalized to unit length
			 * here. */
			mul_v3_v3fl(CCG_grid_elem_no(key, grid, x, y),
			            normal_acc,
			            1.0f / (float)counter);
		}
	}
}
806
807 static void subdiv_ccg_recalc_inner_normal_task(
808         void *__restrict userdata_v,
809         const int grid_index,
810         const ParallelRangeTLS *__restrict tls_v)
811 {
812         RecalcInnerNormalsData *data = userdata_v;
813         RecalcInnerNormalsTLSData *tls = tls_v->userdata_chunk;
814         subdiv_ccg_recalc_inner_face_normals(data, tls, grid_index);
815         subdiv_ccg_average_inner_face_normals(data, tls, grid_index);
816 }
817
818 static void subdiv_ccg_recalc_inner_normal_finalize(
819         void *__restrict UNUSED(userdata),
820         void *__restrict tls_v)
821 {
822         RecalcInnerNormalsTLSData *tls = tls_v;
823         MEM_SAFE_FREE(tls->face_normals);
824 }
825
826 /* Recalculate normals which corresponds to non-boundaries elements of grids. */
827 static void subdiv_ccg_recalc_inner_grid_normals(SubdivCCG *subdiv_ccg)
828 {
829         CCGKey key;
830         BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
831         RecalcInnerNormalsData data = {
832                 .subdiv_ccg = subdiv_ccg,
833                 .key = &key,
834         };
835         RecalcInnerNormalsTLSData tls_data = {NULL};
836         ParallelRangeSettings parallel_range_settings;
837         BLI_parallel_range_settings_defaults(&parallel_range_settings);
838         parallel_range_settings.userdata_chunk = &tls_data;
839         parallel_range_settings.userdata_chunk_size = sizeof(tls_data);
840         parallel_range_settings.func_finalize =
841                 subdiv_ccg_recalc_inner_normal_finalize;
842         BLI_task_parallel_range(0, subdiv_ccg->num_grids,
843                                 &data,
844                                 subdiv_ccg_recalc_inner_normal_task,
845                                 &parallel_range_settings);
846 }
847
848 void BKE_subdiv_ccg_recalc_normals(SubdivCCG *subdiv_ccg)
849 {
850         if (!subdiv_ccg->has_normal) {
851                 /* Grids don't have normals, can do early output. */
852                 return;
853         }
854         subdiv_ccg_recalc_inner_grid_normals(subdiv_ccg);
855         BKE_subdiv_ccg_average_grids(subdiv_ccg);
856 }
857
858 /* =============================================================================
859  * Boundary averaging/stitching.
860  */
861
/* Userdata for averaging grid boundaries within each face (across the
 * face's corner grids). */
typedef struct AverageInnerGridsData {
	SubdivCCG *subdiv_ccg;
	CCGKey *key;
} AverageInnerGridsData;
866
/* Average two 3D vectors in place: both `a` and `b` end up holding
 * (a + b) / 2. */
static void average_grid_element_value_v3(float a[3], float b[3])
{
	add_v3_v3(a, b);
	mul_v3_fl(a, 0.5f);
	copy_v3_v3(b, a);
}
873
874 static void average_grid_element(SubdivCCG *subdiv_ccg,
875                                  CCGKey *key,
876                                  CCGElem *grid_element_a,
877                                  CCGElem *grid_element_b)
878 {
879         average_grid_element_value_v3(CCG_elem_co(key, grid_element_a),
880                                       CCG_elem_co(key, grid_element_b));
881         if (subdiv_ccg->has_normal) {
882                 average_grid_element_value_v3(CCG_elem_no(key, grid_element_a),
883                                               CCG_elem_no(key, grid_element_b));
884         }
885         if (subdiv_ccg->has_mask) {
886                 float mask =
887                         (*CCG_elem_mask(key, grid_element_a) +
888                          *CCG_elem_mask(key, grid_element_b)) * 0.5f;
889                 *CCG_elem_mask(key, grid_element_a) = mask;
890                 *CCG_elem_mask(key, grid_element_b) = mask;
891         }
892 }
893
/* Copy coordinate (and normal/mask when those layers exist) from `source`
 * into `destination`. */
static void copy_grid_element(SubdivCCG *subdiv_ccg,
                              CCGKey *key,
                              CCGElem *destination,
                              CCGElem *source)
{
	copy_v3_v3(CCG_elem_co(key, destination), CCG_elem_co(key, source));
	if (subdiv_ccg->has_normal) {
		copy_v3_v3(CCG_elem_no(key, destination), CCG_elem_no(key, source));
	}
	if (subdiv_ccg->has_mask) {
		*CCG_elem_mask(key, destination) = *CCG_elem_mask(key, source);
	}
}
907
908 static void subdiv_ccg_average_inner_face_grids(
909         SubdivCCG *subdiv_ccg,
910         CCGKey *key,
911         SubdivCCGFace *face)
912 {
913         CCGElem **grids = subdiv_ccg->grids;
914         const int num_face_grids = face->num_grids;
915         const int grid_size = subdiv_ccg->grid_size;
916         CCGElem *prev_grid = grids[face->start_grid_index + num_face_grids - 1];
917         for (int corner = 0; corner < num_face_grids; corner++) {
918                 CCGElem *grid = grids[face->start_grid_index + corner];
919                 for (int i = 0; i < grid_size; i++) {
920                         CCGElem *prev_grid_element = CCG_grid_elem(key, prev_grid, i, 0);
921                         CCGElem *grid_element = CCG_grid_elem(key, grid, 0, i);
922                         average_grid_element(
923                                 subdiv_ccg, key, prev_grid_element, grid_element);
924                 }
925                 prev_grid = grid;
926         }
927
928 }
929
930 static void subdiv_ccg_average_inner_grids_task(
931         void *__restrict userdata_v,
932         const int face_index,
933         const ParallelRangeTLS *__restrict UNUSED(tls_v))
934 {
935         AverageInnerGridsData *data = userdata_v;
936         SubdivCCG *subdiv_ccg = data->subdiv_ccg;
937         CCGKey *key = data->key;
938         SubdivCCGFace *faces = subdiv_ccg->faces;
939         SubdivCCGFace *face = &faces[face_index];
940         subdiv_ccg_average_inner_face_grids(subdiv_ccg, key, face);
941 }
942
/* Userdata for averaging grids across coarse edges (boundaries). */
typedef struct AverageGridsBoundariesData {
	SubdivCCG *subdiv_ccg;
	CCGKey *key;
} AverageGridsBoundariesData;
947
948 static void subdiv_ccg_average_grids_boundary(
949         SubdivCCG *subdiv_ccg,
950         CCGKey *key,
951         SubdivCCGAdjacentEdge *adjacent_edge)
952 {
953         const int num_adjacent_faces = adjacent_edge->num_adjacent_faces;
954         const int grid_size2 = subdiv_ccg->grid_size * 2;
955         if (num_adjacent_faces == 1) {
956                 /* Nothing to average with. */
957                 return;
958         }
959         /* Incrementall average result to elements of a first adjacent face.
960          *
961          * Arguably, this is less precise than accumulating and then diving once,
962          * but on another hand this is more stable when coordinates are big. */
963         for (int face_index = 1; face_index < num_adjacent_faces; face_index++) {
964                 /* NOTE: We ignore very first and very last elements, they correspond
965                  * to corner vertices, and they can belong to multiple edges.
966                  * The fact, that they can belong to multiple edges means we can't
967                  * safely average them.
968                  * The fact, that they correspond to a corner elements, means they will
969                  * be handled at the upcoming pass over corner elements. */
970                 for (int i = 1; i < grid_size2 - 1; i++) {
971                         CCGElem *grid_element_0 =
972                                 adjacent_edge->boundary_elements[0][i];
973                         CCGElem *grid_element_face_index =
974                                 adjacent_edge->boundary_elements[face_index][i];
975                         average_grid_element(subdiv_ccg,
976                                              key,
977                                              grid_element_0,
978                                              grid_element_face_index);
979                 }
980         }
981         /* Copy averaged value to all the other faces. */
982         for (int face_index = 1; face_index < num_adjacent_faces; face_index++) {
983                 for (int i = 1; i < grid_size2 -1; i++) {
984                         CCGElem *grid_element_0 =
985                                 adjacent_edge->boundary_elements[0][i];
986                         CCGElem *grid_element_face_index =
987                                 adjacent_edge->boundary_elements[face_index][i];
988                         copy_grid_element(subdiv_ccg,
989                                           key,
990                                           grid_element_face_index,
991                                           grid_element_0);
992                 }
993         }
994 }
995
996 static void subdiv_ccg_average_grids_boundaries_task(
997         void *__restrict userdata_v,
998         const int adjacent_edge_index,
999         const ParallelRangeTLS *__restrict UNUSED(tls_v))
1000 {
1001         AverageGridsBoundariesData *data = userdata_v;
1002         SubdivCCG *subdiv_ccg = data->subdiv_ccg;
1003         CCGKey *key = data->key;
1004         SubdivCCGAdjacentEdge *adjacent_edge =
1005                 &subdiv_ccg->adjacent_edges[adjacent_edge_index];
1006         subdiv_ccg_average_grids_boundary(subdiv_ccg, key, adjacent_edge);
1007 }
1008
/* Userdata for averaging grids at coarse vertices (corners). */
typedef struct AverageGridsCornerData {
	SubdivCCG *subdiv_ccg;
	CCGKey *key;
} AverageGridsCornerData;
1013
1014 static void subdiv_ccg_average_grids_corners(
1015         SubdivCCG *subdiv_ccg,
1016         CCGKey *key,
1017         SubdivCCGAdjacentVertex *adjacent_vertex)
1018 {
1019         const int num_adjacent_faces = adjacent_vertex->num_adjacent_faces;
1020         if (num_adjacent_faces == 1) {
1021                 /* Nothing to average with. */
1022                 return;
1023         }
1024         /* Incrementall average result to elements of a first adjacent face.
1025          * See comment to the boundary averaging. */
1026         for (int face_index = 1; face_index < num_adjacent_faces; face_index++) {
1027                 CCGElem *grid_element_0 =
1028                         adjacent_vertex->corner_elements[0];
1029                 CCGElem *grid_element_face_index =
1030                         adjacent_vertex->corner_elements[face_index];
1031                 average_grid_element(subdiv_ccg,
1032                                      key,
1033                                      grid_element_0,
1034                                      grid_element_face_index);
1035         }
1036         /* Copy averaged value to all the other faces. */
1037         for (int face_index = 1; face_index < num_adjacent_faces; face_index++) {
1038                 CCGElem *grid_element_0 =
1039                         adjacent_vertex->corner_elements[0];
1040                 CCGElem *grid_element_face_index =
1041                         adjacent_vertex->corner_elements[face_index];
1042                 copy_grid_element(subdiv_ccg,
1043                                   key,
1044                                   grid_element_face_index,
1045                                   grid_element_0);
1046         }
1047 }
1048
1049 static void subdiv_ccg_average_grids_corners_task(
1050         void *__restrict userdata_v,
1051         const int adjacent_vertex_index,
1052         const ParallelRangeTLS *__restrict UNUSED(tls_v))
1053 {
1054         AverageGridsCornerData *data = userdata_v;
1055         SubdivCCG *subdiv_ccg = data->subdiv_ccg;
1056         CCGKey *key = data->key;
1057         SubdivCCGAdjacentVertex *adjacent_vertex =
1058                 &subdiv_ccg->adjacent_vertices[adjacent_vertex_index];
1059         subdiv_ccg_average_grids_corners(subdiv_ccg, key, adjacent_vertex);
1060 }
1061
1062 static void subdiv_ccg_average_all_boundaries_and_corners(
1063         SubdivCCG *subdiv_ccg,
1064         CCGKey *key)
1065 {
1066         ParallelRangeSettings parallel_range_settings;
1067         BLI_parallel_range_settings_defaults(&parallel_range_settings);
1068         /* Average grids across coarse edges. */
1069         AverageGridsBoundariesData boundaries_data = {
1070                 .subdiv_ccg = subdiv_ccg,
1071                 .key = key,
1072         };
1073         BLI_task_parallel_range(0, subdiv_ccg->num_adjacent_edges,
1074                                 &boundaries_data,
1075                                 subdiv_ccg_average_grids_boundaries_task,
1076                                 &parallel_range_settings);
1077         /* Average grids at coarse vertices. */
1078         AverageGridsCornerData corner_data = {
1079                 .subdiv_ccg = subdiv_ccg,
1080                 .key = key,
1081         };
1082         BLI_task_parallel_range(0, subdiv_ccg->num_adjacent_vertices,
1083                                 &corner_data,
1084                                 subdiv_ccg_average_grids_corners_task,
1085                                 &parallel_range_settings);
1086 }
1087
1088 void BKE_subdiv_ccg_average_grids(SubdivCCG *subdiv_ccg)
1089 {
1090         CCGKey key;
1091         BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
1092         ParallelRangeSettings parallel_range_settings;
1093         BLI_parallel_range_settings_defaults(&parallel_range_settings);
1094         /* Average inner boundaries of grids (within one face), across faces
1095          * from different face-corners. */
1096         AverageInnerGridsData inner_data = {
1097                 .subdiv_ccg = subdiv_ccg,
1098                 .key = &key,
1099         };
1100         BLI_task_parallel_range(0, subdiv_ccg->num_faces,
1101                                 &inner_data,
1102                                 subdiv_ccg_average_inner_grids_task,
1103                                 &parallel_range_settings);
1104         subdiv_ccg_average_all_boundaries_and_corners(subdiv_ccg, &key);
1105 }
1106
/* Userdata for stitching inner grids of an explicit set of faces.
 * NOTE(review): "effected" (sic) follows the spelling used by the public
 * stitch API below; kept for consistency. */
typedef struct StitchFacesInnerGridsData {
	SubdivCCG *subdiv_ccg;
	CCGKey *key;
	struct CCGFace **effected_ccg_faces;
} StitchFacesInnerGridsData;
1112
1113 static void subdiv_ccg_stitch_face_inner_grids_task(
1114         void *__restrict userdata_v,
1115         const int face_index,
1116         const ParallelRangeTLS *__restrict UNUSED(tls_v))
1117 {
1118         StitchFacesInnerGridsData *data = userdata_v;
1119         SubdivCCG *subdiv_ccg = data->subdiv_ccg;
1120         CCGKey *key = data->key;
1121         struct CCGFace **effected_ccg_faces = data->effected_ccg_faces;
1122         struct CCGFace *effected_ccg_face = effected_ccg_faces[face_index];
1123         SubdivCCGFace *face = (SubdivCCGFace *)effected_ccg_face;
1124         subdiv_ccg_average_inner_face_grids(subdiv_ccg, key, face);
1125 }
1126
1127 void BKE_subdiv_ccg_average_stitch_faces(SubdivCCG *subdiv_ccg,
1128                                          struct CCGFace **effected_faces,
1129                                          int num_effected_faces)
1130 {
1131         CCGKey key;
1132         BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
1133         StitchFacesInnerGridsData data = {
1134                 .subdiv_ccg = subdiv_ccg,
1135                 .key = &key,
1136                 .effected_ccg_faces = effected_faces,
1137         };
1138         ParallelRangeSettings parallel_range_settings;
1139         BLI_parallel_range_settings_defaults(&parallel_range_settings);
1140         BLI_task_parallel_range(0, num_effected_faces,
1141                                 &data,
1142                                 subdiv_ccg_stitch_face_inner_grids_task,
1143                                 &parallel_range_settings);
1144         /* TODO(sergey): Only average elements which are adjacent to modified
1145          * faces. */
1146         subdiv_ccg_average_all_boundaries_and_corners(subdiv_ccg, &key);
1147 }
1148
1149 void BKE_subdiv_ccg_topology_counters(
1150         const SubdivCCG *subdiv_ccg,
1151         int *r_num_vertices, int *r_num_edges,
1152         int *r_num_faces, int *r_num_loops)
1153 {
1154         const int num_grids = subdiv_ccg->num_grids;
1155         const int grid_size = subdiv_ccg->grid_size;
1156         const int grid_area = grid_size * grid_size;
1157         const int num_edges_per_grid = 2 * (grid_size * (grid_size - 1));
1158         *r_num_vertices = num_grids * grid_area;
1159         *r_num_edges = num_grids * num_edges_per_grid;
1160         *r_num_faces = num_grids * (grid_size - 1) * (grid_size - 1);
1161         *r_num_loops = *r_num_faces * 4;
1162 }