2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version 2
5 * of the License, or (at your option) any later version.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software Foundation,
14 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 /** \file blender/editors/mesh/editmesh_undo.c
21 #include "MEM_guardedalloc.h"
25 #include "DNA_mesh_types.h"
26 #include "DNA_meshdata_types.h"
27 #include "DNA_object_types.h"
28 #include "DNA_key_types.h"
29 #include "DNA_layer_types.h"
31 #include "BLI_listbase.h"
32 #include "BLI_array_utils.h"
34 #include "BKE_context.h"
36 #include "BKE_layer.h"
38 #include "BKE_editmesh.h"
39 #include "BKE_undo_system.h"
41 #include "DEG_depsgraph.h"
43 #include "ED_object.h"
51 #define USE_ARRAY_STORE
53 #ifdef USE_ARRAY_STORE
54 // # define DEBUG_PRINT
55 // # define DEBUG_TIME
57 # include "PIL_time_utildefines.h"
60 # include "BLI_array_store.h"
61 # include "BLI_array_store_utils.h"
62 /* check on best size later... */
63 # define ARRAY_CHUNK_SIZE 256
65 # define USE_ARRAY_STORE_THREAD
68 #ifdef USE_ARRAY_STORE_THREAD
69 # include "BLI_task.h"
72 /** We only need this locally. */
73 static CLG_LogRef LOG = {"ed.undo.mesh"};
75 /* -------------------------------------------------------------------- */
76 /** \name Undo Conversion
79 #ifdef USE_ARRAY_STORE
81 /* Singly linked list of per-type layer groups: one node per run of layers
 * sharing the same CustomData type, each node holding the de-duplicated
 * array-store state for every layer in that run. */
82 typedef struct BArrayCustomData {
83 struct BArrayCustomData *next;
85 int states_len; /* Number of layers of this type (length of states[]). */
86 BArrayState *states[0]; /* Old-style flexible array; allocated past the struct. */
91 typedef struct UndoMesh {
96 * this isn't a perfect solution, if you edit keys and change shapes this works well (fixing [#32442]),
97 * but editing shape keys, going into object mode, removing or changing their order,
98 * then go back into editmode and undo will give issues - where the old index will be out of sync
99 * with the new object index.
101 * There are a few ways this could be made to work but for now its a known limitation with mixing
102 * object and editmode operations - Campbell */
105 #ifdef USE_ARRAY_STORE
106 /* NULL arrays are considered empty. */
107 struct { /* Most data is stored as 'custom' data, compacted into array-store states. */
108 BArrayCustomData *vdata, *edata, *ldata, *pdata; /* Per-domain custom-data layer lists. */
109 BArrayState **keyblocks; /* One state per shape-key block (me.key->totkey entries). */
110 BArrayState *mselect; /* State for the mesh-select history array. */
112 #endif /* USE_ARRAY_STORE */
#ifdef USE_ARRAY_STORE
120 /** \name Array Store
 *
 * File-local singleton holding the shared array-store instances and
 * bookkeeping for all mesh undo steps. */
124 struct BArrayStore_AtSize bs_stride; /* One array-store per element stride. */
127 /* We could have the undo API pass in the previous state, for now store a local list
 * so a new step can de-duplicate against the most recent one. */
128 ListBase local_links;
130 #ifdef USE_ARRAY_STORE_THREAD
134 } um_arraystore = {{NULL}};
/**
 * Compact the custom-data layers of one element domain (vert/edge/loop/poly)
 * into de-duplicated array-store states.
 *
 * Layers are grouped into runs of equal type; each run becomes one
 * #BArrayCustomData node on a singly linked list returned via \a r_bcd_first.
 * When a reference list from the previous undo step (\a bcd_reference) is
 * given, matching states are passed to the array-store so chunks are shared
 * between consecutive steps.
 *
 * NOTE(review): this extract is missing interior lines; comments only cover
 * what the visible statements establish.
 */
136 static void um_arraystore_cd_compact(
137 struct CustomData *cdata, const size_t data_len,
139 const BArrayCustomData *bcd_reference,
140 BArrayCustomData **r_bcd_first)
148 const BArrayCustomData *bcd_reference_current = bcd_reference;
149 BArrayCustomData *bcd = NULL, *bcd_first = NULL, *bcd_prev = NULL;
150 for (int layer_start = 0, layer_end; layer_start < cdata->totlayer; layer_start = layer_end) {
151 const CustomDataType type = cdata->layers[layer_start].type;
/* Extend the run while following layers share the same type. */
153 layer_end = layer_start + 1;
154 while ((layer_end < cdata->totlayer) &&
155 (type == cdata->layers[layer_end].type))
160 const int stride = CustomData_sizeof(type);
161 BArrayStore *bs = create ? BLI_array_store_at_size_ensure(&um_arraystore.bs_stride, stride, ARRAY_CHUNK_SIZE) : NULL;
162 const int layer_len = layer_end - layer_start;
165 if (bcd_reference_current && (bcd_reference_current->type == type)) {
166 /* common case, the reference is aligned */
169 bcd_reference_current = NULL;
171 /* do a full lookup when un-aligned */
173 const BArrayCustomData *bcd_iter = bcd_reference;
175 if (bcd_iter->type == type) {
176 bcd_reference_current = bcd_iter;
179 bcd_iter = bcd_iter->next;
/* Allocate the node with its trailing flexible states[] array in one block. */
186 bcd = MEM_callocN(sizeof(BArrayCustomData) + (layer_len * sizeof(BArrayState *)), __func__);
189 bcd->states_len = layer_end - layer_start;
192 bcd_prev->next = bcd;
201 CustomDataLayer *layer = &cdata->layers[layer_start];
202 for (int i = 0; i < layer_len; i++, layer++) {
/* Use the matching state from the reference step (if any) for de-duplication. */
205 BArrayState *state_reference =
206 (bcd_reference_current && i < bcd_reference_current->states_len) ?
207 bcd_reference_current->states[i] : NULL;
208 bcd->states[i] = BLI_array_store_state_add(
209 bs, layer->data, (size_t)data_len * stride, state_reference);
212 bcd->states[i] = NULL;
/* The layer's data now lives in the array-store; free the raw array. */
217 MEM_freeN(layer->data);
223 if (bcd_reference_current) {
224 bcd_reference_current = bcd_reference_current->next;
230 *r_bcd_first = bcd_first;
/**
 * Expand array-store states back into freshly allocated custom-data layer
 * arrays (the inverse of #um_arraystore_cd_compact).
 *
 * \note There is no room for data going out of sync here.
 * The layers and the states are stored together so this can be kept working.
 */
238 static void um_arraystore_cd_expand(
239 const BArrayCustomData *bcd, struct CustomData *cdata, const size_t data_len)
241 CustomDataLayer *layer = cdata->layers;
243 const int stride = CustomData_sizeof(bcd->type);
244 for (int i = 0; i < bcd->states_len; i++) {
245 BLI_assert(bcd->type == layer->type);
246 if (bcd->states[i]) {
/* Allocates a new array the layer now owns. */
248 layer->data = BLI_array_store_state_data_get_alloc(bcd->states[i], &state_len);
249 BLI_assert(stride * data_len == state_len);
250 UNUSED_VARS_NDEBUG(stride, data_len);
/**
 * Remove all states of a #BArrayCustomData list from the array-store and
 * free the list nodes.
 */
261 static void um_arraystore_cd_free(BArrayCustomData *bcd)
264 BArrayCustomData *bcd_next = bcd->next;
265 const int stride = CustomData_sizeof(bcd->type);
266 BArrayStore *bs = BLI_array_store_at_size_get(&um_arraystore.bs_stride, stride);
267 for (int i = 0; i < bcd->states_len; i++) {
268 if (bcd->states[i]) {
269 BLI_array_store_state_remove(bs, bcd->states[i]);
/**
 * Compact (or free) all mesh arrays of an undo step into the array-store:
 * custom-data per domain, shape-key blocks and the select history.
 *
 * \param create: When false, only free the arrays.
 * This is done since when reading from an undo state, they must be temporarily expanded.
 * then discarded afterwards, having this argument avoids having 2x code paths.
 */
282 static void um_arraystore_compact_ex(
283 UndoMesh *um, const UndoMesh *um_ref,
288 um_arraystore_cd_compact(&me->vdata, me->totvert, create, um_ref ? um_ref->store.vdata : NULL, &um->store.vdata);
289 um_arraystore_cd_compact(&me->edata, me->totedge, create, um_ref ? um_ref->store.edata : NULL, &um->store.edata);
290 um_arraystore_cd_compact(&me->ldata, me->totloop, create, um_ref ? um_ref->store.ldata : NULL, &um->store.ldata);
291 um_arraystore_cd_compact(&me->pdata, me->totpoly, create, um_ref ? um_ref->store.pdata : NULL, &um->store.pdata);
293 if (me->key && me->key->totkey) {
294 const size_t stride = me->key->elemsize;
295 BArrayStore *bs = create ? BLI_array_store_at_size_ensure(&um_arraystore.bs_stride, stride, ARRAY_CHUNK_SIZE) : NULL;
297 um->store.keyblocks = MEM_mallocN(me->key->totkey * sizeof(*um->store.keyblocks), __func__);
299 KeyBlock *keyblock = me->key->block.first;
300 for (int i = 0; i < me->key->totkey; i++, keyblock = keyblock->next) {
/* De-duplicate against the same key-block index of the reference step when valid. */
302 BArrayState *state_reference =
303 (um_ref && um_ref->me.key && (i < um_ref->me.key->totkey)) ?
304 um_ref->store.keyblocks[i] : NULL;
305 um->store.keyblocks[i] = BLI_array_store_state_add(
306 bs, keyblock->data, (size_t)keyblock->totelem * stride,
310 if (keyblock->data) {
311 MEM_freeN(keyblock->data);
312 keyblock->data = NULL;
317 if (me->mselect && me->totselect) {
318 BLI_assert(create == (um->store.mselect == NULL));
320 BArrayState *state_reference = um_ref ? um_ref->store.mselect : NULL;
321 const size_t stride = sizeof(*me->mselect);
322 BArrayStore *bs = BLI_array_store_at_size_ensure(&um_arraystore.bs_stride, stride, ARRAY_CHUNK_SIZE);
323 um->store.mselect = BLI_array_store_state_add(
324 bs, me->mselect, (size_t)me->totselect * stride, state_reference);
327 /* keep me->totselect for validation */
328 MEM_freeN(me->mselect);
333 um_arraystore.users += 1;
/* Customdata pointers now reference freed arrays; refresh them. */
336 BKE_mesh_update_customdata_pointers(me, false);
/**
 * Move data from allocated arrays to de-duplicated states and clear arrays.
 */
342 static void um_arraystore_compact(UndoMesh *um, const UndoMesh *um_ref)
344 um_arraystore_compact_ex(um, um_ref, true);
/**
 * Wrapper for #um_arraystore_compact that (when DEBUG_PRINT/DEBUG_TIME are
 * enabled) reports timing and the compacted-vs-expanded memory ratio,
 * both overall and for this step alone.
 */
347 static void um_arraystore_compact_with_info(UndoMesh *um, const UndoMesh *um_ref)
350 size_t size_expanded_prev, size_compacted_prev;
351 BLI_array_store_at_size_calc_memory_usage(&um_arraystore.bs_stride, &size_expanded_prev, &size_compacted_prev);
355 TIMEIT_START(mesh_undo_compact);
358 um_arraystore_compact(um, um_ref);
361 TIMEIT_END(mesh_undo_compact);
366 size_t size_expanded, size_compacted;
367 BLI_array_store_at_size_calc_memory_usage(&um_arraystore.bs_stride, &size_expanded, &size_compacted);
/* -1.0 signals "no data" so the percentage below is never a division by zero. */
369 const double percent_total = size_expanded ?
370 (((double)size_compacted / (double)size_expanded) * 100.0) : -1.0;
372 size_t size_expanded_step = size_expanded - size_expanded_prev;
373 size_t size_compacted_step = size_compacted - size_compacted_prev;
374 const double percent_step = size_expanded_step ?
375 (((double)size_compacted_step / (double)size_expanded_step) * 100.0) : -1.0;
377 printf("overall memory use: %.8f%% of expanded size\n", percent_total);
378 printf("step memory use: %.8f%% of expanded size\n", percent_step);
383 #ifdef USE_ARRAY_STORE_THREAD
/* Task payload: the step to compact and (optionally) the previous step to
 * de-duplicate against. */
387 const UndoMesh *um_ref; /* can be NULL */
/** Background-task entry point: compacts one undo step off the main thread. */
389 static void um_arraystore_compact_cb(TaskPool *__restrict UNUSED(pool),
391 int UNUSED(threadid))
393 struct UMArrayData *um_data = taskdata;
394 um_arraystore_compact_with_info(um_data->um, um_data->um_ref);
397 #endif /* USE_ARRAY_STORE_THREAD */
/**
 * Remove data we only expanded for temporary use
 * (frees the arrays without re-adding states, see \a create == false).
 */
402 static void um_arraystore_expand_clear(UndoMesh *um)
404 um_arraystore_compact_ex(um, NULL, false);
/**
 * Re-create all mesh arrays of an undo step from their array-store states:
 * custom-data per domain, shape-key block data and the select history.
 */
407 static void um_arraystore_expand(UndoMesh *um)
411 um_arraystore_cd_expand(um->store.vdata, &me->vdata, me->totvert);
412 um_arraystore_cd_expand(um->store.edata, &me->edata, me->totedge);
413 um_arraystore_cd_expand(um->store.ldata, &me->ldata, me->totloop);
414 um_arraystore_cd_expand(um->store.pdata, &me->pdata, me->totpoly);
416 if (um->store.keyblocks) {
417 const size_t stride = me->key->elemsize;
418 KeyBlock *keyblock = me->key->block.first;
419 for (int i = 0; i < me->key->totkey; i++, keyblock = keyblock->next) {
420 BArrayState *state = um->store.keyblocks[i];
421 keyblock->data = BLI_array_store_state_data_get_alloc(state, &state_len);
423 BLI_assert(keyblock->totelem == (state_len / stride));
424 UNUSED_VARS_NDEBUG(stride);
428 if (um->store.mselect) {
429 const size_t stride = sizeof(*me->mselect);
430 BArrayState *state = um->store.mselect;
432 me->mselect = BLI_array_store_state_data_get_alloc(state, &state_len);
433 BLI_assert(me->totselect == (state_len / stride));
434 UNUSED_VARS_NDEBUG(stride);
437 /* not essential, but prevents accidental dangling pointer access */
438 BKE_mesh_update_customdata_pointers(me, false);
/**
 * Remove every state an undo step holds from the array-store and drop its
 * user count; when the last user is gone, free the shared stores and the
 * background task pool.
 */
441 static void um_arraystore_free(UndoMesh *um)
445 um_arraystore_cd_free(um->store.vdata);
446 um_arraystore_cd_free(um->store.edata);
447 um_arraystore_cd_free(um->store.ldata);
448 um_arraystore_cd_free(um->store.pdata);
450 if (um->store.keyblocks) {
451 const size_t stride = me->key->elemsize;
452 BArrayStore *bs = BLI_array_store_at_size_get(&um_arraystore.bs_stride, stride);
453 for (int i = 0; i < me->key->totkey; i++) {
454 BArrayState *state = um->store.keyblocks[i];
455 BLI_array_store_state_remove(bs, state);
457 MEM_freeN(um->store.keyblocks);
458 um->store.keyblocks = NULL;
461 if (um->store.mselect) {
462 const size_t stride = sizeof(*me->mselect);
463 BArrayStore *bs = BLI_array_store_at_size_get(&um_arraystore.bs_stride, stride);
464 BArrayState *state = um->store.mselect;
465 BLI_array_store_state_remove(bs, state);
466 um->store.mselect = NULL;
469 um_arraystore.users -= 1;
471 BLI_assert(um_arraystore.users >= 0);
473 if (um_arraystore.users == 0) {
475 printf("mesh undo store: freeing all data!\n");
477 BLI_array_store_at_size_clear(&um_arraystore.bs_stride);
479 #ifdef USE_ARRAY_STORE_THREAD
480 BLI_task_pool_free(um_arraystore.task_pool);
481 um_arraystore.task_pool = NULL;
489 #endif /* USE_ARRAY_STORE */
/* undo simply makes copies of a bmesh */
/**
 * Encode the edit-mesh into an #UndoMesh: convert the BMesh into the
 * step's Mesh, copy shape-key data, then compact everything into the
 * array-store (on a background task when threading is enabled).
 */
494 static void *undomesh_from_editmesh(UndoMesh *um, BMEditMesh *em, Key *key)
496 BLI_assert(BLI_array_is_zeroed(um, 1));
497 #ifdef USE_ARRAY_STORE_THREAD
498 /* chances this waits is low, but must have finished */
499 if (um_arraystore.task_pool) {
500 BLI_task_pool_work_and_wait(um_arraystore.task_pool);
503 /* make sure shape keys work */
504 um->me.key = key ? BKE_key_copy_nolib(key) : NULL;
506 /* BM_mesh_validate(em->bm); */ /* for troubleshooting */
509 NULL, em->bm, &um->me, (&(struct BMeshToMeshParams){
510 /* Undo code should not be manipulating 'G_MAIN->object' hooks/vertex-parent. */
511 .calc_object_remap = false,
512 .cd_mask_extra = CD_MASK_SHAPE_KEYINDEX,
515 um->selectmode = em->selectmode;
516 um->shapenr = em->bm->shapenr;
518 #ifdef USE_ARRAY_STORE
520 /* We could be more clever here,
521 * the previous undo state may be from a separate mesh. */
522 const UndoMesh *um_ref = um_arraystore.local_links.last ?
523 ((LinkData *)um_arraystore.local_links.last)->data : NULL;
526 BLI_addtail(&um_arraystore.local_links, BLI_genericNodeN(um));
528 #ifdef USE_ARRAY_STORE_THREAD
/* Lazily create the background pool on first use. */
529 if (um_arraystore.task_pool == NULL) {
530 TaskScheduler *scheduler = BLI_task_scheduler_get();
531 um_arraystore.task_pool = BLI_task_pool_create_background(scheduler, NULL);
534 struct UMArrayData *um_data = MEM_mallocN(sizeof(*um_data), __func__);
536 um_data->um_ref = um_ref;
539 um_arraystore.task_pool,
540 um_arraystore_compact_cb, um_data, true, TASK_PRIORITY_LOW);
/* Non-threaded fallback: compact synchronously. */
542 um_arraystore_compact_with_info(um, um_ref);
/**
 * Decode an #UndoMesh back into the edit-mesh: expand the array-store data,
 * rebuild the BMesh from the stored Mesh, restore select/shape state, and
 * patch the active shape-key basis so offsets stay correct (see T35170).
 */
550 static void undomesh_to_editmesh(UndoMesh *um, BMEditMesh *em, Mesh *obmesh)
555 Key *key = obmesh->key;
557 #ifdef USE_ARRAY_STORE
558 #ifdef USE_ARRAY_STORE_THREAD
559 /* chances this waits is low, but must have finished */
560 BLI_task_pool_work_and_wait(um_arraystore.task_pool);
564 TIMEIT_START(mesh_undo_expand);
567 um_arraystore_expand(um);
570 TIMEIT_END(mesh_undo_expand);
572 #endif /* USE_ARRAY_STORE */
574 const BMAllocTemplate allocsize = BMALLOC_TEMPLATE_FROM_ME(&um->me);
576 em->bm->shapenr = um->shapenr;
582 &((struct BMeshCreateParams){.use_toolflags = true,}));
585 bm, &um->me, (&(struct BMeshFromMeshParams){
586 .calc_face_normal = true, .active_shapekey = um->shapenr,
589 em_tmp = BKE_editmesh_create(bm, true);
592 em->selectmode = um->selectmode;
593 bm->selectmode = um->selectmode;
596 bm->spacearr_dirty = BM_SPACEARR_DIRTY_ALL;
598 /* T35170: Restore the active key on the RealMesh. Otherwise 'fake' offset propagation happens
599 * if the active is a basis for any other. */
600 if (key && (key->type == KEY_RELATIVE)) {
601 /* Since we can't add, remove or reorder keyblocks in editmode, it's safe to assume
602 * shapenr from restored bmesh and keyblock indices are in sync. */
603 const int kb_act_idx = ob->shapenr - 1;
605 /* If it is, let's patch the current mesh key block to its restored value.
606 * Else, the offsets won't be computed and it won't matter. */
607 if (BKE_keyblock_is_basis(key, kb_act_idx)) {
608 KeyBlock *kb_act = BLI_findlink(&key->block, kb_act_idx);
610 if (kb_act->totelem != um->me.totvert) {
611 /* The current mesh has some extra/missing verts compared to the undo, adjust. */
612 MEM_SAFE_FREE(kb_act->data);
613 kb_act->data = MEM_mallocN((size_t)(key->elemsize * bm->totvert), __func__);
614 kb_act->totelem = um->me.totvert;
617 BKE_keyblock_update_from_mesh(&um->me, kb_act);
621 ob->shapenr = um->shapenr;
625 #ifdef USE_ARRAY_STORE
/* Free the temporarily-expanded arrays again (create == false path). */
626 um_arraystore_expand_clear(um);
/**
 * Free all data owned by an undo step. With the array-store enabled, the
 * step is first expanded so custom-data allocations are freed with the
 * mesh, then its states are removed from the shared store.
 */
630 static void undomesh_free_data(UndoMesh *um)
634 #ifdef USE_ARRAY_STORE
636 #ifdef USE_ARRAY_STORE_THREAD
637 /* chances this waits is low, but must have finished */
638 BLI_task_pool_work_and_wait(um_arraystore.task_pool);
641 /* we need to expand so any allocations in custom-data are freed with the mesh */
642 um_arraystore_expand(um);
645 LinkData *link = BLI_findptr(&um_arraystore.local_links, um, offsetof(LinkData, data));
646 BLI_remlink(&um_arraystore.local_links, link);
649 um_arraystore_free(um);
653 BKE_key_free(me->key);
/**
 * Return the context's edit-object when it is a mesh currently in
 * edit-mode (has an edit BMesh), otherwise NULL.
 */
660 static Object *editmesh_object_from_context(bContext *C)
662 Object *obedit = CTX_data_edit_object(C);
663 if (obedit && obedit->type == OB_MESH) {
664 Mesh *me = obedit->data;
665 if (me->edit_btmesh != NULL) {
674 /* -------------------------------------------------------------------- */
675 /** \name Implements ED Undo System
677 * \note This is similar for all edit-mode types.
/* One element per object in multi-object edit-mode: the object reference
 * plus its encoded mesh state. */
680 typedef struct MeshUndoStep_Elem {
681 struct MeshUndoStep_Elem *next, *prev;
682 UndoRefID_Object obedit_ref;
/* The undo step: an array of per-object elements plus an ID remapping table. */
686 typedef struct MeshUndoStep {
688 struct UndoIDPtrMap *id_map;
689 MeshUndoStep_Elem *elems;
/** Undo-system poll: true when a mesh object is in edit-mode. */
693 static bool mesh_undosys_poll(bContext *C)
695 return editmesh_object_from_context(C) != NULL;
/**
 * Encode one undo step: gather every unique-data mesh object in edit-mode
 * for the active view-layer and snapshot each edit-mesh into an element.
 */
698 static bool mesh_undosys_step_encode(struct bContext *C, struct Main *UNUSED(bmain), UndoStep *us_p)
700 MeshUndoStep *us = (MeshUndoStep *)us_p;
702 ViewLayer *view_layer = CTX_data_view_layer(C);
703 uint objects_len = 0;
704 Object **objects = BKE_view_layer_array_from_objects_in_edit_mode_unique_data(view_layer, CTX_wm_view3d(C), &objects_len);
706 us->elems = MEM_callocN(sizeof(*us->elems) * objects_len, __func__);
707 us->elems_len = objects_len;
709 for (uint i = 0; i < objects_len; i++) {
710 Object *ob = objects[i];
711 MeshUndoStep_Elem *elem = &us->elems[i];
713 elem->obedit_ref.ptr = ob;
714 Mesh *me = elem->obedit_ref.ptr->data;
715 undomesh_from_editmesh(&elem->data, me->edit_btmesh, me->key);
/* Accumulate per-element size so the undo system can budget memory. */
716 us->step.data_size += elem->data.undo_size;
/**
 * Decode one undo step: ensure every referenced object is in edit-mode,
 * restore each edit-mesh from its element, tag geometry for re-evaluation
 * and notify listeners.
 */
722 static void mesh_undosys_step_decode(struct bContext *C, struct Main *bmain, UndoStep *us_p, int UNUSED(dir))
724 MeshUndoStep *us = (MeshUndoStep *)us_p;
726 Scene *scene = CTX_data_scene(C);
/* First pass: enter edit-mode on all objects before touching any data. */
727 for (uint i = 0; i < us->elems_len; i++) {
728 MeshUndoStep_Elem *elem = &us->elems[i];
729 Object *obedit = elem->obedit_ref.ptr;
730 ED_object_editmode_enter_ex(bmain, scene, obedit, EM_NO_CONTEXT);
733 BLI_assert(mesh_undosys_poll(C));
/* Second pass: apply the stored state to each edit-mesh. */
735 for (uint i = 0; i < us->elems_len; i++) {
736 MeshUndoStep_Elem *elem = &us->elems[i];
737 Object *obedit = elem->obedit_ref.ptr;
738 Mesh *me = obedit->data;
739 if (me->edit_btmesh == NULL) {
740 /* Should never fail, may not crash but can give odd behavior. */
741 CLOG_ERROR(&LOG, "name='%s', failed to enter edit-mode for object '%s', undo state invalid",
742 us_p->name, obedit->id.name);
745 BMEditMesh *em = me->edit_btmesh;
746 undomesh_to_editmesh(&elem->data, em, obedit->data);
747 DEG_id_tag_update(&obedit->id, ID_RECALC_GEOMETRY);
750 /* The first element is always active */
751 ED_undo_object_set_active_or_warn(CTX_data_view_layer(C), us->elems[0].obedit_ref.ptr, us_p->name, &LOG);
753 WM_event_add_notifier(C, NC_GEOM | ND_DATA, NULL);
/** Free one undo step: every per-object element and the ID map. */
756 static void mesh_undosys_step_free(UndoStep *us_p)
758 MeshUndoStep *us = (MeshUndoStep *)us_p;
760 for (uint i = 0; i < us->elems_len; i++) {
761 MeshUndoStep_Elem *elem = &us->elems[i];
762 undomesh_free_data(&elem->data);
764 MEM_freeN(us->elems);
766 if (us->id_map != NULL) {
767 BKE_undosys_ID_map_destroy(us->id_map);
/**
 * Visit every ID reference held by the step (object references and the
 * ID map) so the undo system can remap pointers after file reloads.
 */
771 static void mesh_undosys_foreach_ID_ref(
772 UndoStep *us_p, UndoTypeForEachIDRefFn foreach_ID_ref_fn, void *user_data)
774 MeshUndoStep *us = (MeshUndoStep *)us_p;
776 for (uint i = 0; i < us->elems_len; i++) {
777 MeshUndoStep_Elem *elem = &us->elems[i];
778 foreach_ID_ref_fn(user_data, ((UndoRefID *)&elem->obedit_ref));
781 if (us->id_map != NULL) {
782 BKE_undosys_ID_map_foreach_ID_ref(us->id_map, foreach_ID_ref_fn, user_data);
/* Export for ED_undo_sys. */
/** Register the edit-mesh undo type: fill in the callback table. */
787 void ED_mesh_undosys_type(UndoType *ut)
789 ut->name = "Edit Mesh";
790 ut->poll = mesh_undosys_poll;
791 ut->step_encode = mesh_undosys_step_encode;
792 ut->step_decode = mesh_undosys_step_decode;
793 ut->step_free = mesh_undosys_step_free;
795 ut->step_foreach_ID_ref = mesh_undosys_foreach_ID_ref;
797 ut->use_context = true;
799 ut->step_size = sizeof(MeshUndoStep);