Atomics: Make naming more obvious about which value is being returned
author    Sergey Sharybin <sergey.vfx@gmail.com>
          Tue, 15 Nov 2016 11:16:26 +0000 (12:16 +0100)
committer Sergey Sharybin <sergey.vfx@gmail.com>
          Tue, 15 Nov 2016 11:16:26 +0000 (12:16 +0100)
18 files changed:
intern/atomic/atomic_ops.h
intern/atomic/intern/atomic_ops_ext.h
intern/atomic/intern/atomic_ops_msvc.h
intern/atomic/intern/atomic_ops_unix.h
intern/cycles/kernel/kernel_passes.h
intern/cycles/util/util_atomic.h
intern/cycles/util/util_stats.h
intern/guardedalloc/intern/mallocn_guarded_impl.c
intern/guardedalloc/intern/mallocn_lockfree_impl.c
source/blender/blenkernel/intern/depsgraph.c
source/blender/blenkernel/intern/dynamicpaint.c
source/blender/blenkernel/intern/mesh_evaluate.c
source/blender/blenkernel/intern/pbvh.c
source/blender/blenlib/intern/task.c
source/blender/compositor/intern/COM_ExecutionGroup.cpp
source/blender/depsgraph/intern/eval/deg_eval.cc
source/blender/editors/space_file/filelist.c
source/gameengine/VideoTexture/VideoDeckLink.cpp

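The rename follows the GCC/Clang __sync convention that the unix backend wraps: an "add_and_fetch" operation returns the value *after* the addition, while the "fetch_and_*" helpers (which keep their names) return the value from *before* it. A minimal standalone sketch of the difference, in plain C with the underlying builtins (illustrative only, not part of the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t v = 10;
	/* add_and_fetch: returns the NEW value. */
	assert(__sync_add_and_fetch(&v, 5) == 15);
	/* fetch_and_add: returns the OLD value (v becomes 20). */
	assert(__sync_fetch_and_add(&v, 5) == 15);
	assert(v == 20);
	return 0;
}
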
index f78eab7951fe07f635987e595f4ca611514b205d..c3926fdd68fb4a16f9d8c9556f143274724c5a12 100644 (file)
 /* Function prototypes. */
 
 #if (LG_SIZEOF_PTR == 8 || LG_SIZEOF_INT == 8)
-ATOMIC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x);
-ATOMIC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x);
+ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x);
+ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x);
 ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _new);
 #endif
 
-ATOMIC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x);
-ATOMIC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x);
+ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x);
+ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x);
 ATOMIC_INLINE uint32_t atomic_cas_uint32(uint32_t *v, uint32_t old, uint32_t _new);
 
 ATOMIC_INLINE uint32_t atomic_fetch_and_add_uint32(uint32_t *p, uint32_t x);
@@ -93,18 +93,18 @@ ATOMIC_INLINE uint32_t atomic_fetch_and_and_uint32(uint32_t *p, uint32_t x);
 ATOMIC_INLINE uint8_t atomic_fetch_and_or_uint8(uint8_t *p, uint8_t b);
 ATOMIC_INLINE uint8_t atomic_fetch_and_and_uint8(uint8_t *p, uint8_t b);
 
-ATOMIC_INLINE size_t atomic_add_z(size_t *p, size_t x);
-ATOMIC_INLINE size_t atomic_sub_z(size_t *p, size_t x);
+ATOMIC_INLINE size_t atomic_add_and_fetch_z(size_t *p, size_t x);
+ATOMIC_INLINE size_t atomic_sub_and_fetch_z(size_t *p, size_t x);
 ATOMIC_INLINE size_t atomic_cas_z(size_t *v, size_t old, size_t _new);
 
-ATOMIC_INLINE unsigned atomic_add_u(unsigned *p, unsigned x);
-ATOMIC_INLINE unsigned atomic_sub_u(unsigned *p, unsigned x);
+ATOMIC_INLINE unsigned atomic_add_and_fetch_u(unsigned *p, unsigned x);
+ATOMIC_INLINE unsigned atomic_sub_and_fetch_u(unsigned *p, unsigned x);
 ATOMIC_INLINE unsigned atomic_cas_u(unsigned *v, unsigned old, unsigned _new);
 
 /* WARNING! Float 'atomics' are really faked ones, those are actually closer to some kind of spinlock-sync'ed operation,
  *          which means they are only efficient if collisions are highly unlikely (i.e. if probability of two threads
  *          working on the same pointer at the same time is very low). */
-ATOMIC_INLINE float atomic_add_fl(float *p, const float x);
+ATOMIC_INLINE float atomic_add_and_fetch_fl(float *p, const float x);
 
 /******************************************************************************/
 /* Include system-dependent implementations. */
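
The warning above means float "atomics" are emulated with a 32-bit compare-and-swap loop. A minimal sketch of that construction, assuming the GCC/Clang __sync builtins (the function name is illustrative, not the library's):

#include <stdint.h>

static float float_add_and_fetch_sketch(float *p, float x)
{
	union { uint32_t u; float f; } prev, next;
	do {
		prev.f = *p;          /* snapshot the current bits */
		next.f = prev.f + x;  /* the value we want to store */
		/* Retry if another thread modified *p in the meantime. */
	} while (__sync_val_compare_and_swap((uint32_t *)p, prev.u, next.u) != prev.u);
	return next.f;
}

Every collision costs a retry, which is why the comment stresses that these are only efficient when two threads rarely touch the same address.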
index 4065299d2ea755bfe5cbb352396d7e7ddfca6ad1..74ed327c1b7e5bd2c00500dbc29c18c2c02c8046 100644 (file)
 
 /******************************************************************************/
 /* size_t operations. */
-ATOMIC_INLINE size_t atomic_add_z(size_t *p, size_t x)
+ATOMIC_INLINE size_t atomic_add_and_fetch_z(size_t *p, size_t x)
 {
        assert(sizeof(size_t) == LG_SIZEOF_PTR);
 
 #if (LG_SIZEOF_PTR == 8)
-       return (size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x);
+       return (size_t)atomic_add_and_fetch_uint64((uint64_t *)p, (uint64_t)x);
 #elif (LG_SIZEOF_PTR == 4)
-       return (size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x);
+       return (size_t)atomic_add_and_fetch_uint32((uint32_t *)p, (uint32_t)x);
 #endif
 }
 
-ATOMIC_INLINE size_t atomic_sub_z(size_t *p, size_t x)
+ATOMIC_INLINE size_t atomic_sub_and_fetch_z(size_t *p, size_t x)
 {
        assert(sizeof(size_t) == LG_SIZEOF_PTR);
 
 #if (LG_SIZEOF_PTR == 8)
-       return (size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
+       return (size_t)atomic_add_and_fetch_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
 #elif (LG_SIZEOF_PTR == 4)
-       return (size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
+       return (size_t)atomic_add_and_fetch_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
 #endif
 }
 
@@ -91,25 +91,25 @@ ATOMIC_INLINE size_t atomic_cas_z(size_t *v, size_t old, size_t _new)
 
 /******************************************************************************/
 /* unsigned operations. */
-ATOMIC_INLINE unsigned atomic_add_u(unsigned *p, unsigned x)
+ATOMIC_INLINE unsigned atomic_add_and_fetch_u(unsigned *p, unsigned x)
 {
        assert(sizeof(unsigned) == LG_SIZEOF_INT);
 
 #if (LG_SIZEOF_INT == 8)
-       return (unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x);
+       return (unsigned)atomic_add_and_fetch_uint64((uint64_t *)p, (uint64_t)x);
 #elif (LG_SIZEOF_INT == 4)
-       return (unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x);
+       return (unsigned)atomic_add_and_fetch_uint32((uint32_t *)p, (uint32_t)x);
 #endif
 }
 
-ATOMIC_INLINE unsigned atomic_sub_u(unsigned *p, unsigned x)
+ATOMIC_INLINE unsigned atomic_sub_and_fetch_u(unsigned *p, unsigned x)
 {
        assert(sizeof(unsigned) == LG_SIZEOF_INT);
 
 #if (LG_SIZEOF_INT == 8)
-       return (unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
+       return (unsigned)atomic_add_and_fetch_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
 #elif (LG_SIZEOF_INT == 4)
-       return (unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
+       return (unsigned)atomic_add_and_fetch_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
 #endif
 }
 
@@ -127,7 +127,7 @@ ATOMIC_INLINE unsigned atomic_cas_u(unsigned *v, unsigned old, unsigned _new)
 /******************************************************************************/
 /* float operations. */
 
-ATOMIC_INLINE float atomic_add_fl(float *p, const float x)
+ATOMIC_INLINE float atomic_add_and_fetch_fl(float *p, const float x)
 {
        assert(sizeof(float) == sizeof(uint32_t));
 
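
A detail of the size_t/unsigned wrappers above: subtraction is implemented by adding the two's-complement negation, (uint64_t)-((int64_t)x), which relies on unsigned wraparound being well-defined in C. A standalone sketch of why that works:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t p = 100, x = 30;
	/* 100 + (2^32 - 30) wraps modulo 2^32, which yields exactly 70. */
	p += (uint32_t)-((int32_t)x);
	assert(p == 70);
	return 0;
}
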
index 3461719a4e7bb183e1f8361384bc086c308c88d7..e7aae4a74a0d90283fe789b239a3b9acc2d8f0ad 100644 (file)
 /******************************************************************************/
 /* 64-bit operations. */
 #if (LG_SIZEOF_PTR == 8 || LG_SIZEOF_INT == 8)
-ATOMIC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
 {
        return InterlockedExchangeAdd64((int64_t *)p, (int64_t)x) + x;
 }
 
-ATOMIC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
 {
        return InterlockedExchangeAdd64((int64_t *)p, -((int64_t)x)) - x;
 }
@@ -61,12 +61,12 @@ ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _ne
 
 /******************************************************************************/
 /* 32-bit operations. */
-ATOMIC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x)
+ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x)
 {
        return InterlockedExchangeAdd(p, x) + x;
 }
 
-ATOMIC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x)
+ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
 {
        return InterlockedExchangeAdd(p, -((int32_t)x)) - x;
 }
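
In the MSVC backend, InterlockedExchangeAdd/InterlockedExchangeAdd64 return the value the variable held *before* the addition, so the trailing "+ x" (or "- x") converts it into the post-operation value that the new names promise. The same old-to-new step with a portable builtin (a sketch, not the Windows API):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t counter = 7;
	/* Like InterlockedExchangeAdd, __atomic_fetch_add returns the OLD value. */
	uint32_t old = __atomic_fetch_add(&counter, 3, __ATOMIC_SEQ_CST);
	assert(old == 7 && counter == 10);
	assert(old + 3 == counter);  /* the "+ x" from the wrapper above */
	return 0;
}
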
index e63f09c76c503949840baed7477f402cb4a5b854..3d00f91be25b8d685852fe70e68c45ed2ff4d2cf 100644 (file)
 /* 64-bit operations. */
 #if (LG_SIZEOF_PTR == 8 || LG_SIZEOF_INT == 8)
 #  if (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
-ATOMIC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
 {
        return __sync_add_and_fetch(p, x);
 }
 
-ATOMIC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
 {
        return __sync_sub_and_fetch(p, x);
 }
@@ -73,7 +73,7 @@ ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _ne
        return __sync_val_compare_and_swap(v, old, _new);
 }
 #  elif (defined(__amd64__) || defined(__x86_64__))
-ATOMIC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
 {
        asm volatile (
            "lock; xaddq %0, %1;"
@@ -83,7 +83,7 @@ ATOMIC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x)
        return x;
 }
 
-ATOMIC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
 {
        x = (uint64_t)(-(int64_t)x);
        asm volatile (
@@ -112,12 +112,12 @@ ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _ne
 /******************************************************************************/
 /* 32-bit operations. */
 #if (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
-ATOMIC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x)
+ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x)
 {
        return __sync_add_and_fetch(p, x);
 }
 
-ATOMIC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x)
+ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
 {
        return __sync_sub_and_fetch(p, x);
 }
@@ -127,7 +127,7 @@ ATOMIC_INLINE uint32_t atomic_cas_uint32(uint32_t *v, uint32_t old, uint32_t _ne
    return __sync_val_compare_and_swap(v, old, _new);
 }
 #elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
-ATOMIC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x)
+ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x)
 {
        uint32_t ret = x;
        asm volatile (
@@ -138,7 +138,7 @@ ATOMIC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x)
        return ret+x;
 }
 
-ATOMIC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x)
+ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
 {
        uint32_t ret = (uint32_t)(-(int32_t)x);
        asm volatile (
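
The x86 fallback paths in this header are built on "lock xadd", which atomically adds the register into memory and leaves the memory's *previous* value in the register; "ret + x" (or "ret - x" in the sub variant) then produces the new value. A self-contained sketch of the same pattern, for x86/x86-64 with GCC-style inline asm (illustrative only):

#include <assert.h>
#include <stdint.h>

static uint32_t xadd_add_and_fetch(uint32_t *p, uint32_t x)
{
	uint32_t ret = x;
	asm volatile (
	    "lock; xaddl %0, %1;"
	    : "+r" (ret), "+m" (*p)  /* ret <- old *p; *p <- *p + x */
	    :
	    : "memory");
	return ret + x;  /* old value plus x is the new value */
}

int main(void)
{
	uint32_t v = 40;
	assert(xadd_add_and_fetch(&v, 2) == 42 && v == 42);
	return 0;
}
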
index 20cf3fa931bf974fa94b26bff203b643dba5bb39..7aec47e49571b303caae0db50eac2529bbf6a96d 100644 (file)
@@ -20,7 +20,7 @@ ccl_device_inline void kernel_write_pass_float(ccl_global float *buffer, int sam
 {
        ccl_global float *buf = buffer;
 #if defined(__SPLIT_KERNEL__) && defined(__WORK_STEALING__)
-       atomic_add_float(buf, value);
+       atomic_add_and_fetch_float(buf, value);
 #else
        *buf = (sample == 0)? value: *buf + value;
 #endif // __SPLIT_KERNEL__ && __WORK_STEALING__
@@ -33,9 +33,9 @@ ccl_device_inline void kernel_write_pass_float3(ccl_global float *buffer, int sa
        ccl_global float *buf_y = buffer + 1;
        ccl_global float *buf_z = buffer + 2;
 
-       atomic_add_float(buf_x, value.x);
-       atomic_add_float(buf_y, value.y);
-       atomic_add_float(buf_z, value.z);
+       atomic_add_and_fetch_float(buf_x, value.x);
+       atomic_add_and_fetch_float(buf_y, value.y);
+       atomic_add_and_fetch_float(buf_z, value.z);
 #else
        ccl_global float3 *buf = (ccl_global float3*)buffer;
        *buf = (sample == 0)? value: *buf + value;
@@ -50,10 +50,10 @@ ccl_device_inline void kernel_write_pass_float4(ccl_global float *buffer, int sa
        ccl_global float *buf_z = buffer + 2;
        ccl_global float *buf_w = buffer + 3;
 
-       atomic_add_float(buf_x, value.x);
-       atomic_add_float(buf_y, value.y);
-       atomic_add_float(buf_z, value.z);
-       atomic_add_float(buf_w, value.w);
+       atomic_add_and_fetch_float(buf_x, value.x);
+       atomic_add_and_fetch_float(buf_y, value.y);
+       atomic_add_and_fetch_float(buf_z, value.z);
+       atomic_add_and_fetch_float(buf_w, value.w);
 #else
        ccl_global float4 *buf = (ccl_global float4*)buffer;
        *buf = (sample == 0)? value: *buf + value;
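
In all three pass writers the non-atomic branch follows the same rule: sample 0 initializes the buffer and every later sample accumulates into it, so the pass buffer never needs pre-clearing. The rule in isolation (a sketch):

#include <assert.h>

static void write_pass_float(float *buf, int sample, float value)
{
	*buf = (sample == 0) ? value : *buf + value;
}

int main(void)
{
	float pass = -1.0f;  /* whatever garbage was there before sample 0 */
	write_pass_float(&pass, 0, 0.25f);
	write_pass_float(&pass, 1, 0.50f);
	assert(pass == 0.75f);
	return 0;
}
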
index 1d1e296334804932ddb41c7133ef625e5cecd40a..433e41fbbb6fb1f4abbed2ef7dd80ad267e36018 100644 (file)
@@ -39,7 +39,7 @@ ATOMIC_INLINE void atomic_update_max_z(size_t *maximum_value, size_t value)
 /* Float atomics implementation credits:
  *   http://suhorukov.blogspot.in/2011/12/opencl-11-atomic-operations-on-floating.html
  */
-ccl_device_inline void atomic_add_float(volatile ccl_global float *source,
+ccl_device_inline void atomic_add_and_fetch_float(volatile ccl_global float *source,
                                         const float operand)
 {
        union {
index b970b017270540672a4f57e44912f79c18600025..c21a8488c819e5f21b6b25503e18752218ab39e0 100644 (file)
@@ -29,13 +29,13 @@ public:
        explicit Stats(static_init_t) {}
 
        void mem_alloc(size_t size) {
-               atomic_add_z(&mem_used, size);
+               atomic_add_and_fetch_z(&mem_used, size);
                atomic_update_max_z(&mem_peak, mem_used);
        }
 
        void mem_free(size_t size) {
                assert(mem_used >= size);
-               atomic_sub_z(&mem_used, size);
+               atomic_sub_and_fetch_z(&mem_used, size);
        }
 
        size_t mem_used;
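
atomic_update_max_z, used above to track mem_peak, is itself a compare-and-swap loop: store the candidate only while it is larger than the current maximum, and re-check whenever another thread wins the race. A sketch of such a loop, assuming the __sync builtins (not necessarily the library's exact code):

#include <stddef.h>

static void update_max_sketch(size_t *maximum_value, size_t value)
{
	for (;;) {
		size_t prev = *maximum_value;
		if (value <= prev)
			break;  /* an equal or larger maximum is already stored */
		if (__sync_val_compare_and_swap(maximum_value, prev, value) == prev)
			break;  /* our value was stored */
		/* otherwise *maximum_value changed underneath us; re-check */
	}
}
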
index 1933e9d3ee372bc4f1f5b1556c2353093111a49d..76b7e072321221eda89429812a3b7c0ad3556c5a 100644 (file)
@@ -505,8 +505,8 @@ static void make_memhead_header(MemHead *memh, size_t len, const char *str)
        memt = (MemTail *)(((char *) memh) + sizeof(MemHead) + len);
        memt->tag3 = MEMTAG3;
 
-       atomic_add_u(&totblock, 1);
-       atomic_add_z(&mem_in_use, len);
+       atomic_add_and_fetch_u(&totblock, 1);
+       atomic_add_and_fetch_z(&mem_in_use, len);
 
        mem_lock_thread();
        addtail(membase, &memh->next);
@@ -638,7 +638,7 @@ void *MEM_guarded_mapallocN(size_t len, const char *str)
        if (memh != (MemHead *)-1) {
                make_memhead_header(memh, len, str);
                memh->mmap = 1;
-               atomic_add_z(&mmap_in_use, len);
+               atomic_add_and_fetch_z(&mmap_in_use, len);
                mem_lock_thread();
                peak_mem = mmap_in_use > peak_mem ? mmap_in_use : peak_mem;
                mem_unlock_thread();
@@ -1007,8 +1007,8 @@ static void rem_memblock(MemHead *memh)
        }
        mem_unlock_thread();
 
-       atomic_sub_u(&totblock, 1);
-       atomic_sub_z(&mem_in_use, memh->len);
+       atomic_sub_and_fetch_u(&totblock, 1);
+       atomic_sub_and_fetch_z(&mem_in_use, memh->len);
 
 #ifdef DEBUG_MEMDUPLINAME
        if (memh->need_free_name)
@@ -1016,7 +1016,7 @@ static void rem_memblock(MemHead *memh)
 #endif
 
        if (memh->mmap) {
-               atomic_sub_z(&mmap_in_use, memh->len);
+               atomic_sub_and_fetch_z(&mmap_in_use, memh->len);
 #if defined(WIN32)
                /* our windows mmap implementation is not thread safe */
                mem_lock_thread();
index a80d67c3e80c53928fe59fa25305985578da7c02..ce8a5b29eceb898507076b5d209f099caf9f3b8a 100644 (file)
@@ -142,11 +142,11 @@ void MEM_lockfree_freeN(void *vmemh)
                return;
        }
 
-       atomic_sub_u(&totblock, 1);
-       atomic_sub_z(&mem_in_use, len);
+       atomic_sub_and_fetch_u(&totblock, 1);
+       atomic_sub_and_fetch_z(&mem_in_use, len);
 
        if (MEMHEAD_IS_MMAP(memh)) {
-               atomic_sub_z(&mmap_in_use, len);
+               atomic_sub_and_fetch_z(&mmap_in_use, len);
 #if defined(WIN32)
                /* our windows mmap implementation is not thread safe */
                mem_lock_thread();
@@ -287,8 +287,8 @@ void *MEM_lockfree_callocN(size_t len, const char *str)
 
        if (LIKELY(memh)) {
                memh->len = len;
-               atomic_add_u(&totblock, 1);
-               atomic_add_z(&mem_in_use, len);
+               atomic_add_and_fetch_u(&totblock, 1);
+               atomic_add_and_fetch_z(&mem_in_use, len);
                update_maximum(&peak_mem, mem_in_use);
 
                return PTR_FROM_MEMHEAD(memh);
@@ -312,8 +312,8 @@ void *MEM_lockfree_mallocN(size_t len, const char *str)
                }
 
                memh->len = len;
-               atomic_add_u(&totblock, 1);
-               atomic_add_z(&mem_in_use, len);
+               atomic_add_and_fetch_u(&totblock, 1);
+               atomic_add_and_fetch_z(&mem_in_use, len);
                update_maximum(&peak_mem, mem_in_use);
 
                return PTR_FROM_MEMHEAD(memh);
@@ -361,8 +361,8 @@ void *MEM_lockfree_mallocN_aligned(size_t len, size_t alignment, const char *str
 
                memh->len = len | (size_t) MEMHEAD_ALIGN_FLAG;
                memh->alignment = (short) alignment;
-               atomic_add_u(&totblock, 1);
-               atomic_add_z(&mem_in_use, len);
+               atomic_add_and_fetch_u(&totblock, 1);
+               atomic_add_and_fetch_z(&mem_in_use, len);
                update_maximum(&peak_mem, mem_in_use);
 
                return PTR_FROM_MEMHEAD(memh);
@@ -396,9 +396,9 @@ void *MEM_lockfree_mapallocN(size_t len, const char *str)
 
        if (memh != (MemHead *)-1) {
                memh->len = len | (size_t) MEMHEAD_MMAP_FLAG;
-               atomic_add_u(&totblock, 1);
-               atomic_add_z(&mem_in_use, len);
-               atomic_add_z(&mmap_in_use, len);
+               atomic_add_and_fetch_u(&totblock, 1);
+               atomic_add_and_fetch_z(&mem_in_use, len);
+               atomic_add_and_fetch_z(&mmap_in_use, len);
 
                update_maximum(&peak_mem, mem_in_use);
                update_maximum(&peak_mem, mmap_in_use);
index 02ae123a71ecbfce6da6eee3a4e1eda5b7ffc0c3..50f8423bbff96add5aaabc7c92507ca77e8b6562 100644 (file)
@@ -3284,7 +3284,7 @@ void DAG_threaded_update_handle_node_updated(void *node_v,
        for (itA = node->child; itA; itA = itA->next) {
                DagNode *child_node = itA->node;
                if (child_node != node) {
-                       atomic_sub_uint32(&child_node->num_pending_parents, 1);
+                       atomic_sub_and_fetch_uint32(&child_node->num_pending_parents, 1);
 
                        if (child_node->num_pending_parents == 0) {
                                bool need_schedule;
index c7399047ed5e8822f3fe490d67eb57890c247410..660709231537e08070497728d43c63399fee1476 100644 (file)
@@ -2264,7 +2264,7 @@ static void dynamic_paint_create_uv_surface_neighbor_cb(void *userdata, const in
                                                         * to non-(-1) *before* its tri_index is set (i.e. that it cannot be used as a neighbour).
                                                         */
                                                        tPoint->neighbour_pixel = ind - 1;
-                                                       atomic_add_uint32(&tPoint->neighbour_pixel, 1);
+                                                       atomic_add_and_fetch_uint32(&tPoint->neighbour_pixel, 1);
                                                        tPoint->tri_index = i;
 
                                                        /* Now calculate pixel data for this pixel as it was on polygon surface */
@@ -2289,7 +2289,7 @@ static void dynamic_paint_create_uv_surface_neighbor_cb(void *userdata, const in
 
                /* Increase the final number of active surface points if relevant. */
                if (tPoint->tri_index != -1)
-                       atomic_add_uint32(active_points, 1);
+                       atomic_add_and_fetch_uint32(active_points, 1);
        }
 }
 
index 016c9c863f0c80c2e549089ec0231326115311e9..a3fe73e4b11c32bef9f38446922d35262ef24f04 100644 (file)
@@ -238,7 +238,7 @@ static void mesh_calc_normals_poly_accum_task_cb(void *userdata, const int pidx)
 
                        /* accumulate */
                        for (int k = 3; k--; ) {
-                               atomic_add_fl(&vnors[ml[i].v][k], pnor[k] * fac);
+                               atomic_add_and_fetch_fl(&vnors[ml[i].v][k], pnor[k] * fac);
                        }
                        prev_edge = cur_edge;
                }
index ff69f381b06f65836708b22d66b64b4d01264502..4fe4d6e75a6ceb14638346c260fe178bd3922d2a 100644 (file)
@@ -977,7 +977,7 @@ static void pbvh_update_normals_accum_task_cb(void *userdata, const int n)
                                         *       Not exact equivalent though, since atomicity is only ensured for one component
                                         *       of the vector at a time, but here it shall not make any sensible difference. */
                                        for (int k = 3; k--; ) {
-                                               atomic_add_fl(&vnors[v][k], fn[k]);
+                                               atomic_add_and_fetch_fl(&vnors[v][k], fn[k]);
                                        }
                                }
                        }
index 436cd2b8fde0e0f51fd04334e2ccd13fb426ffce..fc2d9674c2f90401b85b1a4f8560a8b787fb16e0 100644 (file)
@@ -237,7 +237,7 @@ static void task_pool_num_decrease(TaskPool *pool, size_t done)
        BLI_assert(pool->num >= done);
 
        pool->num -= done;
-       atomic_sub_z(&pool->currently_running_tasks, done);
+       atomic_sub_and_fetch_z(&pool->currently_running_tasks, done);
        pool->done += done;
 
        if (pool->num == 0)
@@ -292,7 +292,7 @@ static bool task_scheduler_thread_wait_pop(TaskScheduler *scheduler, Task **task
                                continue;
                        }
 
-                       if (atomic_add_z(&pool->currently_running_tasks, 1) <= pool->num_threads ||
+                       if (atomic_add_and_fetch_z(&pool->currently_running_tasks, 1) <= pool->num_threads ||
                            pool->num_threads == 0)
                        {
                                *task = current_task;
@@ -301,7 +301,7 @@ static bool task_scheduler_thread_wait_pop(TaskScheduler *scheduler, Task **task
                                break;
                        }
                        else {
-                               atomic_sub_z(&pool->currently_running_tasks, 1);
+                               atomic_sub_and_fetch_z(&pool->currently_running_tasks, 1);
                        }
                }
                if (!found_task)
@@ -669,7 +669,7 @@ void BLI_task_pool_work_and_wait(TaskPool *pool)
                /* if found task, do it, otherwise wait until other tasks are done */
                if (found_task) {
                        /* run task */
-                       atomic_add_z(&pool->currently_running_tasks, 1);
+                       atomic_add_and_fetch_z(&pool->currently_running_tasks, 1);
                        work_task->run(pool, work_task->taskdata, 0);
 
                        /* delete task */
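
The scheduler code in this file shows why the returned value matters: atomic_add_and_fetch_z(&pool->currently_running_tasks, 1) optimistically claims a worker slot, and the matching sub rolls the claim back when it exceeded num_threads. The reserve-and-rollback idea in isolation (a sketch, assuming the __sync builtins):

#include <stdbool.h>
#include <stddef.h>

static bool try_claim_slot(size_t *running, size_t limit)
{
	if (__sync_add_and_fetch(running, 1) <= limit)
		return true;                   /* the slot is ours */
	__sync_sub_and_fetch(running, 1);  /* over the limit: undo the claim */
	return false;
}
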
index e5c2b8ace4e57a6cc122d5ca0aac116032e3d439..9a47c6b2438ea1edeab002768cb7da7525108634 100644 (file)
@@ -383,7 +383,7 @@ void ExecutionGroup::finalizeChunkExecution(int chunkNumber, MemoryBuffer **memo
        if (this->m_chunkExecutionStates[chunkNumber] == COM_ES_SCHEDULED)
                this->m_chunkExecutionStates[chunkNumber] = COM_ES_EXECUTED;
        
-       atomic_add_u(&this->m_chunksFinished, 1);
+       atomic_add_and_fetch_u(&this->m_chunksFinished, 1);
        if (memoryBuffers) {
                for (unsigned int index = 0; index < this->m_cachedMaxReadBufferOffset; index++) {
                        MemoryBuffer *buffer = memoryBuffers[index];
index c3fd202d83250791add562b24119bd7212305dd1..e926f83bcbefcc2641e8e3188e1688799d27d201 100644 (file)
@@ -152,7 +152,7 @@ static void deg_task_run_func(TaskPool *pool,
                                }
                                if ((rel->flag & DEPSREL_FLAG_CYCLIC) == 0) {
                                        BLI_assert(child->num_links_pending > 0);
-                                       atomic_sub_uint32(&child->num_links_pending, 1);
+                                       atomic_sub_and_fetch_uint32(&child->num_links_pending, 1);
                                }
                                if (child->num_links_pending == 0) {
                                        bool is_scheduled = atomic_fetch_and_or_uint8(
@@ -287,7 +287,7 @@ static void schedule_node(TaskPool *pool, Depsgraph *graph, unsigned int layers,
        {
                if (dec_parents) {
                        BLI_assert(node->num_links_pending > 0);
-                       atomic_sub_uint32(&node->num_links_pending, 1);
+                       atomic_sub_and_fetch_uint32(&node->num_links_pending, 1);
                }
 
                if (node->num_links_pending == 0) {
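
A side benefit of names that state the returned value: refcount-style checks can test the decrement's result directly instead of re-reading the counter, which two threads could otherwise both observe as zero. (The code above closes that window separately, via the atomic_fetch_and_or_uint8 scheduled flag.) The direct form, as a minimal sketch:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t num_links_pending = 1;
	/* The returned value is the authoritative new count; no second read. */
	if (__sync_sub_and_fetch(&num_links_pending, 1) == 0) {
		/* last incoming link resolved; the node could be scheduled here */
	}
	assert(num_links_pending == 0);
	return 0;
}
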
index 6af36ea677831492b5174532132c2363fbb984b8..83469a481652a38584223476e16515e266f5ed7d 100644 (file)
@@ -2510,7 +2510,7 @@ static void filelist_readjob_do(
                         * Using an atomic operation to avoid having to lock thread...
                         * Note that we do not really need this here currently, since there is a single listing thread, but better
                         * remain consistent about threading! */
-                       *((uint32_t *)entry->uuid) = atomic_add_uint32((uint32_t *)filelist->filelist_intern.curr_uuid, 1);
+                       *((uint32_t *)entry->uuid) = atomic_add_and_fetch_uint32((uint32_t *)filelist->filelist_intern.curr_uuid, 1);
 
                        /* Only thing we change in direntry here, so we need to free it first. */
                        MEM_freeN(entry->relpath);
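
The uuid assignment above is the increment-as-ID pattern: because the increment and the read of its result happen as one indivisible step, concurrent callers can never receive the same value. In isolation (a sketch, __sync builtins assumed):

#include <stdint.h>

static uint32_t curr_uuid = 0;

static uint32_t next_uuid(void)
{
	/* Each caller gets a distinct value: 1, 2, 3, ... */
	return __sync_add_and_fetch(&curr_uuid, 1);
}
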
index 4f5e34896fc9b990d1b0ee186f66c033d125a432..c588a4b33cfc62ba285b41d1a61b5f232f711782 100644 (file)
@@ -544,12 +544,12 @@ HRESULT STDMETHODCALLTYPE PinnedMemoryAllocator::QueryInterface(REFIID /*iid*/,
 
 ULONG STDMETHODCALLTYPE                PinnedMemoryAllocator::AddRef(void)
 {
-       return atomic_add_uint32(&mRefCount, 1U);
+       return atomic_add_and_fetch_uint32(&mRefCount, 1U);
 }
 
 ULONG STDMETHODCALLTYPE                PinnedMemoryAllocator::Release(void)
 {
-       uint32_t newCount = atomic_sub_uint32(&mRefCount, 1U);
+       uint32_t newCount = atomic_sub_and_fetch_uint32(&mRefCount, 1U);
        if (newCount == 0)
                delete this;
        return (ULONG)newCount;
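
Release() above is the canonical case for a sub_and_fetch: the decision to "delete this" must be made on the value returned by the decrement itself, since re-reading mRefCount afterwards would race with a concurrent Release(). The same shape in plain C (a sketch, not the DeckLink API):

#include <stdint.h>
#include <stdlib.h>

typedef struct { uint32_t refcount; /* ... payload ... */ } Object;

static uint32_t object_release(Object *ob)
{
	uint32_t new_count = __sync_sub_and_fetch(&ob->refcount, 1);
	if (new_count == 0)
		free(ob);      /* last reference: ob must not be touched after this */
	return new_count;  /* safe: a local copy, not a re-read of the member */
}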