From abc354a5d0a941cf1f3877e487cce325cc609ba8 Mon Sep 17 00:00:00 2001
From: est31
Date: Tue, 3 Nov 2015 13:09:23 +0100
Subject: [PATCH] Atomic: cleanup and add more operations

Cleanup:

* Remove the volatile keyword; it is of no use at all. [1]
* Remove the enable_if stuff. It had no use either.
  The most likely explanation for why it was there is that it served as
  something like a STATIC_ASSERT to verify that sizeof(T) is not larger
  than sizeof(void *). This check, however, is not just misplaced in a
  code path where we already use a lock, it isn't needed at all: gcc
  will simply generate a call to the runtime if it compiles for
  platforms that don't support atomic instructions, and the runtime
  will then most likely use locks.

Code style fixes:

* Prefix the name of the mutex
* Line everything up nicely where it improves readability
* Filling \ continuations with spaces is a code style rule

Added operations on the atomic var:

* Compare and swap
* Swap

The second point of the cleanup also fixes the Android build of the
next commit.

[1]: http://stackoverflow.com/q/2484980
---
 src/threading/atomic.h | 89 +++++++++++++++++++++++++-----------------
 1 file changed, 54 insertions(+), 35 deletions(-)

diff --git a/src/threading/atomic.h b/src/threading/atomic.h
index 486bc795..9cf50a82 100644
--- a/src/threading/atomic.h
+++ b/src/threading/atomic.h
@@ -29,64 +29,83 @@ with this program; if not, write to the Free Software Foundation, Inc.,
 #define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
 #define CLANG_VERSION (__clang_major__ * 100 + __clang_minor__)
 #if GCC_VERSION >= 407 || CLANG_VERSION >= 302
-	#define ATOMIC_LOAD(T, v) return __atomic_load_n(&(v), __ATOMIC_SEQ_CST)
-	#define ATOMIC_STORE(T, v, x) __atomic_store (&(v), &(x), __ATOMIC_SEQ_CST); return x
-	#define ATOMIC_ADD_EQ(T, v, x) return __atomic_add_fetch(&(v), (x), __ATOMIC_SEQ_CST)
-	#define ATOMIC_SUB_EQ(T, v, x) return __atomic_sub_fetch(&(v), (x), __ATOMIC_SEQ_CST)
-	#define ATOMIC_POST_INC(T, v) return __atomic_fetch_add(&(v), 1, __ATOMIC_SEQ_CST)
-	#define ATOMIC_POST_DEC(T, v) return __atomic_fetch_sub(&(v), 1, __ATOMIC_SEQ_CST)
+	#define ATOMIC_LOAD(T, v)        return __atomic_load_n    (&(v),       __ATOMIC_SEQ_CST)
+	#define ATOMIC_STORE(T, v, x)           __atomic_store     (&(v), &(x), __ATOMIC_SEQ_CST); return x
+	#define ATOMIC_EXCHANGE(T, v, x) return __atomic_exchange_n(&(v), (x),  __ATOMIC_SEQ_CST)
+	#define ATOMIC_ADD_EQ(T, v, x)   return __atomic_add_fetch (&(v), (x),  __ATOMIC_SEQ_CST)
+	#define ATOMIC_SUB_EQ(T, v, x)   return __atomic_sub_fetch (&(v), (x),  __ATOMIC_SEQ_CST)
+	#define ATOMIC_POST_INC(T, v)    return __atomic_fetch_add (&(v), 1,    __ATOMIC_SEQ_CST)
+	#define ATOMIC_POST_DEC(T, v)    return __atomic_fetch_sub (&(v), 1,    __ATOMIC_SEQ_CST)
+	#define ATOMIC_CAS(T, v, e, d)   return __atomic_compare_exchange_n(&(v), &(e), (d), \
+			false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
 #else
 	#define ATOMIC_USE_LOCK
 	#include "threading/mutex.h"
 
 	#define ATOMIC_LOCK_OP(T, op) do { \
-		mutex.lock(); \
-		T _val = (op); \
-		mutex.unlock(); \
-		return _val; \
+		m_mutex.lock();   \
+		T _val = (op);    \
+		m_mutex.unlock(); \
+		return _val;      \
 	} while (0)
-	#define ATOMIC_LOAD(T, v) \
+	#define ATOMIC_LOCK_CAS(T, v, e, d) do { \
+		m_mutex.lock();      \
+		bool _eq = (v == e); \
+		if (_eq)             \
+			v = d;           \
+		m_mutex.unlock();    \
+		return _eq;          \
+	} while (0)
+	#define ATOMIC_LOAD(T, v)     \
 		if (sizeof(T) <= sizeof(void*)) return v; \
 		else ATOMIC_LOCK_OP(T, v);
-	#define ATOMIC_STORE(T, v, x) \
+	#define ATOMIC_STORE(T, v, x) \
 		if (sizeof(T) <= sizeof(void*)) return v = x; \
 		else ATOMIC_LOCK_OP(T, v = x);
-# if GCC_VERSION >= 401
-	#define ATOMIC_ADD_EQ(T, v, x) return __sync_add_and_fetch(&(v), (x))
-	#define ATOMIC_SUB_EQ(T, v, x) return __sync_sub_and_fetch(&(v), (x))
-	#define ATOMIC_POST_INC(T, v) return __sync_fetch_and_add(&(v), 1)
-	#define ATOMIC_POST_DEC(T, v) return __sync_fetch_and_sub(&(v), 1)
-# else
-	#define ATOMIC_ADD_EQ(T, v, x) ATOMIC_LOCK_OP(T, v += x)
-	#define ATOMIC_SUB_EQ(T, v, x) ATOMIC_LOCK_OP(T, v -= x)
-	#define ATOMIC_POST_INC(T, v) ATOMIC_LOCK_OP(T, v++)
-	#define ATOMIC_POST_DEC(T, v) ATOMIC_LOCK_OP(T, v--)
-# endif
+	#define ATOMIC_EXCHANGE(T, v, x) do { \
+		m_mutex.lock();   \
+		T _val = v;       \
+		v = x;            \
+		m_mutex.unlock(); \
+		return _val;      \
+	} while (0)
+	#if GCC_VERSION >= 401
+		#define ATOMIC_ADD_EQ(T, v, x) return __sync_add_and_fetch(&(v), (x))
+		#define ATOMIC_SUB_EQ(T, v, x) return __sync_sub_and_fetch(&(v), (x))
+		#define ATOMIC_POST_INC(T, v)  return __sync_fetch_and_add(&(v), 1)
+		#define ATOMIC_POST_DEC(T, v)  return __sync_fetch_and_sub(&(v), 1)
+		#define ATOMIC_CAS(T, v, e, d) return __sync_bool_compare_and_swap(&(v), (e), (d))
+	#else
+		#define ATOMIC_ADD_EQ(T, v, x) ATOMIC_LOCK_OP(T, v += x)
+		#define ATOMIC_SUB_EQ(T, v, x) ATOMIC_LOCK_OP(T, v -= x)
+		#define ATOMIC_POST_INC(T, v)  ATOMIC_LOCK_OP(T, v++)
+		#define ATOMIC_POST_DEC(T, v)  ATOMIC_LOCK_OP(T, v--)
+		#define ATOMIC_CAS(T, v, e, d) ATOMIC_LOCK_CAS(T, v, e, d)
+	#endif
 #endif
 
 template<typename T>
-class Atomic
-{
-	// Like C++11 std::enable_if, but defaults to char since C++03 doesn't support SFINAE
-	template struct enable_if { typedef char type; };
-	template struct enable_if { typedef T_ type; };
+class Atomic {
 public:
-	Atomic(const T &v=0) : val(v) {}
+	Atomic(const T &v = 0) : val(v) {}
 
 	operator T () { ATOMIC_LOAD(T, val); }
-	T operator = (T x) { ATOMIC_STORE(T, val, x); }
+
+	T exchange(T x) { ATOMIC_EXCHANGE(T, val, x); }
+	bool compare_exchange_strong(T &expected, T desired) { ATOMIC_CAS(T, val, expected, desired); }
+
+	T operator = (T x)  { ATOMIC_STORE(T, val, x); }
 	T operator += (T x) { ATOMIC_ADD_EQ(T, val, x); }
 	T operator -= (T x) { ATOMIC_SUB_EQ(T, val, x); }
-	T operator ++ () { return *this += 1; }
-	T operator -- () { return *this -= 1; }
+	T operator ++ ()    { return *this += 1; }
+	T operator -- ()    { return *this -= 1; }
 	T operator ++ (int) { ATOMIC_POST_INC(T, val); }
 	T operator -- (int) { ATOMIC_POST_DEC(T, val); }
-
 private:
-	volatile T val;
+	T val;
 #ifdef ATOMIC_USE_LOCK
-	typename enable_if::type mutex;
+	Mutex m_mutex;
 #endif
 };
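
Usage sketch (not part of the patch): a minimal, hypothetical example of how the
new operations could be exercised, assuming the patched src/threading/atomic.h is
reachable as "threading/atomic.h" on the include path. take_id(), claim() and the
0-means-unowned convention are made up for illustration.

#include <cassert>
#include "threading/atomic.h"

// Hands out sequential ids; post-increment expands to ATOMIC_POST_INC.
Atomic<int> next_id;

int take_id()
{
	return next_id++;
}

// Tries to take ownership; succeeds only if 'owner' still holds 0.
// compare_exchange_strong() expands to ATOMIC_CAS.
bool claim(Atomic<int> &owner, int me)
{
	int expected = 0;
	return owner.compare_exchange_strong(expected, me);
}

int main()
{
	int first  = take_id();
	int second = take_id();
	assert(first == 0 && second == 1);

	Atomic<int> owner;              // starts unowned (0) via the default argument
	bool got_it = claim(owner, 42); // first claim succeeds
	bool stolen = claim(owner, 7);  // second claim fails, owner stays 42
	assert(got_it && !stolen);

	int prev = owner.exchange(0);   // release; exchange() expands to ATOMIC_EXCHANGE
	assert(prev == 42);
	return 0;
}

Note that on the lock and __sync fallback paths a failed compare_exchange_strong()
does not write the observed value back into 'expected' (unlike std::atomic and the
__atomic path), so the example builds a fresh 'expected' for every attempt.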