28 #ifndef __AtomicScalar_H__
29 #define __AtomicScalar_H__
35 #if (((OGRE_COMPILER == OGRE_COMPILER_GNUC) && (OGRE_COMP_VER >= 412)) || (OGRE_COMPILER == OGRE_COMPILER_CLANG)) && OGRE_THREAD_SUPPORT
37 #if ((OGRE_COMPILER == OGRE_COMPILER_GNUC) && (OGRE_COMP_VER >= 473)) || (OGRE_COMPILER == OGRE_COMPILER_CLANG)
38 #define BUILTIN_FETCH_ADD(var, add) __atomic_fetch_add (var, add, __ATOMIC_SEQ_CST);
39 #define BUILTIN_ADD_FETCH(var, add) __atomic_add_fetch (var, add, __ATOMIC_SEQ_CST);
40 #define BUILTIN_SUB_FETCH(var, sub) __atomic_sub_fetch (var, sub, __ATOMIC_SEQ_CST);
42 #define BUILTIN_FETCH_ADD(var, add) __sync_fetch_and_add (var, add);
43 #define BUILTIN_ADD_FETCH(var, add) __sync_add_and_fetch (var, add);
44 #define BUILTIN_SUB_FETCH(var, sub) __sync_sub_and_fetch (var, sub);
/** Scalar whose arithmetic operations are atomic (GCC/Clang builtin variant).

    Supports assignment, get/set, compare-and-swap and atomic
    increment/decrement/add/subtract.  All read-modify-write operations are
    sequentially consistent (__ATOMIC_SEQ_CST, or the always-full-barrier
    __sync builtins on older GCC).

    NOTE(review): the excerpt this was recovered from had gaps; the
    constructors, get()/set() and the assignment body were reconstructed to
    match the parallel implementations visible later in this file -- confirm
    against the full source.
*/
template<class T> class AtomicScalar
{
    public:

    AtomicScalar (const T &initial)
        : mField(initial)
    {   }

    AtomicScalar (const AtomicScalar<T> &cousin)
        : mField(cousin.mField)
    {   }

    AtomicScalar ()
    {   }

    void operator= (const AtomicScalar<T> &cousin)
    {
        // Plain (non-atomic) store, matching the other implementations in
        // this file.
        mField = cousin.mField;
    }

    /// Unsynchronized read; may observe a stale value but not a torn one for
    /// naturally aligned T.
    T get (void) const
    {
        return mField;
    }

    /// Unsynchronized write; use cas() when the update must be conditional.
    void set (const T &v)
    {
        mField = v;
    }

    /// Atomically: if mField == old, store nu and return true; else false.
    bool cas (const T &old, const T &nu)
    {
        return __sync_bool_compare_and_swap (&mField, old, nu);
    }

    /// Prefix increment: returns the new value.
    T operator++ (void)
    {
        return BUILTIN_ADD_FETCH (&mField, 1);
    }

    /// Prefix decrement: returns the new value.
    T operator-- (void)
    {
        return BUILTIN_ADD_FETCH (&mField, -1);
    }

    /// Postfix increment: returns the old value.
    T operator++ (int)
    {
        return BUILTIN_FETCH_ADD (&mField, 1);
    }

    /// Postfix decrement: returns the old value.
    T operator-- (int)
    {
        return BUILTIN_FETCH_ADD (&mField, -1);
    }

    /// Atomic add: returns the new value.
    T operator+= (const T &add)
    {
        return BUILTIN_ADD_FETCH (&mField, add);
    }

    /// Atomic subtract: returns the new value.
    T operator-= (const T &sub)
    {
        return BUILTIN_SUB_FETCH (&mField, sub);
    }

    protected:

// Over-align the field on ARM so the atomic primitives always see a
// naturally aligned object.  defined() guard added so the header also
// preprocesses sanely when OGRE_CPU has not been set up (e.g. isolated
// compiles / linters); in real OGRE builds OGRE_CPU is always defined, so
// behavior is unchanged.
#if defined(OGRE_CPU) && (OGRE_CPU == OGRE_CPU_ARM)
# if OGRE_COMPILER == OGRE_COMPILER_MSVC
    __declspec(align(16)) volatile T mField;
# elif (OGRE_COMPILER == OGRE_COMPILER_GNUC) || (OGRE_COMPILER == OGRE_COMPILER_CLANG)
    volatile T mField __attribute__((__aligned__(16)));
# endif
#else
    volatile T mField;
#endif

};
138 #elif OGRE_COMPILER == OGRE_COMPILER_MSVC && OGRE_COMP_VER >= 1400 && OGRE_THREAD_SUPPORT
140 #ifndef WIN32_LEAN_AND_MEAN
141 # define WIN32_LEAN_AND_MEAN
143 #if !defined(NOMINMAX) && defined(_MSC_VER)
144 # define NOMINMAX // required to stop windows.h messing up std::min
150 # pragma warning (push)
151 # pragma warning (disable : 4244)
// Win32/MSVC variant of AtomicScalar (guarded by the #elif above): same
// interface as the GCC/Clang variant, implemented with Interlocked*
// intrinsics in the methods that follow.
// NOTE(review): the bare numbers starting some lines are the original file's
// line numbers left over from a line-sampled paste; many lines in between
// are missing from this excerpt.
161 template<
class T>
class AtomicScalar
// Copy-constructor initialiser list: copies the other counter's raw field.
171 : mField(cousin.mField)
// Assignment from another AtomicScalar: plain (non-interlocked) store.
177 void operator= (
const AtomicScalar<T> &cousin)
179 mField = cousin.mField;
// Plain setter -- presumably relies on aligned stores of T being atomic on
// the target; TODO confirm against the full source.
187 void set (
const T &v)
// Compare-and-swap: if mField == old, store nu; returns whether the swap
// happened.  Dispatches on sizeof(T) to the matching
// _InterlockedCompareExchange* intrinsic; each intrinsic returns the value
// observed *before* the exchange, so comparing that with `old` reports
// success.
192 bool cas (
const T &old,
const T &nu)
// 16-bit wide T.
195 return _InterlockedCompareExchange16((SHORT*)&mField, static_cast<SHORT>(nu), static_cast<SHORT>(old)) ==
static_cast<SHORT
>(old);
// 32-bit wide T.
197 else if (
sizeof(T)==4)
199 return _InterlockedCompareExchange((LONG*)&mField, static_cast<LONG>(nu), static_cast<LONG>(old)) ==
static_cast<LONG
>(old);
// 64-bit wide T.
201 else if (
sizeof(T)==8) {
202 return _InterlockedCompareExchange64((LONGLONG*)&mField, static_cast<LONGLONG>(nu), static_cast<LONGLONG>(old)) ==
static_cast<LONGLONG
>(old);
// NOTE(review): the branch for other sizes (original lines 203+) is missing
// from this excerpt.
// Prefix operator++ (its signature, original lines ~209-211, is missing from
// the excerpt): the (_)InterlockedIncrement* intrinsics return the value
// *after* the increment, which is exactly what prefix ++ must yield.
212 return _InterlockedIncrement16((SHORT*)&mField);
213 }
else if (
sizeof(T)==4) {
214 return InterlockedIncrement((LONG*)&mField);
215 }
else if (
sizeof(T)==8) {
216 return InterlockedIncrement64((LONGLONG*)&mField);
// Prefix operator-- (signature missing from the excerpt): the
// (_)InterlockedDecrement* intrinsics return the value *after* the
// decrement, matching prefix -- semantics.
225 return _InterlockedDecrement16((SHORT*)&mField);
226 }
else if (
sizeof(T)==4) {
227 return InterlockedDecrement((LONG*)&mField);
228 }
else if (
sizeof(T)==8) {
229 return InterlockedDecrement64((LONGLONG*)&mField);
// Postfix operator++ (signature missing from the excerpt): the intrinsics
// return the post-increment value, so subtracting 1 recovers the value
// *before* the increment, as postfix ++ requires.
238 return _InterlockedIncrement16((SHORT*)&mField)-1;
239 }
else if (
sizeof(T)==4) {
240 return InterlockedIncrement((LONG*)&mField)-1;
241 }
else if (
sizeof(T)==8) {
242 return InterlockedIncrement64((LONGLONG*)&mField)-1;
// Postfix operator-- (signature missing from the excerpt): adding 1 to the
// post-decrement value recovers the value *before* the decrement, as postfix
// -- requires.
251 return _InterlockedDecrement16((SHORT*)&mField)+1;
252 }
else if (
sizeof(T)==4) {
253 return InterlockedDecrement((LONG*)&mField)+1;
254 }
else if (
sizeof(T)==8) {
255 return InterlockedDecrement64((LONGLONG*)&mField)+1;
// operator+= body fragment: there is no Interlocked add intrinsic for every
// width handled here, so the new value is computed and then published with a
// cas() retry loop -- the cas succeeds only if mField still holds the value
// it was read as (newVal - add); otherwise the loop recomputes and retries.
268 newVal = mField + add;
272 }
while (!
cas(newVal - add, newVal));
// operator-= body fragment: mirror of operator+= -- compute the decremented
// value, then cas() it in, retrying while another thread raced us (i.e.
// while mField no longer equals newVal + sub).
283 newVal = mField - sub;
287 }
while (!
cas(newVal + sub, newVal));
// Restore the warning state saved at the top of the MSVC section.
299 # pragma warning (pop)
// ---------------------------------------------------------------------------
// Fallback AtomicScalar (original lines 300+): used when neither the
// GCC/Clang-builtin nor the MSVC-intrinsic path applies.  The visible
// operations are plain reads/writes -- presumably serialized by a mutex in
// the unexcerpted code when thread support is on; TODO confirm against the
// full source.
// Copy-constructor initialiser list.
322 : mField(cousin.mField)
// Assignment body: plain store of the other counter's field.
331 mField = cousin.mField;
// Compare-and-swap: only succeeds when the current value equals `old`.
348 bool cas (
const T &old,
const T &nu)
351 if (mField != old)
return false;
// (the successful-swap branch continues beyond this excerpt)