00001 #include <builtins.h>
00002
00003 #error "Broken"
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013 namespace detail {
00014
00015 inline bool atomic_compare_exchange_strong32(volatile int* __a, int* __e, int* __d, std::memory_order _succ, std::memory_order _fail) {
00016 bool tmp;
00017 int v = *__e;
00018 switch (_succ) {
00019 case std::memory_order_relaxed: return __compare_and_swap(__a, &v, *__d);
00020 case std::memory_order_consume: abort();
00021 case std::memory_order_acquire: tmp = __compare_and_swap(__a, &v, *__d); __isync(); return tmp;
00022 case std::memory_order_release: __lwsync(); return __compare_and_swap(__a, &v, *__d);
00023 case std::memory_order_acq_rel: __lwsync(); tmp = __compare_and_swap(__a, &v, *__d); __isync(); return tmp;
00024 case std::memory_order_seq_cst: __sync(); tmp = __compare_and_swap(__a, &v, *__d); __isync(); return tmp;
00025 default: abort();
00026 }
00027
00028 return tmp;
00029 }
00030
#ifdef __PPC64__
// Strong 64-bit compare-and-swap (PPC64 only), mirroring the 32-bit version
// but using the doubleword builtin __compare_and_swaplp.
//
// __e is updated with the observed value on failure, per the standard
// compare_exchange contract. _fail is currently unused: the failure path
// carries the same fences as the success path chosen below.
// Returns true iff the swap was performed.
inline bool atomic_compare_exchange_strong64(volatile long* __a, long* __e, long* __d, std::memory_order _succ, std::memory_order _fail) {
bool ok;
long v = *__e;
switch (_succ) {
case std::memory_order_relaxed: ok = __compare_and_swaplp(__a, &v, *__d); break;
case std::memory_order_consume: abort(); // consume not supported here
// isync after the CAS forms the acquire barrier (cmp+branch+isync idiom).
case std::memory_order_acquire: ok = __compare_and_swaplp(__a, &v, *__d); __isync(); break;
// lwsync before the CAS orders all prior accesses (release barrier).
case std::memory_order_release: __lwsync(); ok = __compare_and_swaplp(__a, &v, *__d); break;
case std::memory_order_acq_rel: __lwsync(); ok = __compare_and_swaplp(__a, &v, *__d); __isync(); break;
// Full sync before gives the sequentially-consistent store side.
case std::memory_order_seq_cst: __sync(); ok = __compare_and_swaplp(__a, &v, *__d); __isync(); break;
default: abort();
}
// BUG FIX: __compare_and_swaplp copies the observed value of *__a into v;
// on failure the caller's expected value must receive it, otherwise
// compare_exchange retry loops driven through *__e never converge.
if (!ok)
*__e = v;
return ok;
}
#endif
00048
// Size-based dispatcher: routes a generic strong CAS to the 32- or 64-bit
// builtin-backed implementation above. The 64-bit path exists only on PPC64.
//
// NOTE(review): for sizeof(_Tp) strictly less than 4 (or between 5 and 7 on
// PPC64) the reinterpret_casts below make the builtin read/write more bytes
// than the object holds, and __a may not meet the builtin's alignment
// requirement — confirm whether callers ever pass such types; an exact-size
// static_assert may be the right tightening.
template<class _Tp>
bool atomic_compare_exchange_strong(volatile _Tp* __a, _Tp* __e, _Tp* __d, std::memory_order _succ, std::memory_order _fail) {


// Reject types wider than the widest available CAS builtin.
#ifdef __PPC64__
static_assert(sizeof(_Tp) <= 8, "Operation undefined on larger types");
#else
static_assert(sizeof(_Tp) <= 4, "Operation undefined on larger types");
#endif
if (sizeof(_Tp) <= 4)
return detail::atomic_compare_exchange_strong32(reinterpret_cast<volatile int*>(__a), reinterpret_cast<int*>(__e), reinterpret_cast<int*>(__d), _succ, _fail);
#ifdef __PPC64__
else
return detail::atomic_compare_exchange_strong64(reinterpret_cast<volatile long*>(__a), reinterpret_cast<long*>(__e), reinterpret_cast<long*>(__d), _succ, _fail);
#endif
// Unreachable: the static_asserts above guarantee one branch was taken.
abort();
return false;
}
00067
00068
00069
00070
00071
00072
00073
00074
00075
// Acquire-style fence tied to the value just loaded through __a.
//
// NOTE(review): the self-comparison loop never iterates (a value equals
// itself); it appears to exist solely to force the compiler to emit a
// compare-and-branch on the loaded value before the lwsync — presumably the
// PowerPC "branch after load" ordering idiom. The volatile qualifier is what
// keeps the reads and the branch from being optimized away. Confirm this
// intent before restructuring; the code is left byte-identical here.
template<class _Tp>
void weak_fence(volatile _Tp* __a) {

while (*__a != *__a)
;
__lwsync();
}
00083
00084 }
00085
00086 template<class _Tp>
00087 void __atomic_store(volatile _Tp* __a, _Tp* __i, std::memory_order _m) {
00088 switch (_m) {
00089 case std::memory_order_relaxed: *__a = *__i; break;
00090 case std::memory_order_consume:
00091 case std::memory_order_acquire: abort(); break;
00092 case std::memory_order_release:
00093 case std::memory_order_acq_rel: __lwsync(); *__a = *__i;
00094 case std::memory_order_seq_cst: __sync(); *__a = *__i; break;
00095 default: abort();
00096 }
00097 }
00098
00099 template<class _Tp>
00100 void __atomic_load(volatile _Tp* __a, _Tp* __i, std::memory_order _m) {
00101 switch (_m) {
00102 case std::memory_order_relaxed: *__i = *__a; break;
00103 case std::memory_order_consume:
00104 case std::memory_order_acquire: *__i = *__a; detail::weak_fence(__i); break;
00105 case std::memory_order_release: abort(); break;
00106 case std::memory_order_acq_rel: *__i = *__a; detail::weak_fence(__i); break;
00107 case std::memory_order_seq_cst: __sync(); *__i = *__a; detail::weak_fence(__i); break;
00108 default: abort();
00109 }
00110 }
00111
// const-qualified overload: strip only the const qualifier and forward to the
// non-const overload (volatility is preserved by the cast target type).
template<class _Tp>
void __atomic_load(volatile const _Tp* __a, _Tp* __i, std::memory_order _m) {
__atomic_load(const_cast<volatile _Tp*>(__a), __i, _m);
}
00116
00117 template<class _Tp>
00118 bool __atomic_compare_exchange(volatile _Tp* __a, _Tp* __e, _Tp* __d, bool _weak, std::memory_order _succ, std::memory_order _fail) {
00119 return detail::atomic_compare_exchange_strong(__a, __e, __d, _succ, _fail);
00120 }
00121
// Atomically replace *__a with (*__a ^ __i) via a CAS retry loop.
// Returns the value *__a held immediately before the successful exchange.
template<class _Tp>
_Tp __atomic_fetch_xor(volatile _Tp* __a, _Tp __i, std::memory_order _m) {
for (;;) {
_Tp observed = *__a;
_Tp desired = observed ^ __i;
if (__atomic_compare_exchange(__a, &observed, &desired, true, _m, _m))
return observed;
}
}
00132
// Atomically replace *__a with (*__a + __i) via a CAS retry loop.
// Returns the value *__a held immediately before the successful exchange.
template<class _Tp>
_Tp __atomic_fetch_add(volatile _Tp* __a, _Tp __i, std::memory_order _m) {
for (;;) {
_Tp observed = *__a;
_Tp desired = observed + __i;
if (__atomic_compare_exchange(__a, &observed, &desired, true, _m, _m))
return observed;
}
}