00001 #ifndef GALOIS_C__11_COMPAT_ATOMIC_H
00002 #define GALOIS_C__11_COMPAT_ATOMIC_H
00003
00004 #include "type_traits.h"
00005
00006
00007 namespace std {
00008
/// Stand-in for C++11 std::memory_order on pre-C++11 toolchains.
/// Enumerator order — and therefore the underlying values 0..5 — matches
/// the standard enumeration, so these constants can be handed directly to
/// the __atomic_* primitives selected below.
enum memory_order
{
  memory_order_relaxed, // atomicity only, no ordering constraints
  memory_order_consume, // ordering via data dependencies
  memory_order_acquire, // later reads/writes not reordered before this load
  memory_order_release, // earlier reads/writes not reordered after this store
  memory_order_acq_rel, // both acquire and release semantics
  memory_order_seq_cst  // single total order; the default everywhere below
};
// Redundant in C++ (the enum name is already a type) but kept for parity
// with the original C-style typedef-enum declaration.
typedef enum memory_order memory_order;
00018
00019 }
00020
00021 #if __IBMCPP__ && __PPC__
00022
00023 # include "atomic_internal_gcc_generic.h"
00024 #elif __GNUC__
00025 # include "atomic_internal_gcc_generic.h"
00026 #else
00027 # error "Unknown machine architecture"
00028 #endif
00029
00030 namespace std {
00031
/**
 * Minimal drop-in replacement for C++11 std::atomic<_Tp>.
 *
 * Every operation forwards to an __atomic_* primitive supplied by the
 * atomic_internal_*.h header selected by the preprocessor block above.
 * NOTE(review): the 3-argument, by-value __atomic_exchange(ptr, val, order)
 * calls below do not match the 4-argument GCC __atomic_exchange builtin, so
 * the internal header is assumed to provide matching overloads — confirm
 * against atomic_internal_gcc_generic.h.
 *
 * Only the subset of the std::atomic interface this project needs is
 * implemented: store/load/exchange, weak and strong compare_exchange,
 * fetch_xor/fetch_or/fetch_add (integral _Tp only) and pre-increment.
 * All defaulted memory orders are memory_order_seq_cst, as in the standard.
 */
template<class _Tp>
class atomic {
  _Tp _M_i; // the wrapped value; touched only through __atomic_* primitives

  // Non-copyable: declared private and left undefined (the C++03 idiom for
  // "= delete"), mirroring std::atomic's deleted copy operations.
  atomic(const atomic&);
  atomic& operator=(const atomic&);
  atomic& operator=(const atomic&) volatile;

public:
  atomic() { } // leaves _M_i uninitialized, like std::atomic's default ctor
  constexpr atomic(_Tp __i): _M_i(__i) { }

  // Implicit conversion and assignment go through load()/store() and thus
  // use their seq_cst defaults; operator= returns the stored value (not
  // *this), exactly as std::atomic does.
  operator _Tp() const { return load(); }
  operator _Tp() const volatile { return load(); }
  _Tp operator=(_Tp __i) { store(__i); return __i; }
  _Tp operator=(_Tp __i) volatile { store(__i); return __i; }

  /// Atomically replaces the stored value with __i under ordering _m.
  void store(_Tp __i, memory_order _m = memory_order_seq_cst) { __atomic_store(&_M_i, &__i, _m); }
  void store(_Tp __i, memory_order _m = memory_order_seq_cst) volatile { __atomic_store(&_M_i, &__i, _m); }

  /// Atomically reads the stored value under ordering _m.
  /// Reads into a local first because the primitive returns via pointer.
  _Tp load(memory_order _m = memory_order_seq_cst) const {
    _Tp tmp;
    __atomic_load(&_M_i, &tmp, _m);
    return tmp;
  }
  _Tp load(memory_order _m = memory_order_seq_cst) const volatile {
    _Tp tmp;
    __atomic_load(&_M_i, &tmp, _m);
    return tmp;
  }

  /// Atomically replaces the stored value with __i; returns the old value.
  _Tp exchange(_Tp __i, memory_order _m = memory_order_seq_cst) {
    return __atomic_exchange(&_M_i, __i, _m);
  }
  _Tp exchange(_Tp __i, memory_order _m = memory_order_seq_cst) volatile {
    return __atomic_exchange(&_M_i, __i, _m);
  }

  /// Weak CAS (may fail spuriously): the 4th argument `true` selects the
  /// weak variant of the underlying primitive. On failure, __e is updated
  /// with the observed value. _m1 orders success, _m2 orders failure.
  bool compare_exchange_weak(_Tp& __e, _Tp __i, memory_order _m1, memory_order _m2) {
    return __atomic_compare_exchange(&_M_i, &__e, &__i, true, _m1, _m2);
  }
  bool compare_exchange_weak(_Tp& __e, _Tp __i, memory_order _m1, memory_order _m2) volatile {
    return __atomic_compare_exchange(&_M_i, &__e, &__i, true, _m1, _m2);
  }
  // Single-order convenience overloads: use _m for both success and failure.
  bool compare_exchange_weak(_Tp& __e, _Tp __i, memory_order _m = memory_order_seq_cst) {
    return compare_exchange_weak(__e, __i, _m, _m);
  }
  bool compare_exchange_weak(_Tp& __e, _Tp __i, memory_order _m = memory_order_seq_cst) volatile {
    return compare_exchange_weak(__e, __i, _m, _m);
  }

  /// Strong CAS (no spurious failure): the 4th argument `false` selects the
  /// strong variant of the underlying primitive.
  bool compare_exchange_strong(_Tp& __e, _Tp __i, memory_order _m1, memory_order _m2) {
    return __atomic_compare_exchange(&_M_i, &__e, &__i, false, _m1, _m2);
  }
  bool compare_exchange_strong(_Tp& __e, _Tp __i, memory_order _m1, memory_order _m2) volatile {
    return __atomic_compare_exchange(&_M_i, &__e, &__i, false, _m1, _m2);
  }
  bool compare_exchange_strong(_Tp& __e, _Tp __i, memory_order _m = memory_order_seq_cst) {
    return compare_exchange_strong(__e, __i, _m, _m);
  }
  bool compare_exchange_strong(_Tp& __e, _Tp __i, memory_order _m = memory_order_seq_cst) volatile {
    return compare_exchange_strong(__e, __i, _m, _m);
  }

  // The read-modify-write members below are restricted to integral _Tp via
  // the default-template-argument + enable_if trick (SFINAE): when _Tp is
  // not integral, enable_if<Enable>::type does not exist and the member is
  // removed from overload resolution. The trailing dummy pointer parameter
  // (defaulted to 0) only exists to carry the enable_if. Each returns the
  // value held BEFORE the operation, as in std::atomic.

  /// Atomic bitwise XOR; returns the previous value.
  template<bool Enable = std::is_integral<_Tp>::value>
  _Tp fetch_xor(_Tp __i, memory_order _m = memory_order_seq_cst, typename std::enable_if<Enable>::type* = 0) {
    return __atomic_fetch_xor(&_M_i, __i, _m);
  }
  template<bool Enable = std::is_integral<_Tp>::value>
  _Tp fetch_xor(_Tp __i, memory_order _m = memory_order_seq_cst, typename std::enable_if<Enable>::type* = 0) volatile {
    return __atomic_fetch_xor(&_M_i, __i, _m);
  }

  /// Atomic bitwise OR; returns the previous value.
  template<bool Enable = std::is_integral<_Tp>::value>
  _Tp fetch_or(_Tp __i, memory_order _m = memory_order_seq_cst, typename std::enable_if<Enable>::type* = 0) {
    return __atomic_fetch_or(&_M_i, __i, _m);
  }
  template<bool Enable = std::is_integral<_Tp>::value>
  _Tp fetch_or(_Tp __i, memory_order _m = memory_order_seq_cst, typename std::enable_if<Enable>::type* = 0) volatile {
    return __atomic_fetch_or(&_M_i, __i, _m);
  }

  /// Atomic addition; returns the previous value.
  template<bool Enable = std::is_integral<_Tp>::value>
  _Tp fetch_add(_Tp __i, memory_order _m = memory_order_seq_cst, typename std::enable_if<Enable>::type* = 0) {
    return __atomic_fetch_add(&_M_i, __i, _m);
  }
  /// Pre-increment: fetch_add returns the OLD value, so add 1 to yield the
  /// new value, matching std::atomic's operator++ semantics.
  template<bool Enable = std::is_integral<_Tp>::value>
  _Tp operator++() {
    return fetch_add(1) + 1;
  }
  template<bool Enable = std::is_integral<_Tp>::value>
  _Tp fetch_add(_Tp __i, memory_order _m = memory_order_seq_cst, typename std::enable_if<Enable>::type* = 0) volatile {
    return __atomic_fetch_add(&_M_i, __i, _m);
  }
  template<bool Enable = std::is_integral<_Tp>::value>
  _Tp operator++() volatile {
    return fetch_add(1) + 1;
  }
};
00126
00127 }
00128
00129 #endif