#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1

#pragma GCC system_header

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
#if __cplusplus > 201703L
  inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
  inline constexpr memory_order memory_order_consume = memory_order::consume;
  inline constexpr memory_order memory_order_acquire = memory_order::acquire;
  inline constexpr memory_order memory_order_release = memory_order::release;
  inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
  inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
  enum __memory_order_modifier
    {
      __memory_order_mask          = 0x0ffff,
      __memory_order_modifier_mask = 0xffff0000,
      __memory_order_hle_acquire   = 0x10000,
      __memory_order_hle_release   = 0x20000
    };
  constexpr memory_order
  __cmpexch_failure_order2(memory_order __m) noexcept
  {
    return __m == memory_order_acq_rel ? memory_order_acquire
      : __m == memory_order_release ? memory_order_relaxed : __m;
  }

  constexpr memory_order
  __cmpexch_failure_order(memory_order __m) noexcept
  {
    return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
      | __memory_order_modifier(__m & __memory_order_modifier_mask));
  }
  _GLIBCXX_ALWAYS_INLINE void
  atomic_thread_fence(memory_order __m) noexcept
  { __atomic_thread_fence(int(__m)); }

  _GLIBCXX_ALWAYS_INLINE void
  atomic_signal_fence(memory_order __m) noexcept
  { __atomic_signal_fence(int(__m)); }
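
  // [Editorial note, not part of the upstream header] Illustrative sketch of
  // how these fence wrappers pair with relaxed atomic operations; the
  // std::atomic<int> flag and the plain int data are assumptions of the
  // example, not names from this file.
  //
  //   int data;                 // written by the producer thread
  //   std::atomic<int> flag{0};
  //
  //   // producer
  //   data = 42;
  //   std::atomic_thread_fence(std::memory_order_release);
  //   flag.store(1, std::memory_order_relaxed);
  //
  //   // consumer
  //   while (flag.load(std::memory_order_relaxed) != 1)
  //     ;
  //   std::atomic_thread_fence(std::memory_order_acquire);
  //   // data == 42 is guaranteed to be visible here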
  /// kill_dependency
  template<typename _Tp>
    inline _Tp
    kill_dependency(_Tp __y) noexcept
    {
      _Tp __ret(__y);
      return __ret;
    }

  // Base types for atomics.
  template<typename _IntTp>
    struct __atomic_base;
#if __cplusplus <= 201703L
# define _GLIBCXX20_INIT(I)
#else
# define __cpp_lib_atomic_value_initialization 201911L
# define _GLIBCXX20_INIT(I) = I
#endif

#define ATOMIC_VAR_INIT(_VI) { _VI }
  template<typename _Tp>
    struct atomic;

  template<typename _Tp>
    struct atomic<_Tp*>;
#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
  typedef bool __atomic_flag_data_type;
#else
  typedef unsigned char __atomic_flag_data_type;
#endif
  _GLIBCXX_BEGIN_EXTERN_C

  /// Base type for atomic_flag.
  struct __atomic_flag_base
  {
    __atomic_flag_data_type _M_i _GLIBCXX20_INIT({});
  };

  _GLIBCXX_END_EXTERN_C

#define ATOMIC_FLAG_INIT { 0 }
  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) noexcept
    { return __atomic_test_and_set(&_M_i, int(__m)); }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
    { return __atomic_test_and_set(&_M_i, int(__m)); }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) noexcept
    {
      memory_order __b = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear(&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      memory_order __b = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear(&_M_i, int(__m));
    }
  };
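
  // [Editorial note, not part of the upstream header] Sketch of the classic
  // spinlock built on the test_and_set/clear members above; lock_flag and
  // critical_section are hypothetical names used only for illustration.
  //
  //   std::atomic_flag lock_flag = ATOMIC_FLAG_INIT;
  //
  //   void critical_section()
  //   {
  //     while (lock_flag.test_and_set(std::memory_order_acquire))
  //       ;                                         // spin until acquired
  //     // ... exclusive work ...
  //     lock_flag.clear(std::memory_order_release); // publish and unlock
  //   }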
  /// Base class for atomic integrals.
  template<typename _ITp>
    struct __atomic_base
    {
      using value_type = _ITp;
      using difference_type = value_type;

      typedef _ITp __int_type;

      static constexpr int _S_alignment =
        sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);

      alignas(_S_alignment) __int_type _M_i _GLIBCXX20_INIT(0);
      operator __int_type() const noexcept

      operator __int_type() const volatile noexcept

      operator=(__int_type __i) noexcept

      operator=(__int_type __i) volatile noexcept

      operator++(int) noexcept
      { return fetch_add(1); }

      operator++(int) volatile noexcept
      { return fetch_add(1); }

      operator--(int) noexcept
      { return fetch_sub(1); }

      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      operator++() noexcept

      operator++() volatile noexcept

      operator--() noexcept

      operator--() volatile noexcept

      operator+=(__int_type __i) noexcept

      operator+=(__int_type __i) volatile noexcept

      operator-=(__int_type __i) noexcept

      operator-=(__int_type __i) volatile noexcept

      operator&=(__int_type __i) noexcept

      operator&=(__int_type __i) volatile noexcept

      operator|=(__int_type __i) noexcept

      operator|=(__int_type __i) volatile noexcept

      operator^=(__int_type __i) noexcept

      operator^=(__int_type __i) volatile noexcept
      is_lock_free() const noexcept
      {
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }

      is_lock_free() const volatile noexcept
      {
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }
      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
            memory_order __m = memory_order_seq_cst) noexcept
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
            memory_order __m = memory_order_seq_cst) volatile noexcept
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

      _GLIBCXX_ALWAYS_INLINE __int_type
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

      _GLIBCXX_ALWAYS_INLINE __int_type
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);
      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2) noexcept
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1,
                            memory_order __m2) volatile noexcept
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1, memory_order __m2) noexcept
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
    };
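
  // [Editorial note, not part of the upstream header] The integral base class
  // above is what std::atomic<int> and the other integral specializations are
  // built on.  A usage sketch, with the counter object being an assumption of
  // the example:
  //
  //   std::atomic<int> counter{0};
  //   counter.fetch_add(5, std::memory_order_relaxed);  // counter == 5
  //   int expected = 5;
  //   counter.compare_exchange_strong(expected, 7);     // counter == 7
  //   ++counter;                                         // counter == 8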
  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
      _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }

      _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }
      operator++(int) noexcept
      { return fetch_add(1); }

      operator++(int) volatile noexcept
      { return fetch_add(1); }

      operator--(int) noexcept
      { return fetch_sub(1); }

      operator--(int) volatile noexcept
      { return fetch_sub(1); }
      operator++() noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      operator--() noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      operator+=(ptrdiff_t __d) noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      operator+=(ptrdiff_t __d) volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      operator-=(ptrdiff_t __d) noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      operator-=(ptrdiff_t __d) volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }
      is_lock_free() const noexcept
      {
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      is_lock_free() const volatile noexcept
      {
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }
      _GLIBCXX_ALWAYS_INLINE void
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

      _GLIBCXX_ALWAYS_INLINE void
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);
      _GLIBCXX_ALWAYS_INLINE bool
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);

      _GLIBCXX_ALWAYS_INLINE bool
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
    };
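
  // [Editorial note, not part of the upstream header] The pointer partial
  // specialization scales every offset through _M_type_size, so arithmetic is
  // element-wise.  Sketch with a hypothetical std::atomic<int*>:
  //
  //   int buf[4] = {10, 11, 12, 13};
  //   std::atomic<int*> p{buf};
  //   p.fetch_add(2);              // p now points at buf + 2
  //   int* q = p.load();           // *q == 12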
#if __cplusplus > 201703L
  // Implementation details of atomic_ref and atomic<floating-point>.
  namespace __atomic_impl
  {
    // Non-deduced value type with volatile stripped.
    template<typename _Tp>
      using _Val = remove_volatile_t<_Tp>;

    // As above, but for difference_type arguments.
    template<typename _Tp>
      using _Diff = conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;

    template<size_t _Size, size_t _Align>
      _GLIBCXX_ALWAYS_INLINE bool
      is_lock_free() noexcept
      {
        return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
      }
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      load(_Tp* __ptr, memory_order __m) noexcept
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
        auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
        __atomic_load(__ptr, __dest, int(__m));
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
        auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
                            _Val<_Tp> __desired, memory_order __success,
                            memory_order __failure) noexcept
                                         int(__success), int(__failure));
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
                              _Val<_Tp> __desired, memory_order __success,
                              memory_order __failure) noexcept
                                         int(__success), int(__failure));
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_add(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_sub(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_and(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_or(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_xor(__ptr, __i, int(__m)); }
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
    template<typename _Tp>
      __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval + __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
                                      memory_order_relaxed))
          __newval = __oldval + __i;
    template<typename _Tp>
      __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval - __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
                                      memory_order_relaxed))
          __newval = __oldval - __i;
    template<typename _Tp>
      __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval + __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval,
                                      memory_order_seq_cst,
                                      memory_order_relaxed))
          __newval = __oldval + __i;
    template<typename _Tp>
      __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval - __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval,
                                      memory_order_seq_cst,
                                      memory_order_relaxed))
          __newval = __oldval - __i;
  } // namespace __atomic_impl
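
  // [Editorial note, not part of the upstream header] The *_flt helpers above
  // emulate atomic arithmetic on floating-point types with a compare-exchange
  // loop, since there is no __atomic_fetch_add builtin for them.  The same
  // pattern written against the public API, as a sketch (fetch_add_double is
  // a hypothetical free function, not part of this header):
  //
  //   double fetch_add_double(std::atomic<double>& a, double i)
  //   {
  //     double oldval = a.load(std::memory_order_relaxed);
  //     // On failure, compare_exchange_weak reloads oldval, so the desired
  //     // value oldval + i is recomputed on every retry.
  //     while (!a.compare_exchange_weak(oldval, oldval + i,
  //                                     std::memory_order_seq_cst,
  //                                     std::memory_order_relaxed))
  //       { }
  //     return oldval;
  //   }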
  template<typename _Fp>
    struct __atomic_float
    {
      static_assert(is_floating_point_v<_Fp>);

      static constexpr size_t _S_alignment = __alignof__(_Fp);

      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Fp), 0);
      __atomic_float() = default;

      __atomic_float(_Fp __t) : _M_fp(__t)
      { }

      __atomic_float(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) volatile = delete;
      operator=(_Fp __t) volatile noexcept

      operator=(_Fp __t) noexcept
      is_lock_free() const volatile noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }
      store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(&_M_fp, __m); }
      operator _Fp() const volatile noexcept { return this->load(); }
      operator _Fp() const noexcept { return this->load(); }
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) noexcept
      {
        return __atomic_impl::compare_exchange_weak(&_M_fp,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) volatile noexcept
      {
        return __atomic_impl::compare_exchange_weak(&_M_fp,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) noexcept
      {
        return __atomic_impl::compare_exchange_strong(&_M_fp,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) volatile noexcept
      {
        return __atomic_impl::compare_exchange_strong(&_M_fp,
                                                      __expected, __desired,
                                                      __success, __failure);
      }
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }
      operator+=(value_type __i) noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      operator+=(value_type __i) volatile noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      operator-=(value_type __i) noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

      operator-=(value_type __i) volatile noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }
      alignas(_S_alignment) _Fp _M_fp _GLIBCXX20_INIT(0);
    };
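
  // [Editorial note, not part of the upstream header] __atomic_float is the
  // machinery behind the C++20 std::atomic<float>, std::atomic<double> and
  // std::atomic<long double> specializations.  Usage sketch:
  //
  //   std::atomic<double> d{1.5};
  //   d.fetch_add(2.5);            // d.load() == 4.0
  //   d += 1.0;                    // d.load() == 5.0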
#undef _GLIBCXX20_INIT
  template<typename _Tp,
           bool = is_integral_v<_Tp>, bool = is_floating_point_v<_Tp>>
    struct __atomic_ref;
  template<typename _Tp>
    struct __atomic_ref<_Tp, false, false>
    {
      static_assert(is_trivially_copyable_v<_Tp>);

      static constexpr int _S_min_alignment
        = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16
        ? 0 : sizeof(_Tp);

      using value_type = _Tp;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
        = _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      __atomic_ref(_Tp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      operator=(_Tp __t) const noexcept

      operator _Tp() const noexcept { return this->load(); }
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }

      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      exchange(_Tp __desired, memory_order __m = memory_order_seq_cst)
      const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

      _Tp* _M_ptr;
    };
  template<typename _Tp>
    struct __atomic_ref<_Tp, true, false>
    {
      static_assert(is_integral_v<_Tp>);

      using value_type = _Tp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
        = sizeof(_Tp) > alignof(_Tp) ? sizeof(_Tp) : alignof(_Tp);
      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      __atomic_ref(_Tp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      operator=(_Tp __t) const noexcept

      operator _Tp() const noexcept { return this->load(); }
      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>();
      }

      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      exchange(_Tp __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, __i, __m); }

      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, __i, __m); }

      fetch_and(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_and(_M_ptr, __i, __m); }

      fetch_or(value_type __i,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_or(_M_ptr, __i, __m); }

      fetch_xor(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_xor(_M_ptr, __i, __m); }
      _GLIBCXX_ALWAYS_INLINE value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      operator++() const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); }

      operator--() const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); }

      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, __i); }

      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, __i); }

      operator&=(value_type __i) const noexcept
      { return __atomic_impl::__and_fetch(_M_ptr, __i); }

      operator|=(value_type __i) const noexcept
      { return __atomic_impl::__or_fetch(_M_ptr, __i); }

      operator^=(value_type __i) const noexcept
      { return __atomic_impl::__xor_fetch(_M_ptr, __i); }

      _Tp* _M_ptr;
    };
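
  // [Editorial note, not part of the upstream header] The integral
  // __atomic_ref above backs C++20 std::atomic_ref, which performs atomic
  // operations on an ordinary object named by the reference.  Sketch:
  //
  //   int value = 0;                    // plain int, suitably aligned
  //   {
  //     std::atomic_ref<int> ref(value);
  //     ref.fetch_add(1);               // atomic while any ref is live
  //     ref |= 0x4;                     // value == 5
  //   }
  //   // value may be used non-atomically again here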
  template<typename _Fp>
    struct __atomic_ref<_Fp, false, true>
    {
      static_assert(is_floating_point_v<_Fp>);

      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Fp), 0);

      static constexpr size_t required_alignment = __alignof__(_Fp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;
      __atomic_ref(_Fp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      operator=(_Fp __t) const noexcept

      operator _Fp() const noexcept { return this->load(); }
      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>();
      }

      store(_Fp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); }

      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); }

      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch_flt(_M_ptr, __i); }

      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); }

      _Fp* _M_ptr;
    };
  template<typename _Tp>
    struct __atomic_ref<_Tp*, false, false>
    {
      using value_type = _Tp*;
      using difference_type = ptrdiff_t;

      static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE == 2;

      static constexpr size_t required_alignment = __alignof__(_Tp*);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      __atomic_ref(_Tp*& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }
      __atomic_ref(const __atomic_ref&) noexcept = default;

      operator=(_Tp* __t) const noexcept

      operator _Tp*() const noexcept { return this->load(); }

      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>();
      }
      store(_Tp* __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      exchange(_Tp* __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }
      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_add(difference_type __d,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_sub(difference_type __d,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); }
      operator++(int) const noexcept
      { return fetch_add(1); }

      operator--(int) const noexcept
      { return fetch_sub(1); }

      operator++() const noexcept
      {
        return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1));
      }

      operator--() const noexcept
      {
        return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1));
      }

      operator+=(difference_type __d) const noexcept
      {
        return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d));
      }

      operator-=(difference_type __d) const noexcept
      {
        return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d));
      }
      static constexpr ptrdiff_t
      _S_type_size(ptrdiff_t __d) noexcept
      {
        static_assert(is_object_v<_Tp>);
        return __d * sizeof(_Tp);
      }

      _Tp** _M_ptr;
    };
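
  // [Editorial note, not part of the upstream header] As with the pointer
  // __atomic_base specialization, _S_type_size scales offsets by sizeof(_Tp),
  // so += and fetch_sub below move element-wise.  Sketch:
  //
  //   int buf[8] = {};
  //   int* p = buf;
  //   std::atomic_ref<int*> ref(p);
  //   ref += 3;                        // p == buf + 3
  //   ref.fetch_sub(1);                // p == buf + 2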
#endif // __cplusplus > 201703L

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif // _GLIBCXX_ATOMIC_BASE_H