author    Victor Stinner <vstinner@redhat.com>  2019-03-04 13:21:28 (GMT)
committer GitHub <noreply@github.com>           2019-03-04 13:21:28 (GMT)
commit    4d61e6e3b802399be62a521d6fa785698cb670b5 (patch)
tree      9eb5258b1479331b29b9ce00e6b55c1dc01f9d06 /Include/internal/pycore_atomic.h
parent    f4b0a1c0da80318e0a4f4c70d2722f01ce3512dd (diff)
Revert: bpo-33608: Factor out a private, per-interpreter _Py_AddPendingCall(). (GH-11617) (GH-12159)
* Revert "bpo-36097: Use only public C-API in the_xxsubinterpreters module (adding as necessary). (#12003)" This reverts commit bcfa450f210074e16feb761ae5b3e966a2532fcf. * Revert "bpo-33608: Simplify ceval's DISPATCH by hoisting eval_breaker ahead of time. (gh-12062)" This reverts commit bda918bf65a88560ec453aaba0758a9c0d49b449. * Revert "bpo-33608: Use _Py_AddPendingCall() in _PyCrossInterpreterData_Release(). (gh-12024)" This reverts commit b05b711a2cef6c6c381e01069dedac372e0b9fb2. * Revert "bpo-33608: Factor out a private, per-interpreter _Py_AddPendingCall(). (GH-11617)" This reverts commit ef4ac967e2f3a9a18330cc6abe14adb4bc3d0465.
Diffstat (limited to 'Include/internal/pycore_atomic.h')
-rw-r--r--  Include/internal/pycore_atomic.h  48
1 file changed, 24 insertions(+), 24 deletions(-)
diff --git a/Include/internal/pycore_atomic.h b/Include/internal/pycore_atomic.h
index 7aa7eed..5669f71 100644
--- a/Include/internal/pycore_atomic.h
+++ b/Include/internal/pycore_atomic.h
@@ -58,10 +58,10 @@ typedef struct _Py_atomic_int {
atomic_thread_fence(ORDER)
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
- atomic_store_explicit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER)
+ atomic_store_explicit(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER)
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
- atomic_load_explicit(&((ATOMIC_VAL)->_value), ORDER)
+ atomic_load_explicit(&(ATOMIC_VAL)->_value, ORDER)
/* Use builtin atomic operations in GCC >= 4.7 */
#elif defined(HAVE_BUILTIN_ATOMIC)
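
For context, a minimal standalone sketch (not taken from the header; the struct shape mirrors _Py_atomic_int in this file) of how the C11 branch's macros above are used. Note the parenthesization: since -> binds tighter than unary &, the restored &(ATOMIC_VAL)->_value is equivalent to the fully parenthesized form being removed.

    #include <stdatomic.h>
    #include <stdio.h>

    typedef struct _Py_atomic_int {
        atomic_int _value;
    } _Py_atomic_int;

    #define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
        atomic_store_explicit(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER)
    #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
        atomic_load_explicit(&(ATOMIC_VAL)->_value, ORDER)

    int main(void) {
        _Py_atomic_int flag = {0};
        _Py_atomic_store_explicit(&flag, 1, memory_order_release);
        printf("%d\n", _Py_atomic_load_explicit(&flag, memory_order_acquire));
        return 0;
    }
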
@@ -92,14 +92,14 @@ typedef struct _Py_atomic_int {
(assert((ORDER) == __ATOMIC_RELAXED \
|| (ORDER) == __ATOMIC_SEQ_CST \
|| (ORDER) == __ATOMIC_RELEASE), \
- __atomic_store_n(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER))
+ __atomic_store_n(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER))
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
(assert((ORDER) == __ATOMIC_RELAXED \
|| (ORDER) == __ATOMIC_SEQ_CST \
|| (ORDER) == __ATOMIC_ACQUIRE \
|| (ORDER) == __ATOMIC_CONSUME), \
- __atomic_load_n(&((ATOMIC_VAL)->_value), ORDER))
+ __atomic_load_n(&(ATOMIC_VAL)->_value, ORDER))
/* Only support GCC (for expression statements) and x86 (for simple
* atomic semantics) and MSVC x86/x64/ARM */
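
The (assert(...), __atomic_store_n(...)) shape above is a comma expression: the memory order is validated first (in debug builds), then the builtin runs, and the macro as a whole remains usable wherever an expression is expected. A standalone sketch of the pattern, assuming GCC/Clang __atomic builtins (names are illustrative):

    #include <assert.h>

    static int slot;

    /* Check ORDER, then store; the comma operator keeps this an expression. */
    #define checked_store(PTR, VAL, ORDER)            \
        (assert((ORDER) == __ATOMIC_RELAXED           \
                || (ORDER) == __ATOMIC_SEQ_CST        \
                || (ORDER) == __ATOMIC_RELEASE),      \
         __atomic_store_n((PTR), (VAL), (ORDER)))

    int main(void) {
        checked_store(&slot, 42, __ATOMIC_RELEASE);
        return __atomic_load_n(&slot, __ATOMIC_ACQUIRE) == 42 ? 0 : 1;
    }
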
@@ -324,7 +324,7 @@ inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) {
}
#else
-#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *(ATOMIC_VAL)
+#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *ATOMIC_VAL
#endif
inline int _Py_atomic_load_32bit(volatile int* value, int order) {
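
Dropping the parentheses (the *ATOMIC_VAL form restored above) is only safe while every call site passes a simple identifier; with a compound argument the expansion changes meaning. A standalone illustration of the general hazard, using hypothetical macros not taken from the header:

    /* DEREF_BAD(p + 1) expands to *p + 1, i.e. (*p) + 1, not *(p + 1). */
    #define DEREF_BAD(P)  *P
    #define DEREF_GOOD(P) (*(P))

    int main(void) {
        int a[2] = {10, 20};
        int *p = a;
        int bad  = DEREF_BAD(p + 1);   /* 11: dereferences p, then adds 1 */
        int good = DEREF_GOOD(p + 1);  /* 20: dereferences p + 1 */
        return (bad == 11 && good == 20) ? 0 : 1;
    }
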
@@ -359,15 +359,15 @@ inline int _Py_atomic_load_32bit(volatile int* value, int order) {
}
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
- if (sizeof((ATOMIC_VAL)->_value) == 8) { \
- _Py_atomic_store_64bit((volatile long long*)&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) } else { \
- _Py_atomic_store_32bit((volatile long*)&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) }
+ if (sizeof(*ATOMIC_VAL._value) == 8) { \
+ _Py_atomic_store_64bit((volatile long long*)ATOMIC_VAL._value, NEW_VAL, ORDER) } else { \
+ _Py_atomic_store_32bit((volatile long*)ATOMIC_VAL._value, NEW_VAL, ORDER) }
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
( \
- sizeof((ATOMIC_VAL)->_value) == 8 ? \
- _Py_atomic_load_64bit((volatile long long*)&((ATOMIC_VAL)->_value), ORDER) : \
- _Py_atomic_load_32bit((volatile long*)&((ATOMIC_VAL)->_value), ORDER) \
+ sizeof(*(ATOMIC_VAL._value)) == 8 ? \
+ _Py_atomic_load_64bit((volatile long long*)ATOMIC_VAL._value, ORDER) : \
+ _Py_atomic_load_32bit((volatile long*)ATOMIC_VAL._value, ORDER) \
)
#elif defined(_M_ARM) || defined(_M_ARM64)
typedef enum _Py_memory_order {
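
Both sides of the hunk above dispatch on field width at compile time: sizeof is a constant expression, so the compiler keeps only one of the two calls. A standalone sketch of that technique with stand-in store functions (the header's real ones wrap the MSVC interlocked intrinsics). The sketch wraps the statement form in do { } while (0), which keeps it safe in unbraced if/else bodies, a protection the raw if { } else { } spelling above does not have:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uintptr_t _value; } atomic_word;

    static void store32(volatile long *p, long v)           { *p = v; }
    static void store64(volatile long long *p, long long v) { *p = v; }

    /* sizeof picks the branch at compile time; the cast matches the
       selected stand-in's pointer type. */
    #define STORE(ATOMIC_VAL, NEW_VAL)                                   \
        do {                                                             \
            if (sizeof((ATOMIC_VAL)->_value) == 8)                       \
                store64((volatile long long *)&(ATOMIC_VAL)->_value,     \
                        (long long)(NEW_VAL));                           \
            else                                                         \
                store32((volatile long *)&(ATOMIC_VAL)->_value,          \
                        (long)(NEW_VAL));                                \
        } while (0)

    int main(void) {
        atomic_word w = {0};
        STORE(&w, 42);  /* 8-byte path on LP64, 4-byte path on ILP32 */
        printf("%lu\n", (unsigned long)w._value);
        return 0;
    }
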
@@ -391,13 +391,13 @@ typedef struct _Py_atomic_int {
#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) \
switch (ORDER) { \
case _Py_memory_order_acquire: \
- _InterlockedExchange64_acq((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
+ _InterlockedExchange64_acq((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
break; \
case _Py_memory_order_release: \
- _InterlockedExchange64_rel((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
+ _InterlockedExchange64_rel((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
break; \
default: \
- _InterlockedExchange64((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
+ _InterlockedExchange64((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
break; \
}
#else
@@ -407,13 +407,13 @@ typedef struct _Py_atomic_int {
#define _Py_atomic_store_32bit(ATOMIC_VAL, NEW_VAL, ORDER) \
switch (ORDER) { \
case _Py_memory_order_acquire: \
- _InterlockedExchange_acq((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
+ _InterlockedExchange_acq((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
break; \
case _Py_memory_order_release: \
- _InterlockedExchange_rel((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
+ _InterlockedExchange_rel((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
break; \
default: \
- _InterlockedExchange((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
+ _InterlockedExchange((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
break; \
}
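
The two ARM branches above select an _InterlockedExchange variant per memory order with a plain switch statement. A portable sketch of that dispatch shape, with non-atomic stand-in functions in place of the MSVC intrinsics (the real intrinsics perform the exchange atomically with the indicated ordering):

    typedef enum {
        order_relaxed, order_acquire, order_release, order_seq_cst
    } mem_order;

    /* Stand-ins for _InterlockedExchange_acq / _rel / plain (NOT atomic). */
    static long xchg_acq(volatile long *p, long v) { long o = *p; *p = v; return o; }
    static long xchg_rel(volatile long *p, long v) { long o = *p; *p = v; return o; }
    static long xchg_cst(volatile long *p, long v) { long o = *p; *p = v; return o; }

    #define STORE_32BIT(PTR, VAL, ORDER)  \
        switch (ORDER) {                  \
        case order_acquire:               \
            xchg_acq((PTR), (VAL));       \
            break;                        \
        case order_release:               \
            xchg_rel((PTR), (VAL));       \
            break;                        \
        default:                          \
            xchg_cst((PTR), (VAL));       \
            break;                        \
        }

    int main(void) {
        volatile long cell = 0;
        STORE_32BIT(&cell, 7, order_release);
        return cell == 7 ? 0 : 1;
    }
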
@@ -454,7 +454,7 @@ inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) {
}
#else
-#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *(ATOMIC_VAL)
+#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *ATOMIC_VAL
#endif
inline int _Py_atomic_load_32bit(volatile int* value, int order) {
@@ -489,15 +489,15 @@ inline int _Py_atomic_load_32bit(volatile int* value, int order) {
}
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
- if (sizeof((ATOMIC_VAL)->_value) == 8) { \
- _Py_atomic_store_64bit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) } else { \
- _Py_atomic_store_32bit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) }
+ if (sizeof(*ATOMIC_VAL._value) == 8) { \
+ _Py_atomic_store_64bit(ATOMIC_VAL._value, NEW_VAL, ORDER) } else { \
+ _Py_atomic_store_32bit(ATOMIC_VAL._value, NEW_VAL, ORDER) }
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
( \
- sizeof((ATOMIC_VAL)->_value) == 8 ? \
- _Py_atomic_load_64bit(&((ATOMIC_VAL)->_value), ORDER) : \
- _Py_atomic_load_32bit(&((ATOMIC_VAL)->_value), ORDER) \
+ sizeof(*(ATOMIC_VAL._value)) == 8 ? \
+ _Py_atomic_load_64bit(ATOMIC_VAL._value, ORDER) : \
+ _Py_atomic_load_32bit(ATOMIC_VAL._value, ORDER) \
)
#endif
#else /* !gcc x86 !_msc_ver */