author    Sam Gross <colesbury@gmail.com>  2024-07-08 18:52:07 (GMT)
committer GitHub <noreply@github.com>      2024-07-08 18:52:07 (GMT)
commit    1d3cf79a501a93a7a488fc75d4db3060c5ee7d1a (patch)
tree      2e2f59a0d466ee4abccae054281be060c578e094 /Include
parent    31873bea471020ca5deaf735d9acb0f1abeb1d3c (diff)
gh-121368: Fix seq lock memory ordering in _PyType_Lookup (#121388)
The `_PySeqLock_EndRead` function needs an acquire fence to ensure that the load of the sequence happens after any loads within the read side critical section. The missing fence can trigger bugs on macOS arm64. Additionally, we need a release fence in `_PySeqLock_LockWrite` to ensure that the sequence update is visible before any modifications to the cache entry.
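For context, a minimal sketch of the ordering the message describes, built on the fence primitives added below. This is illustrative only, not the Python/lock.c implementation: the bare uint32_t* sequence pointer, the single-writer locking, and the sketch function names are simplifying assumptions, and the relaxed load/store helpers are assumed from Include/cpython/pyatomic.h.

/* Illustrative sketch only -- not the code in Python/lock.c. */

/* Read side: the acquire fence keeps the loads performed inside the read
   critical section from being reordered after the re-load of the sequence
   number that validates them. */
static int
seqlock_end_read_sketch(uint32_t *sequence, uint32_t previous)
{
    _Py_atomic_fence_acquire();
    return _Py_atomic_load_uint32_relaxed(sequence) == previous;
}

/* Write side (assuming a single writer for simplicity): the release fence
   ensures the odd "updating" sequence value is visible before any of the
   caller's subsequent stores to the protected cache entry. */
static void
seqlock_lock_write_sketch(uint32_t *sequence)
{
    uint32_t v = _Py_atomic_load_uint32_relaxed(sequence);
    _Py_atomic_store_uint32_relaxed(sequence, v + 1);
    _Py_atomic_fence_release();
    /* ... caller now modifies the cache entry ... */
}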
Diffstat (limited to 'Include')
-rw-r--r--  Include/cpython/pyatomic.h      3
-rw-r--r--  Include/cpython/pyatomic_gcc.h  4
-rw-r--r--  Include/cpython/pyatomic_msc.h  12
-rw-r--r--  Include/cpython/pyatomic_std.h  7
-rw-r--r--  Include/internal/pycore_lock.h  8
5 files changed, 30 insertions, 4 deletions
diff --git a/Include/cpython/pyatomic.h b/Include/cpython/pyatomic.h
index 55a139b..4ecef4f 100644
--- a/Include/cpython/pyatomic.h
+++ b/Include/cpython/pyatomic.h
@@ -510,6 +510,9 @@ _Py_atomic_load_ssize_acquire(const Py_ssize_t *obj);
// See https://en.cppreference.com/w/cpp/atomic/atomic_thread_fence
static inline void _Py_atomic_fence_seq_cst(void);
+// Acquire fence
+static inline void _Py_atomic_fence_acquire(void);
+
// Release fence
static inline void _Py_atomic_fence_release(void);
diff --git a/Include/cpython/pyatomic_gcc.h b/Include/cpython/pyatomic_gcc.h
index f2ebdee..ef09954 100644
--- a/Include/cpython/pyatomic_gcc.h
+++ b/Include/cpython/pyatomic_gcc.h
@@ -543,5 +543,9 @@ _Py_atomic_fence_seq_cst(void)
{ __atomic_thread_fence(__ATOMIC_SEQ_CST); }
static inline void
+_Py_atomic_fence_acquire(void)
+{ __atomic_thread_fence(__ATOMIC_ACQUIRE); }
+
+static inline void
_Py_atomic_fence_release(void)
{ __atomic_thread_fence(__ATOMIC_RELEASE); }
diff --git a/Include/cpython/pyatomic_msc.h b/Include/cpython/pyatomic_msc.h
index f32995c..84da21b 100644
--- a/Include/cpython/pyatomic_msc.h
+++ b/Include/cpython/pyatomic_msc.h
@@ -1069,6 +1069,18 @@ _Py_atomic_fence_seq_cst(void)
}
static inline void
+_Py_atomic_fence_acquire(void)
+{
+#if defined(_M_ARM64)
+    __dmb(_ARM64_BARRIER_ISHLD);
+#elif defined(_M_X64) || defined(_M_IX86)
+    _ReadBarrier();
+#else
+# error "no implementation of _Py_atomic_fence_acquire"
+#endif
+}
+
+static inline void
_Py_atomic_fence_release(void)
{
#if defined(_M_ARM64)
diff --git a/Include/cpython/pyatomic_std.h b/Include/cpython/pyatomic_std.h
index 0cdce4e..7c71e94 100644
--- a/Include/cpython/pyatomic_std.h
+++ b/Include/cpython/pyatomic_std.h
@@ -962,6 +962,13 @@ _Py_atomic_fence_seq_cst(void)
}
static inline void
+_Py_atomic_fence_acquire(void)
+{
+    _Py_USING_STD;
+    atomic_thread_fence(memory_order_acquire);
+}
+
+static inline void
_Py_atomic_fence_release(void)
{
_Py_USING_STD;
diff --git a/Include/internal/pycore_lock.h b/Include/internal/pycore_lock.h
index 3824434..e6da083 100644
--- a/Include/internal/pycore_lock.h
+++ b/Include/internal/pycore_lock.h
@@ -228,12 +228,12 @@ PyAPI_FUNC(void) _PySeqLock_AbandonWrite(_PySeqLock *seqlock);
PyAPI_FUNC(uint32_t) _PySeqLock_BeginRead(_PySeqLock *seqlock);
// End the read operation and confirm that the sequence number has not changed.
-// Returns 1 if the read was successful or 0 if the read should be re-tried.
-PyAPI_FUNC(uint32_t) _PySeqLock_EndRead(_PySeqLock *seqlock, uint32_t previous);
+// Returns 1 if the read was successful or 0 if the read should be retried.
+PyAPI_FUNC(int) _PySeqLock_EndRead(_PySeqLock *seqlock, uint32_t previous);
// Check if the lock was held during a fork and clear the lock. Returns 1
-// if the lock was held and any associated datat should be cleared.
-PyAPI_FUNC(uint32_t) _PySeqLock_AfterFork(_PySeqLock *seqlock);
+// if the lock was held and any associated data should be cleared.
+PyAPI_FUNC(int) _PySeqLock_AfterFork(_PySeqLock *seqlock);
#ifdef __cplusplus
}
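As a usage note, a reader of a seqlock-protected cache entry is expected to follow a retry pattern like the sketch below. The struct layout and function names are hypothetical (the real type cache lives in Objects/typeobject.c), and _Py_atomic_load_ptr_relaxed is assumed from Include/cpython/pyatomic.h; building this requires the internal CPython headers (Py_BUILD_CORE).

/* Hypothetical example -- not taken from Objects/typeobject.c. */
struct entry_sketch {
    _PySeqLock lock;
    PyObject *value;
};

static PyObject *
lookup_sketch(struct entry_sketch *entry)
{
    PyObject *value;
    uint32_t sequence;
    do {
        sequence = _PySeqLock_BeginRead(&entry->lock);
        /* Load(s) inside the read-side critical section. */
        value = _Py_atomic_load_ptr_relaxed(&entry->value);
        /* _PySeqLock_EndRead() now issues an acquire fence before re-checking
           the sequence, so the load above cannot be reordered past the check. */
    } while (!_PySeqLock_EndRead(&entry->lock, sequence));
    return value;
}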