author    | Sam Gross <colesbury@gmail.com> | 2024-06-20 16:00:25 (GMT)
committer | GitHub <noreply@github.com>     | 2024-06-20 16:00:25 (GMT)
commit    | 3cb6c4cd60bec1acbcd960f5e7bd65f78152dbdd (patch)
tree      | f5b72124507a3c8ea7e5938bdb2cdb7563ca1ac1
parent    | 7c7aa5a99cce256ff726654038092a333a1f0531 (diff)
[3.13] gh-117511: Make PyMutex public in the non-limited API (GH-117731) (#120800)
(cherry picked from commit 3af7263037de1d0ef63b070fc7bfc2cf042eaebe)
-rw-r--r-- | Doc/c-api/init.rst                                                     | 43
-rw-r--r-- | Doc/whatsnew/3.13.rst                                                  |  5
-rw-r--r-- | Include/Python.h                                                       |  1
-rw-r--r-- | Include/cpython/lock.h                                                 | 63
-rw-r--r-- | Include/cpython/weakrefobject.h                                        |  2
-rw-r--r-- | Include/internal/pycore_critical_section.h                             |  6
-rw-r--r-- | Include/internal/pycore_lock.h                                         | 68
-rw-r--r-- | Include/internal/pycore_warnings.h                                     |  2
-rw-r--r-- | Include/lock.h                                                         | 16
-rw-r--r-- | Include/object.h                                                       |  6
-rw-r--r-- | Makefile.pre.in                                                        |  2
-rw-r--r-- | Misc/NEWS.d/next/C API/2024-04-10-16-48-04.gh-issue-117511.RZtBRK.rst  |  1
-rw-r--r-- | Modules/_testinternalcapi/test_lock.c                                  | 16
-rw-r--r-- | Objects/object.c                                                       |  2
-rw-r--r-- | PCbuild/pythoncore.vcxproj                                             |  2
-rw-r--r-- | PCbuild/pythoncore.vcxproj.filters                                     |  6
-rw-r--r-- | Python/critical_section.c                                              |  2
-rw-r--r-- | Python/lock.c                                                          | 52
18 files changed, 185 insertions, 110 deletions
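
The documentation added in Doc/c-api/init.rst below spells out the intended calling pattern for the newly public lock: zero-initialize, PyMutex_Lock, PyMutex_Unlock. Here is a minimal sketch of that pattern in an extension module, assuming Python 3.13+ and the non-limited API; the counter names and the function are illustrative, not part of this patch.

    /* Sketch only: PyMutex is usable once <Python.h> is included (it now
     * pulls in Include/lock.h).  Names below are hypothetical. */
    #include <Python.h>

    static PyMutex counter_mutex = {0};   /* zero-initialized == unlocked */
    static long shared_counter = 0;

    static PyObject *
    counter_increment(PyObject *self, PyObject *Py_UNUSED(args))
    {
        PyMutex_Lock(&counter_mutex);     /* may release the GIL while it blocks */
        long value = ++shared_counter;
        PyMutex_Unlock(&counter_mutex);   /* fatal error if the mutex is not locked */
        return PyLong_FromLong(value);
    }

Because the lock may detach from the GIL while blocked, callers should not assume the GIL was held continuously across a call to PyMutex_Lock.
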
diff --git a/Doc/c-api/init.rst b/Doc/c-api/init.rst
index 9e118d4..58c7903 100644
--- a/Doc/c-api/init.rst
+++ b/Doc/c-api/init.rst
@@ -55,6 +55,11 @@ The following functions can be safely called before Python is initialized:
 * :c:func:`PyMem_RawCalloc`
 * :c:func:`PyMem_RawFree`

+* Synchronization:
+
+  * :c:func:`PyMutex_Lock`
+  * :c:func:`PyMutex_Unlock`
+
 .. note::

    The following functions **should not be called** before
@@ -2152,3 +2157,41 @@ be used in new code.
 .. c:function:: void PyThread_delete_key_value(int key)

 .. c:function:: void PyThread_ReInitTLS()
+
+
+Synchronization Primitives
+==========================
+
+The C-API provides a basic mutual exclusion lock.
+
+.. c:type:: PyMutex
+
+   A mutual exclusion lock. The :c:type:`!PyMutex` should be initialized to
+   zero to represent the unlocked state. For example::
+
+      PyMutex mutex = {0};
+
+   Instances of :c:type:`!PyMutex` should not be copied or moved. Both the
+   contents and address of a :c:type:`!PyMutex` are meaningful, and it must
+   remain at a fixed, writable location in memory.
+
+   .. note::
+
+      A :c:type:`!PyMutex` currently occupies one byte, but the size should be
+      considered unstable. The size may change in future Python releases
+      without a deprecation period.
+
+   .. versionadded:: 3.13
+
+.. c:function:: void PyMutex_Lock(PyMutex *m)
+
+   Lock mutex *m*. If another thread has already locked it, the calling
+   thread will block until the mutex is unlocked. While blocked, the thread
+   will temporarily release the :term:`GIL` if it is held.
+
+   .. versionadded:: 3.13
+
+.. c:function:: void PyMutex_Unlock(PyMutex *m)
+
+   Unlock mutex *m*. The mutex must be locked --- otherwise, the function will
+   issue a fatal error.
+
+   .. versionadded:: 3.13
diff --git a/Doc/whatsnew/3.13.rst b/Doc/whatsnew/3.13.rst
index 19727b9..35853ff 100644
--- a/Doc/whatsnew/3.13.rst
+++ b/Doc/whatsnew/3.13.rst
@@ -2160,6 +2160,11 @@ New Features
   :c:func:`PyEval_GetLocals` return :term:`strong references <strong
   reference>` rather than borrowed references. (Added as part of :pep:`667`.)

+* Add :c:type:`PyMutex` API, a lightweight mutex that occupies a single byte.
+  The :c:func:`PyMutex_Lock` function will release the GIL (if currently held)
+  if the operation needs to block.
+  (Contributed by Sam Gross in :gh:`108724`.)
+
 Build Changes
 =============
diff --git a/Include/Python.h b/Include/Python.h
index 92751bb..ba2724c 100644
--- a/Include/Python.h
+++ b/Include/Python.h
@@ -64,6 +64,7 @@
 #include "pybuffer.h"
 #include "pystats.h"
 #include "pyatomic.h"
+#include "lock.h"
 #include "object.h"
 #include "objimpl.h"
 #include "typeslots.h"
diff --git a/Include/cpython/lock.h b/Include/cpython/lock.h
new file mode 100644
index 0000000..8ee03e8
--- /dev/null
+++ b/Include/cpython/lock.h
@@ -0,0 +1,63 @@
+#ifndef Py_CPYTHON_LOCK_H
+#  error "this header file must not be included directly"
+#endif
+
+#define _Py_UNLOCKED 0
+#define _Py_LOCKED 1
+
+// A mutex that occupies one byte. The lock can be zero initialized to
+// represent the unlocked state.
+//
+// Typical initialization:
+//   PyMutex m = (PyMutex){0};
+//
+// Or initialize as global variables:
+//   static PyMutex m;
+//
+// Typical usage:
+//   PyMutex_Lock(&m);
+//   ...
+//   PyMutex_Unlock(&m);
+//
+// The contents of the PyMutex are not part of the public API, but are
+// described to aid in understanding the implementation and debugging. Only
+// the two least significant bits are used. The remaining bits are always zero:
+// 0b00: unlocked
+// 0b01: locked
+// 0b10: unlocked and has parked threads
+// 0b11: locked and has parked threads
+typedef struct PyMutex {
+    uint8_t _bits;  // (private)
+} PyMutex;
+
+// exported function for locking the mutex
+PyAPI_FUNC(void) PyMutex_Lock(PyMutex *m);
+
+// exported function for unlocking the mutex
+PyAPI_FUNC(void) PyMutex_Unlock(PyMutex *m);
+
+// Locks the mutex.
+//
+// If the mutex is currently locked, the calling thread will be parked until
+// the mutex is unlocked. If the current thread holds the GIL, then the GIL
+// will be released while the thread is parked.
+static inline void
+_PyMutex_Lock(PyMutex *m)
+{
+    uint8_t expected = _Py_UNLOCKED;
+    if (!_Py_atomic_compare_exchange_uint8(&m->_bits, &expected, _Py_LOCKED)) {
+        PyMutex_Lock(m);
+    }
+}
+#define PyMutex_Lock _PyMutex_Lock
+
+// Unlocks the mutex.
+static inline void
+_PyMutex_Unlock(PyMutex *m)
+{
+    uint8_t expected = _Py_LOCKED;
+    if (!_Py_atomic_compare_exchange_uint8(&m->_bits, &expected, _Py_UNLOCKED)) {
+        PyMutex_Unlock(m);
+    }
+}
+#define PyMutex_Unlock _PyMutex_Unlock
diff --git a/Include/cpython/weakrefobject.h b/Include/cpython/weakrefobject.h
index dcca166..28acf72 100644
--- a/Include/cpython/weakrefobject.h
+++ b/Include/cpython/weakrefobject.h
@@ -36,7 +36,7 @@ struct _PyWeakReference {
      * Normally this can be derived from wr_object, but in some cases we need
      * to lock after wr_object has been set to Py_None.
      */
-    struct _PyMutex *weakrefs_lock;
+    PyMutex *weakrefs_lock;
 #endif
 };
diff --git a/Include/internal/pycore_critical_section.h b/Include/internal/pycore_critical_section.h
index 7bebcdb..3e15c3a 100644
--- a/Include/internal/pycore_critical_section.h
+++ b/Include/internal/pycore_critical_section.h
@@ -202,7 +202,7 @@ _PyCriticalSection2_BeginSlow(_PyCriticalSection2 *c, PyMutex *m1, PyMutex *m2,
 static inline void
 _PyCriticalSection_Begin(_PyCriticalSection *c, PyMutex *m)
 {
-    if (PyMutex_LockFast(&m->v)) {
+    if (PyMutex_LockFast(&m->_bits)) {
         PyThreadState *tstate = _PyThreadState_GET();
         c->mutex = m;
         c->prev = tstate->critical_section;
@@ -255,8 +255,8 @@ _PyCriticalSection2_Begin(_PyCriticalSection2 *c, PyMutex *m1, PyMutex *m2)
         m2 = tmp;
     }

-    if (PyMutex_LockFast(&m1->v)) {
-        if (PyMutex_LockFast(&m2->v)) {
+    if (PyMutex_LockFast(&m1->_bits)) {
+        if (PyMutex_LockFast(&m2->_bits)) {
             PyThreadState *tstate = _PyThreadState_GET();
             c->base.mutex = m1;
             c->mutex2 = m2;
diff --git a/Include/internal/pycore_lock.h b/Include/internal/pycore_lock.h
index d5853b2..80d6098 100644
--- a/Include/internal/pycore_lock.h
+++ b/Include/internal/pycore_lock.h
@@ -13,48 +13,10 @@ extern "C" {
 #  error "this header requires Py_BUILD_CORE define"
 #endif

-
-// A mutex that occupies one byte. The lock can be zero initialized.
-//
-// Only the two least significant bits are used. The remaining bits should be
-// zero:
-// 0b00: unlocked
-// 0b01: locked
-// 0b10: unlocked and has parked threads
-// 0b11: locked and has parked threads
-//
-// Typical initialization:
-//   PyMutex m = (PyMutex){0};
-//
-// Or initialize as global variables:
-//   static PyMutex m;
-//
-// Typical usage:
-//   PyMutex_Lock(&m);
-//   ...
-//   PyMutex_Unlock(&m);
-
-// NOTE: In Py_GIL_DISABLED builds, `struct _PyMutex` is defined in Include/object.h.
-// The Py_GIL_DISABLED builds need the definition in Include/object.h for the
-// `ob_mutex` field in PyObject. For the default (non-free-threaded) build,
-// we define the struct here to avoid exposing it in the public API.
-#ifndef Py_GIL_DISABLED
-struct _PyMutex { uint8_t v; };
-#endif
-
-typedef struct _PyMutex PyMutex;
-
-#define _Py_UNLOCKED 0
-#define _Py_LOCKED 1
+//_Py_UNLOCKED is defined as 0 and _Py_LOCKED as 1 in Include/cpython/lock.h
 #define _Py_HAS_PARKED 2
 #define _Py_ONCE_INITIALIZED 4

-// (private) slow path for locking the mutex
-PyAPI_FUNC(void) _PyMutex_LockSlow(PyMutex *m);
-
-// (private) slow path for unlocking the mutex
-PyAPI_FUNC(void) _PyMutex_UnlockSlow(PyMutex *m);
-
 static inline int
 PyMutex_LockFast(uint8_t *lock_bits)
 {
@@ -62,35 +24,11 @@ PyMutex_LockFast(uint8_t *lock_bits)
     return _Py_atomic_compare_exchange_uint8(lock_bits, &expected, _Py_LOCKED);
 }

-// Locks the mutex.
-//
-// If the mutex is currently locked, the calling thread will be parked until
-// the mutex is unlocked. If the current thread holds the GIL, then the GIL
-// will be released while the thread is parked.
-static inline void
-PyMutex_Lock(PyMutex *m)
-{
-    uint8_t expected = _Py_UNLOCKED;
-    if (!_Py_atomic_compare_exchange_uint8(&m->v, &expected, _Py_LOCKED)) {
-        _PyMutex_LockSlow(m);
-    }
-}
-
-// Unlocks the mutex.
-static inline void
-PyMutex_Unlock(PyMutex *m)
-{
-    uint8_t expected = _Py_LOCKED;
-    if (!_Py_atomic_compare_exchange_uint8(&m->v, &expected, _Py_UNLOCKED)) {
-        _PyMutex_UnlockSlow(m);
-    }
-}
-
 // Checks if the mutex is currently locked.
 static inline int
 PyMutex_IsLocked(PyMutex *m)
 {
-    return (_Py_atomic_load_uint8(&m->v) & _Py_LOCKED) != 0;
+    return (_Py_atomic_load_uint8(&m->_bits) & _Py_LOCKED) != 0;
 }

 // Re-initializes the mutex after a fork to the unlocked state.
@@ -121,7 +59,7 @@ static inline void
 PyMutex_LockFlags(PyMutex *m, _PyLockFlags flags)
 {
     uint8_t expected = _Py_UNLOCKED;
-    if (!_Py_atomic_compare_exchange_uint8(&m->v, &expected, _Py_LOCKED)) {
+    if (!_Py_atomic_compare_exchange_uint8(&m->_bits, &expected, _Py_LOCKED)) {
         _PyMutex_LockTimed(m, -1, flags);
     }
 }
diff --git a/Include/internal/pycore_warnings.h b/Include/internal/pycore_warnings.h
index 114796d..f9f6559 100644
--- a/Include/internal/pycore_warnings.h
+++ b/Include/internal/pycore_warnings.h
@@ -14,7 +14,7 @@ struct _warnings_runtime_state {
     PyObject *filters;  /* List */
     PyObject *once_registry;  /* Dict */
     PyObject *default_action; /* String */
-    struct _PyMutex mutex;
+    PyMutex mutex;
     long filters_version;
 };
diff --git a/Include/lock.h b/Include/lock.h
new file mode 100644
index 0000000..782b9db
--- /dev/null
+++ b/Include/lock.h
@@ -0,0 +1,16 @@
+#ifndef Py_LOCK_H
+#define Py_LOCK_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_LIMITED_API
+#  define Py_CPYTHON_LOCK_H
+#  include "cpython/lock.h"
+#  undef Py_CPYTHON_LOCK_H
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_LOCK_H */
diff --git a/Include/object.h b/Include/object.h
index 05d9056..fa9c2a5 100644
--- a/Include/object.h
+++ b/Include/object.h
@@ -204,17 +204,13 @@ struct _object {
 // Create a shared field from a refcnt and desired flags
 #define _Py_REF_SHARED(refcnt, flags) (((refcnt) << _Py_REF_SHARED_SHIFT) + (flags))

-// NOTE: In non-free-threaded builds, `struct _PyMutex` is defined in
-// pycore_lock.h. See pycore_lock.h for more details.
-struct _PyMutex { uint8_t v; };
-
 struct _object {
     // ob_tid stores the thread id (or zero). It is also used by the GC and the
     // trashcan mechanism as a linked list pointer and by the GC to store the
     // computed "gc_refs" refcount.
     uintptr_t ob_tid;
     uint16_t _padding;
-    struct _PyMutex ob_mutex;   // per-object lock
+    PyMutex ob_mutex;           // per-object lock
     uint8_t ob_gc_bits;         // gc-related state
     uint32_t ob_ref_local;      // local reference count
     Py_ssize_t ob_ref_shared;   // shared (atomic) reference count
diff --git a/Makefile.pre.in b/Makefile.pre.in
index a8a57d2..c587b0d 100644
--- a/Makefile.pre.in
+++ b/Makefile.pre.in
@@ -1019,6 +1019,7 @@ PYTHON_HEADERS= \
 		$(srcdir)/Include/intrcheck.h \
 		$(srcdir)/Include/iterobject.h \
 		$(srcdir)/Include/listobject.h \
+		$(srcdir)/Include/lock.h \
 		$(srcdir)/Include/longobject.h \
 		$(srcdir)/Include/marshal.h \
 		$(srcdir)/Include/memoryobject.h \
@@ -1091,6 +1092,7 @@ PYTHON_HEADERS= \
 		$(srcdir)/Include/cpython/import.h \
 		$(srcdir)/Include/cpython/initconfig.h \
 		$(srcdir)/Include/cpython/listobject.h \
+		$(srcdir)/Include/cpython/lock.h \
 		$(srcdir)/Include/cpython/longintrepr.h \
 		$(srcdir)/Include/cpython/longobject.h \
 		$(srcdir)/Include/cpython/memoryobject.h \
diff --git a/Misc/NEWS.d/next/C API/2024-04-10-16-48-04.gh-issue-117511.RZtBRK.rst b/Misc/NEWS.d/next/C API/2024-04-10-16-48-04.gh-issue-117511.RZtBRK.rst
new file mode 100644
index 0000000..586685a
--- /dev/null
+++ b/Misc/NEWS.d/next/C API/2024-04-10-16-48-04.gh-issue-117511.RZtBRK.rst
@@ -0,0 +1 @@
+Make the :c:type:`PyMutex` public in the non-limited C API.
diff --git a/Modules/_testinternalcapi/test_lock.c b/Modules/_testinternalcapi/test_lock.c
index 1544fe1..8d67841 100644
--- a/Modules/_testinternalcapi/test_lock.c
+++ b/Modules/_testinternalcapi/test_lock.c
@@ -36,9 +36,9 @@ test_lock_basic(PyObject *self, PyObject *obj)
     // uncontended lock and unlock
     PyMutex_Lock(&m);
-    assert(m.v == 1);
+    assert(m._bits == 1);
     PyMutex_Unlock(&m);
-    assert(m.v == 0);
+    assert(m._bits == 0);

     Py_RETURN_NONE;
 }
@@ -57,10 +57,10 @@ lock_thread(void *arg)
     _Py_atomic_store_int(&test_data->started, 1);

     PyMutex_Lock(m);
-    assert(m->v == 1);
+    assert(m->_bits == 1);

     PyMutex_Unlock(m);
-    assert(m->v == 0);
+    assert(m->_bits == 0);

     _PyEvent_Notify(&test_data->done);
 }
@@ -73,7 +73,7 @@ test_lock_two_threads(PyObject *self, PyObject *obj)
     memset(&test_data, 0, sizeof(test_data));

     PyMutex_Lock(&test_data.m);
-    assert(test_data.m.v == 1);
+    assert(test_data.m._bits == 1);

     PyThread_start_new_thread(lock_thread, &test_data);

@@ -82,17 +82,17 @@ test_lock_two_threads(PyObject *self, PyObject *obj)
     uint8_t v;
     do {
         pysleep(10);  // allow some time for the other thread to try to lock
-        v = _Py_atomic_load_uint8_relaxed(&test_data.m.v);
+        v = _Py_atomic_load_uint8_relaxed(&test_data.m._bits);
         assert(v == 1 || v == 3);
         iters++;
     } while (v != 3 && iters < 200);

     // both the "locked" and the "has parked" bits should be set
-    assert(test_data.m.v == 3);
+    assert(test_data.m._bits == 3);

     PyMutex_Unlock(&test_data.m);
     PyEvent_Wait(&test_data.done);
-    assert(test_data.m.v == 0);
+    assert(test_data.m._bits == 0);

     Py_RETURN_NONE;
 }
diff --git a/Objects/object.c b/Objects/object.c
index 1127fed..0355303 100644
--- a/Objects/object.c
+++ b/Objects/object.c
@@ -2376,7 +2376,7 @@ new_reference(PyObject *op)
 #else
     op->ob_tid = _Py_ThreadId();
     op->_padding = 0;
-    op->ob_mutex = (struct _PyMutex){ 0 };
+    op->ob_mutex = (PyMutex){ 0 };
     op->ob_gc_bits = 0;
     op->ob_ref_local = 1;
     op->ob_ref_shared = 0;
diff --git a/PCbuild/pythoncore.vcxproj b/PCbuild/pythoncore.vcxproj
index 16fb424..a415d45 100644
--- a/PCbuild/pythoncore.vcxproj
+++ b/PCbuild/pythoncore.vcxproj
@@ -156,6 +156,7 @@
     <ClInclude Include="..\Include\cpython\import.h" />
     <ClInclude Include="..\Include\cpython\initconfig.h" />
     <ClInclude Include="..\Include\cpython\listobject.h" />
+    <ClInclude Include="..\Include\cpython\lock.h" />
     <ClInclude Include="..\Include\cpython\longintrepr.h" />
     <ClInclude Include="..\Include\cpython\longobject.h" />
     <ClInclude Include="..\Include\cpython\memoryobject.h" />
@@ -310,6 +311,7 @@
     <ClInclude Include="..\Include\intrcheck.h" />
     <ClInclude Include="..\Include\iterobject.h" />
     <ClInclude Include="..\Include\listobject.h" />
+    <ClInclude Include="..\Include\lock.h" />
     <ClInclude Include="..\Include\longobject.h" />
     <ClInclude Include="..\Include\marshal.h" />
     <ClInclude Include="..\Include\memoryobject.h" />
diff --git a/PCbuild/pythoncore.vcxproj.filters b/PCbuild/pythoncore.vcxproj.filters
index cf9bc0f..2048d9a 100644
--- a/PCbuild/pythoncore.vcxproj.filters
+++ b/PCbuild/pythoncore.vcxproj.filters
@@ -114,6 +114,9 @@
     <ClInclude Include="..\Include\listobject.h">
      <Filter>Include</Filter>
    </ClInclude>
+    <ClInclude Include="..\Include\lock.h">
+      <Filter>Include</Filter>
+    </ClInclude>
     <ClInclude Include="..\Include\longobject.h">
       <Filter>Include</Filter>
     </ClInclude>
@@ -393,6 +396,9 @@
     <ClInclude Include="..\Include\cpython\listobject.h">
       <Filter>Include\cpython</Filter>
     </ClInclude>
+    <ClInclude Include="..\Include\cpython\lock.h">
+      <Filter>Include</Filter>
+    </ClInclude>
     <ClInclude Include="..\Include\cpython\longintrepr.h">
       <Filter>Include</Filter>
     </ClInclude>
diff --git a/Python/critical_section.c b/Python/critical_section.c
index 2214d80..ac679ac 100644
--- a/Python/critical_section.c
+++ b/Python/critical_section.c
@@ -14,7 +14,7 @@ _PyCriticalSection_BeginSlow(_PyCriticalSection *c, PyMutex *m)
     c->prev = (uintptr_t)tstate->critical_section;
     tstate->critical_section = (uintptr_t)c;

-    _PyMutex_LockSlow(m);
+    PyMutex_Lock(m);
     c->mutex = m;
 }
diff --git a/Python/lock.c b/Python/lock.c
index 555f4c2..7c6a517 100644
--- a/Python/lock.c
+++ b/Python/lock.c
@@ -47,18 +47,12 @@ _Py_yield(void)
 #endif
 }

-void
-_PyMutex_LockSlow(PyMutex *m)
-{
-    _PyMutex_LockTimed(m, -1, _PY_LOCK_DETACH);
-}
-
 PyLockStatus
 _PyMutex_LockTimed(PyMutex *m, PyTime_t timeout, _PyLockFlags flags)
 {
-    uint8_t v = _Py_atomic_load_uint8_relaxed(&m->v);
+    uint8_t v = _Py_atomic_load_uint8_relaxed(&m->_bits);
     if ((v & _Py_LOCKED) == 0) {
-        if (_Py_atomic_compare_exchange_uint8(&m->v, &v, v|_Py_LOCKED)) {
+        if (_Py_atomic_compare_exchange_uint8(&m->_bits, &v, v|_Py_LOCKED)) {
             return PY_LOCK_ACQUIRED;
         }
     }
@@ -83,7 +77,7 @@ _PyMutex_LockTimed(PyMutex *m, PyTime_t timeout, _PyLockFlags flags)
     for (;;) {
         if ((v & _Py_LOCKED) == 0) {
             // The lock is unlocked. Try to grab it.
-            if (_Py_atomic_compare_exchange_uint8(&m->v, &v, v|_Py_LOCKED)) {
+            if (_Py_atomic_compare_exchange_uint8(&m->_bits, &v, v|_Py_LOCKED)) {
                 return PY_LOCK_ACQUIRED;
             }
             continue;
@@ -104,17 +98,17 @@ _PyMutex_LockTimed(PyMutex *m, PyTime_t timeout, _PyLockFlags flags)
         if (!(v & _Py_HAS_PARKED)) {
             // We are the first waiter. Set the _Py_HAS_PARKED flag.
             newv = v | _Py_HAS_PARKED;
-            if (!_Py_atomic_compare_exchange_uint8(&m->v, &v, newv)) {
+            if (!_Py_atomic_compare_exchange_uint8(&m->_bits, &v, newv)) {
                 continue;
             }
         }

-        int ret = _PyParkingLot_Park(&m->v, &newv, sizeof(newv), timeout,
+        int ret = _PyParkingLot_Park(&m->_bits, &newv, sizeof(newv), timeout,
                                      &entry, (flags & _PY_LOCK_DETACH) != 0);
         if (ret == Py_PARK_OK) {
             if (entry.handed_off) {
                 // We own the lock now.
-                assert(_Py_atomic_load_uint8_relaxed(&m->v) & _Py_LOCKED);
+                assert(_Py_atomic_load_uint8_relaxed(&m->_bits) & _Py_LOCKED);
                 return PY_LOCK_ACQUIRED;
             }
         }
@@ -136,7 +130,7 @@
             }
         }

-        v = _Py_atomic_load_uint8_relaxed(&m->v);
+        v = _Py_atomic_load_uint8_relaxed(&m->_bits);
     }
 }

@@ -158,13 +152,13 @@ mutex_unpark(PyMutex *m, struct mutex_entry *entry, int has_more_waiters)
             v |= _Py_HAS_PARKED;
         }
     }
-    _Py_atomic_store_uint8(&m->v, v);
+    _Py_atomic_store_uint8(&m->_bits, v);
 }

 int
 _PyMutex_TryUnlock(PyMutex *m)
 {
-    uint8_t v = _Py_atomic_load_uint8(&m->v);
+    uint8_t v = _Py_atomic_load_uint8(&m->_bits);
     for (;;) {
         if ((v & _Py_LOCKED) == 0) {
             // error: the mutex is not locked
@@ -172,24 +166,16 @@ _PyMutex_TryUnlock(PyMutex *m)
         }
         else if ((v & _Py_HAS_PARKED)) {
             // wake up a single thread
-            _PyParkingLot_Unpark(&m->v, (_Py_unpark_fn_t *)mutex_unpark, m);
+            _PyParkingLot_Unpark(&m->_bits, (_Py_unpark_fn_t *)mutex_unpark, m);
             return 0;
         }
-        else if (_Py_atomic_compare_exchange_uint8(&m->v, &v, _Py_UNLOCKED)) {
+        else if (_Py_atomic_compare_exchange_uint8(&m->_bits, &v, _Py_UNLOCKED)) {
             // fast-path: no waiters
             return 0;
         }
     }
 }

-void
-_PyMutex_UnlockSlow(PyMutex *m)
-{
-    if (_PyMutex_TryUnlock(m) < 0) {
-        Py_FatalError("unlocking mutex that is not locked");
-    }
-}
-
 // _PyRawMutex stores a linked list of `struct raw_mutex_entry`, one for each
 // thread waiting on the mutex, directly in the mutex itself.
 struct raw_mutex_entry {
@@ -584,3 +570,19 @@ uint32_t _PySeqLock_AfterFork(_PySeqLock *seqlock)

     return 0;
 }
+
+#undef PyMutex_Lock
+void
+PyMutex_Lock(PyMutex *m)
+{
+    _PyMutex_LockTimed(m, -1, _PY_LOCK_DETACH);
+}
+
+#undef PyMutex_Unlock
+void
+PyMutex_Unlock(PyMutex *m)
+{
+    if (_PyMutex_TryUnlock(m) < 0) {
+        Py_FatalError("unlocking mutex that is not locked");
+    }
+}
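
Python/lock.c above, together with Include/cpython/lock.h, shows the two-tier design: in user code PyMutex_Lock(&m) expands (via the #define at the end of the header) to the inline _PyMutex_Lock fast path, which tries a single atomic compare-exchange and only falls back to the exported PyMutex_Lock function (the parking-lot slow path defined after the #undef in lock.c) when the lock is contended. As an illustrative debugging aid only, not part of this patch, and keeping in mind that _bits is private and documented as unstable, the state table from the header can be decoded like this:

    /* Hypothetical helper: maps the two low bits of PyMutex._bits to the
     * states documented in Include/cpython/lock.h. */
    static const char *
    pymutex_state_name(uint8_t bits)
    {
        switch (bits & 0x03) {
            case 0x00: return "unlocked";                      /* 0b00 */
            case 0x01: return "locked";                        /* 0b01 */
            case 0x02: return "unlocked, has parked threads";  /* 0b10 */
            default:   return "locked, has parked threads";    /* 0b11 */
        }
    }

This mirrors the assertions in Modules/_testinternalcapi/test_lock.c, where m._bits == 1 means locked with no waiters and m._bits == 3 means locked with at least one parked thread.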