| field | value | timestamp |
|---|---|---|
| author | Sam Gross <colesbury@gmail.com> | 2023-12-07 19:33:40 (GMT) |
| committer | GitHub <noreply@github.com> | 2023-12-07 19:33:40 (GMT) |
| commit | cf6110ba1337cb67e5867d86e7c0e8d923a5bc8d (patch) | |
| tree | ab8393161d5ce01479bfb03dce874ab52246d28f /Python/ceval_gil.c | |
| parent | db460735af7503984d1b7d878069722db44b11e8 (diff) | |
gh-111924: Use PyMutex for Runtime-global Locks. (gh-112207)
This replaces some usages of PyThread_type_lock with PyMutex, which does not require memory allocation to initialize.
This simplifies some of the runtime initialization and is also one step towards avoiding changes to the default raw memory allocator during interpreter initialization and finalization, since changing the allocator can be thread-unsafe in some circumstances.
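For context, the difference the commit message describes can be illustrated with a minimal sketch. It assumes CPython-internal headers (`pycore_lock.h` is only usable with `Py_BUILD_CORE` defined), and the surrounding struct and function names below are illustrative, not actual CPython code:

```c
/* Minimal sketch (not actual CPython code): contrast the two lock types.
   Requires CPython-internal headers; pycore_lock.h needs Py_BUILD_CORE. */
#include "Python.h"
#include "pycore_lock.h"        /* PyMutex, PyMutex_Lock(), PyMutex_Unlock() */

/* Old style: PyThread_type_lock is an opaque pointer that must be
   heap-allocated before first use and explicitly freed afterwards. */
static PyThread_type_lock legacy_lock;

static int
legacy_init(void)
{
    legacy_lock = PyThread_allocate_lock();     /* may return NULL */
    return (legacy_lock != NULL) ? 0 : -1;
}

static void
legacy_fini(void)
{
    if (legacy_lock != NULL) {
        PyThread_free_lock(legacy_lock);
        legacy_lock = NULL;
    }
}

/* New style: PyMutex is a tiny struct embedded directly in its owner.
   Zero-initialization means "unlocked", so there is no allocation,
   no failure path, and no finalization. */
static struct {
    PyMutex mutex;      /* {0} == unlocked */
    int counter;
} shared = {0};

static void
increment_shared(void)
{
    PyMutex_Lock(&shared.mutex);
    shared.counter++;
    PyMutex_Unlock(&shared.mutex);
}
```

The key property for this commit is the zero-initialization: a `PyMutex` that is part of a zero-initialized runtime structure is already in a valid unlocked state, which is what lets `_PyEval_InitState()` and `_PyEval_FiniState()` shrink in the diff below.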
Diffstat (limited to 'Python/ceval_gil.c')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | Python/ceval_gil.c | 37 |

1 file changed, 9 insertions, 28 deletions
```diff
diff --git a/Python/ceval_gil.c b/Python/ceval_gil.c
index 92c4b2f..636e4db 100644
--- a/Python/ceval_gil.c
+++ b/Python/ceval_gil.c
@@ -589,9 +589,7 @@ _PyEval_ReInitThreads(PyThreadState *tstate)
     take_gil(tstate);
 
     struct _pending_calls *pending = &tstate->interp->ceval.pending;
-    if (_PyThread_at_fork_reinit(&pending->lock) < 0) {
-        return _PyStatus_ERR("Can't reinitialize pending calls lock");
-    }
+    _PyMutex_at_fork_reinit(&pending->mutex);
 
     /* Destroy all threads except the current one */
     _PyThreadState_DeleteExcept(tstate);
@@ -720,13 +718,10 @@ _PyEval_AddPendingCall(PyInterpreterState *interp,
         assert(_Py_IsMainInterpreter(interp));
         pending = &_PyRuntime.ceval.pending_mainthread;
     }
-    /* Ensure that _PyEval_InitState() was called
-       and that _PyEval_FiniState() is not called yet. */
-    assert(pending->lock != NULL);
 
-    PyThread_acquire_lock(pending->lock, WAIT_LOCK);
+    PyMutex_Lock(&pending->mutex);
     int result = _push_pending_call(pending, func, arg, flags);
-    PyThread_release_lock(pending->lock);
+    PyMutex_Unlock(&pending->mutex);
 
     /* signal main loop */
     SIGNAL_PENDING_CALLS(interp);
@@ -768,9 +763,9 @@ _make_pending_calls(struct _pending_calls *pending)
         int flags = 0;
 
         /* pop one item off the queue while holding the lock */
-        PyThread_acquire_lock(pending->lock, WAIT_LOCK);
+        PyMutex_Lock(&pending->mutex);
         _pop_pending_call(pending, &func, &arg, &flags);
-        PyThread_release_lock(pending->lock);
+        PyMutex_Unlock(&pending->mutex);
 
         /* having released the lock, perform the callback */
         if (func == NULL) {
@@ -795,7 +790,7 @@ make_pending_calls(PyInterpreterState *interp)
 
     /* Only one thread (per interpreter) may run the pending calls
        at once.  In the same way, we don't do recursive pending calls. */
-    PyThread_acquire_lock(pending->lock, WAIT_LOCK);
+    PyMutex_Lock(&pending->mutex);
     if (pending->busy) {
         /* A pending call was added after another thread was already
            handling the pending calls (and had already "unsignaled").
@@ -807,11 +802,11 @@ make_pending_calls(PyInterpreterState *interp)
            care of any remaining pending calls.  Until then, though,
            all the interpreter's threads will be tripping the eval
            breaker every time it's checked. */
-        PyThread_release_lock(pending->lock);
+        PyMutex_Unlock(&pending->mutex);
         return 0;
     }
     pending->busy = 1;
-    PyThread_release_lock(pending->lock);
+    PyMutex_Unlock(&pending->mutex);
 
     /* unsignal before starting to call callbacks, so that any callback
        added in-between re-signals */
@@ -892,23 +887,9 @@ Py_MakePendingCalls(void)
 }
 
 void
-_PyEval_InitState(PyInterpreterState *interp, PyThread_type_lock pending_lock)
+_PyEval_InitState(PyInterpreterState *interp)
 {
     _gil_initialize(&interp->_gil);
-
-    struct _pending_calls *pending = &interp->ceval.pending;
-    assert(pending->lock == NULL);
-    pending->lock = pending_lock;
-}
-
-void
-_PyEval_FiniState(struct _ceval_state *ceval)
-{
-    struct _pending_calls *pending = &ceval->pending;
-    if (pending->lock != NULL) {
-        PyThread_free_lock(pending->lock);
-        pending->lock = NULL;
-    }
 }
```
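The last hunk is where the initialization simplification shows up: `_PyEval_InitState()` no longer takes a pre-allocated lock, and `_PyEval_FiniState()` is deleted because a zero-initialized `PyMutex` needs no teardown. A hedged sketch of what this means for a caller follows; the function names `init_eval_old`/`init_eval_new` are hypothetical, and the real call sites live in the runtime initialization code outside this file:

```c
/* Hypothetical caller-side sketch; not the actual CPython call sites.
   The "old" function targets the pre-change headers and the "new" one
   the post-change headers, so they would not compile together. */
#include "Python.h"
#include "pycore_ceval.h"   /* _PyEval_InitState(); internal, Py_BUILD_CORE only */

/* Before: allocate a lock, handle failure, pass it in, and free it
   later via _PyEval_FiniState(). */
static int
init_eval_old(PyInterpreterState *interp)
{
    PyThread_type_lock pending_lock = PyThread_allocate_lock();
    if (pending_lock == NULL) {
        return -1;                      /* allocation can fail */
    }
    _PyEval_InitState(interp, pending_lock);
    return 0;
}

/* After: the PyMutex is embedded in the interpreter state and is valid
   as soon as that state is zero-initialized, so there is nothing to
   allocate, nothing that can fail, and nothing to finalize. */
static void
init_eval_new(PyInterpreterState *interp)
{
    _PyEval_InitState(interp);
}
```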
