From a4a9f2e879c0c9572e0cecbc702dc1dd31f80221 Mon Sep 17 00:00:00 2001 From: Mark Shannon <mark@hotpy.org> Date: Wed, 24 Aug 2022 14:21:01 +0100 Subject: GH-96177: Move GIL and eval breaker code out of ceval.c into ceval_gil.c. (GH-96204) --- Include/internal/pycore_ceval.h | 3 + Include/internal/pycore_pystate.h | 7 +- Makefile.pre.in | 4 +- PCbuild/_freeze_module.vcxproj | 1 + PCbuild/_freeze_module.vcxproj.filters | 3 + PCbuild/pythoncore.vcxproj | 2 +- PCbuild/pythoncore.vcxproj.filters | 6 +- Python/ceval.c | 642 +-------------------- Python/ceval_gil.c | 986 +++++++++++++++++++++++++++++++++ Python/ceval_gil.h | 333 ----------- Tools/c-analyzer/cpython/_parser.py | 1 - Tools/gdb/libpython.py | 2 +- 12 files changed, 1005 insertions(+), 985 deletions(-) create mode 100644 Python/ceval_gil.c delete mode 100644 Python/ceval_gil.h diff --git a/Include/internal/pycore_ceval.h b/Include/internal/pycore_ceval.h index 1b99930..2fcdaad 100644 --- a/Include/internal/pycore_ceval.h +++ b/Include/internal/pycore_ceval.h @@ -133,6 +133,9 @@ extern struct _PyInterpreterFrame* _PyEval_GetFrame(void); extern PyObject* _Py_MakeCoro(PyFunctionObject *func); +extern int _Py_HandlePending(PyThreadState *tstate); + + #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_pystate.h b/Include/internal/pycore_pystate.h index 51d119c..3d6d400 100644 --- a/Include/internal/pycore_pystate.h +++ b/Include/internal/pycore_pystate.h @@ -85,13 +85,14 @@ _PyThreadState_GET(void) return _PyRuntimeState_GetThreadState(&_PyRuntime); } -PyAPI_FUNC(void) _Py_NO_RETURN _Py_FatalError_TstateNULL(const char *func); - static inline void _Py_EnsureFuncTstateNotNULL(const char *func, PyThreadState *tstate) { if (tstate == NULL) { - _Py_FatalError_TstateNULL(func); + _Py_FatalErrorFunc(func, + "the function must be called with the GIL held, " + "after Python initialization and before Python finalization, " + "but the GIL is released (the current Python thread state is NULL)"); } } diff --git a/Makefile.pre.in b/Makefile.pre.in index 414e604..3491e91 100644 --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -388,6 +388,7 @@ PYTHON_OBJS= \ Python/getcopyright.o \ Python/getplatform.o \ Python/getversion.o \ + Python/ceval_gil.o \ Python/hamt.o \ Python/hashtable.o \ Python/import.o \ @@ -1419,8 +1420,7 @@ regen-opcode-targets: $(srcdir)/Python/opcode_targets.h.new $(UPDATE_FILE) $(srcdir)/Python/opcode_targets.h $(srcdir)/Python/opcode_targets.h.new -Python/ceval.o: $(srcdir)/Python/opcode_targets.h $(srcdir)/Python/ceval_gil.h \ - $(srcdir)/Python/condvar.h +Python/ceval.o: $(srcdir)/Python/opcode_targets.h $(srcdir)/Python/condvar.h Python/frozen.o: $(FROZEN_FILES_OUT) diff --git a/PCbuild/_freeze_module.vcxproj b/PCbuild/_freeze_module.vcxproj index 5821c3d..4c00729 100644 --- a/PCbuild/_freeze_module.vcxproj +++ b/PCbuild/_freeze_module.vcxproj @@ -199,6 +199,7 @@ + <ClCompile Include="..\Python\ceval_gil.c" /> diff --git a/PCbuild/_freeze_module.vcxproj.filters b/PCbuild/_freeze_module.vcxproj.filters index b657f56..5c98499 100644 --- a/PCbuild/_freeze_module.vcxproj.filters +++ b/PCbuild/_freeze_module.vcxproj.filters @@ -184,6 +184,9 @@ Source Files + <ClCompile Include="..\Python\ceval_gil.c"> + <Filter>Source Files</Filter> + </ClCompile> Source Files diff --git a/PCbuild/pythoncore.vcxproj b/PCbuild/pythoncore.vcxproj index 3ff4be5..45e5013 100644 --- a/PCbuild/pythoncore.vcxproj +++ b/PCbuild/pythoncore.vcxproj @@ -327,7 +327,6 @@ - <ClInclude Include="..\Python\ceval_gil.h" /> @@ -502,6 +501,7 @@ + <ClCompile Include="..\Python\ceval_gil.c" /> diff --git a/PCbuild/pythoncore.vcxproj.filters b/PCbuild/pythoncore.vcxproj.filters index 64d248d..581ea6e 100644 --- a/PCbuild/pythoncore.vcxproj.filters +++ 
b/PCbuild/pythoncore.vcxproj.filters @@ -312,9 +312,6 @@ Python - <ClInclude Include="..\Python\ceval_gil.h"> - <Filter>Python</Filter> - </ClInclude> Include @@ -1097,6 +1094,9 @@ Python + <ClCompile Include="..\Python\ceval_gil.c"> + <Filter>Python</Filter> + </ClCompile> Modules diff --git a/Python/ceval.c b/Python/ceval.c index 7024add..1ab104c 100644 --- a/Python/ceval.c +++ b/Python/ceval.c @@ -13,13 +13,11 @@ #include "pycore_ceval.h" // _PyEval_SignalAsyncExc() #include "pycore_code.h" #include "pycore_function.h" -#include "pycore_initconfig.h" // _PyStatus_OK() #include "pycore_long.h" // _PyLong_GetZero() #include "pycore_object.h" // _PyObject_GC_TRACK() #include "pycore_moduleobject.h" // PyModuleObject #include "pycore_opcode.h" // EXTRA_CASES #include "pycore_pyerrors.h" // _PyErr_Fetch() -#include "pycore_pylifecycle.h" // _PyErr_Print() #include "pycore_pymem.h" // _PyMem_IsPtrFreed() #include "pycore_pystate.h" // _PyInterpreterState_GET() #include "pycore_range.h" // _PyRangeIterObject @@ -237,582 +235,9 @@ is_tstate_valid(PyThreadState *tstate) #endif -/* This can set eval_breaker to 0 even though gil_drop_request became - 1. We believe this is all right because the eval loop will release - the GIL eventually anyway. */ -static inline void -COMPUTE_EVAL_BREAKER(PyInterpreterState *interp, - struct _ceval_runtime_state *ceval, - struct _ceval_state *ceval2) -{ - _Py_atomic_store_relaxed(&ceval2->eval_breaker, - _Py_atomic_load_relaxed_int32(&ceval2->gil_drop_request) - | (_Py_atomic_load_relaxed_int32(&ceval->signals_pending) - && _Py_ThreadCanHandleSignals(interp)) - | (_Py_atomic_load_relaxed_int32(&ceval2->pending.calls_to_do) - && _Py_ThreadCanHandlePendingCalls()) - | ceval2->pending.async_exc); -} - - -static inline void -SET_GIL_DROP_REQUEST(PyInterpreterState *interp) -{ - struct _ceval_state *ceval2 = &interp->ceval; - _Py_atomic_store_relaxed(&ceval2->gil_drop_request, 1); - _Py_atomic_store_relaxed(&ceval2->eval_breaker, 1); -} - - -static inline void -RESET_GIL_DROP_REQUEST(PyInterpreterState *interp) -{ - struct _ceval_runtime_state *ceval = &interp->runtime->ceval; - struct _ceval_state *ceval2 = &interp->ceval; - _Py_atomic_store_relaxed(&ceval2->gil_drop_request, 0); - COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); -} - - -static inline void -SIGNAL_PENDING_CALLS(PyInterpreterState *interp) -{ - struct _ceval_runtime_state *ceval = &interp->runtime->ceval; - struct _ceval_state *ceval2 = &interp->ceval; - _Py_atomic_store_relaxed(&ceval2->pending.calls_to_do, 1); - COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); -} - - -static inline void -UNSIGNAL_PENDING_CALLS(PyInterpreterState *interp) -{ - struct _ceval_runtime_state *ceval = &interp->runtime->ceval; - struct _ceval_state *ceval2 = &interp->ceval; - _Py_atomic_store_relaxed(&ceval2->pending.calls_to_do, 0); - COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); -} - - -static inline void -SIGNAL_PENDING_SIGNALS(PyInterpreterState *interp, int force) -{ - struct _ceval_runtime_state *ceval = &interp->runtime->ceval; - struct _ceval_state *ceval2 = &interp->ceval; - _Py_atomic_store_relaxed(&ceval->signals_pending, 1); - if (force) { - _Py_atomic_store_relaxed(&ceval2->eval_breaker, 1); - } - else { - /* eval_breaker is not set to 1 if thread_can_handle_signals() is false */ - COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); - } -} - - -static inline void -UNSIGNAL_PENDING_SIGNALS(PyInterpreterState *interp) -{ - struct _ceval_runtime_state *ceval = &interp->runtime->ceval; - struct _ceval_state *ceval2 = &interp->ceval; - _Py_atomic_store_relaxed(&ceval->signals_pending, 0); - COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); -} - - -static inline void
-SIGNAL_ASYNC_EXC(PyInterpreterState *interp) -{ - struct _ceval_state *ceval2 = &interp->ceval; - ceval2->pending.async_exc = 1; - _Py_atomic_store_relaxed(&ceval2->eval_breaker, 1); -} - - -static inline void -UNSIGNAL_ASYNC_EXC(PyInterpreterState *interp) -{ - struct _ceval_runtime_state *ceval = &interp->runtime->ceval; - struct _ceval_state *ceval2 = &interp->ceval; - ceval2->pending.async_exc = 0; - COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); -} - - #ifdef HAVE_ERRNO_H #include <errno.h> #endif -#include "ceval_gil.h" - -void _Py_NO_RETURN -_Py_FatalError_TstateNULL(const char *func) -{ - _Py_FatalErrorFunc(func, - "the function must be called with the GIL held, " - "after Python initialization and before Python finalization, " - "but the GIL is released (the current Python thread state is NULL)"); -} - -int -_PyEval_ThreadsInitialized(_PyRuntimeState *runtime) -{ - return gil_created(&runtime->ceval.gil); -} - -int -PyEval_ThreadsInitialized(void) -{ - _PyRuntimeState *runtime = &_PyRuntime; - return _PyEval_ThreadsInitialized(runtime); -} - -PyStatus -_PyEval_InitGIL(PyThreadState *tstate) -{ - if (!_Py_IsMainInterpreter(tstate->interp)) { - /* Currently, the GIL is shared by all interpreters, - and only the main interpreter is responsible to create - and destroy it. */ - return _PyStatus_OK(); - } - - struct _gil_runtime_state *gil = &tstate->interp->runtime->ceval.gil; - assert(!gil_created(gil)); - - PyThread_init_thread(); - create_gil(gil); - - take_gil(tstate); - - assert(gil_created(gil)); - return _PyStatus_OK(); - } - -void -_PyEval_FiniGIL(PyInterpreterState *interp) -{ - if (!_Py_IsMainInterpreter(interp)) { - /* Currently, the GIL is shared by all interpreters, - and only the main interpreter is responsible to create - and destroy it. */ - return; - } - - struct _gil_runtime_state *gil = &interp->runtime->ceval.gil; - if (!gil_created(gil)) { - /* First Py_InitializeFromConfig() call: the GIL doesn't exist - yet: do nothing. */ - return; - } - - destroy_gil(gil); - assert(!gil_created(gil)); -} - -void -PyEval_InitThreads(void) -{ - /* Do nothing: kept for backward compatibility */ -} - -void -_PyEval_Fini(void) -{ -#ifdef Py_STATS - _Py_PrintSpecializationStats(1); -#endif -} - -void -PyEval_AcquireLock(void) -{ - _PyRuntimeState *runtime = &_PyRuntime; - PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime); - _Py_EnsureTstateNotNULL(tstate); - - take_gil(tstate); -} - -void -PyEval_ReleaseLock(void) -{ - _PyRuntimeState *runtime = &_PyRuntime; - PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime); - /* This function must succeed when the current thread state is NULL. - We therefore avoid PyThreadState_Get() which dumps a fatal error - in debug mode. 
*/ - struct _ceval_runtime_state *ceval = &runtime->ceval; - struct _ceval_state *ceval2 = &tstate->interp->ceval; - drop_gil(ceval, ceval2, tstate); -} - -void -_PyEval_ReleaseLock(PyThreadState *tstate) -{ - struct _ceval_runtime_state *ceval = &tstate->interp->runtime->ceval; - struct _ceval_state *ceval2 = &tstate->interp->ceval; - drop_gil(ceval, ceval2, tstate); -} - -void -PyEval_AcquireThread(PyThreadState *tstate) -{ - _Py_EnsureTstateNotNULL(tstate); - - take_gil(tstate); - - struct _gilstate_runtime_state *gilstate = &tstate->interp->runtime->gilstate; - if (_PyThreadState_Swap(gilstate, tstate) != NULL) { - Py_FatalError("non-NULL old thread state"); - } -} - -void -PyEval_ReleaseThread(PyThreadState *tstate) -{ - assert(is_tstate_valid(tstate)); - - _PyRuntimeState *runtime = tstate->interp->runtime; - PyThreadState *new_tstate = _PyThreadState_Swap(&runtime->gilstate, NULL); - if (new_tstate != tstate) { - Py_FatalError("wrong thread state"); - } - struct _ceval_runtime_state *ceval = &runtime->ceval; - struct _ceval_state *ceval2 = &tstate->interp->ceval; - drop_gil(ceval, ceval2, tstate); -} - -#ifdef HAVE_FORK -/* This function is called from PyOS_AfterFork_Child to destroy all threads - which are not running in the child process, and clear internal locks - which might be held by those threads. */ -PyStatus -_PyEval_ReInitThreads(PyThreadState *tstate) -{ - _PyRuntimeState *runtime = tstate->interp->runtime; - - struct _gil_runtime_state *gil = &runtime->ceval.gil; - if (!gil_created(gil)) { - return _PyStatus_OK(); - } - recreate_gil(gil); - - take_gil(tstate); - - struct _pending_calls *pending = &tstate->interp->ceval.pending; - if (_PyThread_at_fork_reinit(&pending->lock) < 0) { - return _PyStatus_ERR("Can't reinitialize pending calls lock"); - } - - /* Destroy all threads except the current one */ - _PyThreadState_DeleteExcept(runtime, tstate); - return _PyStatus_OK(); -} -#endif - -/* This function is used to signal that async exceptions are waiting to be - raised. */ - -void -_PyEval_SignalAsyncExc(PyInterpreterState *interp) -{ - SIGNAL_ASYNC_EXC(interp); -} - -PyThreadState * -PyEval_SaveThread(void) -{ - _PyRuntimeState *runtime = &_PyRuntime; - PyThreadState *tstate = _PyThreadState_Swap(&runtime->gilstate, NULL); - _Py_EnsureTstateNotNULL(tstate); - - struct _ceval_runtime_state *ceval = &runtime->ceval; - struct _ceval_state *ceval2 = &tstate->interp->ceval; - assert(gil_created(&ceval->gil)); - drop_gil(ceval, ceval2, tstate); - return tstate; -} - -void -PyEval_RestoreThread(PyThreadState *tstate) -{ - _Py_EnsureTstateNotNULL(tstate); - - take_gil(tstate); - - struct _gilstate_runtime_state *gilstate = &tstate->interp->runtime->gilstate; - _PyThreadState_Swap(gilstate, tstate); -} - - -/* Mechanism whereby asynchronously executing callbacks (e.g. UNIX - signal handlers or Mac I/O completion routines) can schedule calls - to a function to be called synchronously. - The synchronous function is called with one void* argument. - It should return 0 for success or -1 for failure -- failure should - be accompanied by an exception. - - If registry succeeds, the registry function returns 0; if it fails - (e.g. due to too many pending calls) it returns -1 (without setting - an exception condition). - - Note that because registry may occur from within signal handlers, - or other asynchronous events, calling malloc() is unsafe! - - Any thread can schedule pending calls, but only the main thread - will execute them. 
- There is no facility to schedule calls to a particular thread, but - that should be easy to change, should that ever be required. In - that case, the static variables here should go into the python - threadstate. -*/ - -void -_PyEval_SignalReceived(PyInterpreterState *interp) -{ -#ifdef MS_WINDOWS - // bpo-42296: On Windows, _PyEval_SignalReceived() is called from a signal - // handler which can run in a thread different than the Python thread, in - // which case _Py_ThreadCanHandleSignals() is wrong. Ignore - // _Py_ThreadCanHandleSignals() and always set eval_breaker to 1. - // - // The next eval_frame_handle_pending() call will call - // _Py_ThreadCanHandleSignals() to recompute eval_breaker. - int force = 1; -#else - int force = 0; -#endif - /* bpo-30703: Function called when the C signal handler of Python gets a - signal. We cannot queue a callback using _PyEval_AddPendingCall() since - that function is not async-signal-safe. */ - SIGNAL_PENDING_SIGNALS(interp, force); -} - -/* Push one item onto the queue while holding the lock. */ -static int -_push_pending_call(struct _pending_calls *pending, - int (*func)(void *), void *arg) -{ - int i = pending->last; - int j = (i + 1) % NPENDINGCALLS; - if (j == pending->first) { - return -1; /* Queue full */ - } - pending->calls[i].func = func; - pending->calls[i].arg = arg; - pending->last = j; - return 0; -} - -/* Pop one item off the queue while holding the lock. */ -static void -_pop_pending_call(struct _pending_calls *pending, - int (**func)(void *), void **arg) -{ - int i = pending->first; - if (i == pending->last) { - return; /* Queue empty */ - } - - *func = pending->calls[i].func; - *arg = pending->calls[i].arg; - pending->first = (i + 1) % NPENDINGCALLS; -} - -/* This implementation is thread-safe. It allows - scheduling to be made from any thread, and even from an executing - callback. - */ - -int -_PyEval_AddPendingCall(PyInterpreterState *interp, - int (*func)(void *), void *arg) -{ - struct _pending_calls *pending = &interp->ceval.pending; - - /* Ensure that _PyEval_InitState() was called - and that _PyEval_FiniState() is not called yet. */ - assert(pending->lock != NULL); - - PyThread_acquire_lock(pending->lock, WAIT_LOCK); - int result = _push_pending_call(pending, func, arg); - PyThread_release_lock(pending->lock); - - /* signal main loop */ - SIGNAL_PENDING_CALLS(interp); - return result; -} - -int -Py_AddPendingCall(int (*func)(void *), void *arg) -{ - /* Best-effort to support subinterpreters and calls with the GIL released. - - First attempt _PyThreadState_GET() since it supports subinterpreters. - - If the GIL is released, _PyThreadState_GET() returns NULL . In this - case, use PyGILState_GetThisThreadState() which works even if the GIL - is released. - - Sadly, PyGILState_GetThisThreadState() doesn't support subinterpreters: - see bpo-10915 and bpo-15751. - - Py_AddPendingCall() doesn't require the caller to hold the GIL. 
*/ - PyThreadState *tstate = _PyThreadState_GET(); - if (tstate == NULL) { - tstate = PyGILState_GetThisThreadState(); - } - - PyInterpreterState *interp; - if (tstate != NULL) { - interp = tstate->interp; - } - else { - /* Last resort: use the main interpreter */ - interp = _PyInterpreterState_Main(); - } - return _PyEval_AddPendingCall(interp, func, arg); -} - -static int -handle_signals(PyThreadState *tstate) -{ - assert(is_tstate_valid(tstate)); - if (!_Py_ThreadCanHandleSignals(tstate->interp)) { - return 0; - } - - UNSIGNAL_PENDING_SIGNALS(tstate->interp); - if (_PyErr_CheckSignalsTstate(tstate) < 0) { - /* On failure, re-schedule a call to handle_signals(). */ - SIGNAL_PENDING_SIGNALS(tstate->interp, 0); - return -1; - } - return 0; -} - -static int -make_pending_calls(PyInterpreterState *interp) -{ - /* only execute pending calls on main thread */ - if (!_Py_ThreadCanHandlePendingCalls()) { - return 0; - } - - /* don't perform recursive pending calls */ - static int busy = 0; - if (busy) { - return 0; - } - busy = 1; - - /* unsignal before starting to call callbacks, so that any callback - added in-between re-signals */ - UNSIGNAL_PENDING_CALLS(interp); - int res = 0; - - /* perform a bounded number of calls, in case of recursion */ - struct _pending_calls *pending = &interp->ceval.pending; - for (int i=0; i<NPENDINGCALLS; i++) { - int (*func)(void *) = NULL; - void *arg = NULL; - - /* pop one item off the queue while holding the lock */ - PyThread_acquire_lock(pending->lock, WAIT_LOCK); - _pop_pending_call(pending, &func, &arg); - PyThread_release_lock(pending->lock); - - /* having released the lock, perform the callback */ - if (func == NULL) { - break; - } - res = func(arg); - if (res) { - goto error; - } - } - - busy = 0; - return res; - -error: - busy = 0; - SIGNAL_PENDING_CALLS(interp); - return res; -} - -void -_Py_FinishPendingCalls(PyThreadState *tstate) -{ - assert(PyGILState_Check()); - assert(is_tstate_valid(tstate)); - - struct _pending_calls *pending = &tstate->interp->ceval.pending; - - if (!_Py_atomic_load_relaxed_int32(&(pending->calls_to_do))) { - return; - } - - if (make_pending_calls(tstate->interp) < 0) { - PyObject *exc, *val, *tb; - _PyErr_Fetch(tstate, &exc, &val, &tb); - PyErr_BadInternalCall(); - _PyErr_ChainExceptions(exc, val, tb); - _PyErr_Print(tstate); - } -} - -/* Py_MakePendingCalls() is a simple wrapper for the sake - of backward-compatibility. */ -int -Py_MakePendingCalls(void) -{ - assert(PyGILState_Check()); - - PyThreadState *tstate = _PyThreadState_GET(); - assert(is_tstate_valid(tstate)); - - /* Python signal handler doesn't really queue a callback: it only signals - that a signal was received, see _PyEval_SignalReceived(). 
*/ - int res = handle_signals(tstate); - if (res != 0) { - return res; - } - - res = make_pending_calls(tstate->interp); - if (res != 0) { - return res; - } - - return 0; -} - -/* The interpreter's recursion limit */ - -void -_PyEval_InitRuntimeState(struct _ceval_runtime_state *ceval) -{ - _gil_initialize(&ceval->gil); -} - -void -_PyEval_InitState(struct _ceval_state *ceval, PyThread_type_lock pending_lock) -{ - struct _pending_calls *pending = &ceval->pending; - assert(pending->lock == NULL); - - pending->lock = pending_lock; -} - -void -_PyEval_FiniState(struct _ceval_state *ceval) -{ - struct _pending_calls *pending = &ceval->pending; - if (pending->lock != NULL) { - PyThread_free_lock(pending->lock); - pending->lock = NULL; - } -} int Py_GetRecursionLimit(void) @@ -1182,71 +607,6 @@ PyEval_EvalFrameEx(PyFrameObject *f, int throwflag) } -/* Handle signals, pending calls, GIL drop request - and asynchronous exception */ -static int -eval_frame_handle_pending(PyThreadState *tstate) -{ - _PyRuntimeState * const runtime = &_PyRuntime; - struct _ceval_runtime_state *ceval = &runtime->ceval; - - /* Pending signals */ - if (_Py_atomic_load_relaxed_int32(&ceval->signals_pending)) { - if (handle_signals(tstate) != 0) { - return -1; - } - } - - /* Pending calls */ - struct _ceval_state *ceval2 = &tstate->interp->ceval; - if (_Py_atomic_load_relaxed_int32(&ceval2->pending.calls_to_do)) { - if (make_pending_calls(tstate->interp) != 0) { - return -1; - } - } - - /* GIL drop request */ - if (_Py_atomic_load_relaxed_int32(&ceval2->gil_drop_request)) { - /* Give another thread a chance */ - if (_PyThreadState_Swap(&runtime->gilstate, NULL) != tstate) { - Py_FatalError("tstate mix-up"); - } - drop_gil(ceval, ceval2, tstate); - - /* Other threads may run now */ - - take_gil(tstate); - - if (_PyThreadState_Swap(&runtime->gilstate, tstate) != NULL) { - Py_FatalError("orphan tstate"); - } - } - - /* Check for asynchronous exception. */ - if (tstate->async_exc != NULL) { - PyObject *exc = tstate->async_exc; - tstate->async_exc = NULL; - UNSIGNAL_ASYNC_EXC(tstate->interp); - _PyErr_SetNone(tstate, exc); - Py_DECREF(exc); - return -1; - } - -#ifdef MS_WINDOWS - // bpo-42296: On Windows, _PyEval_SignalReceived() can be called in a - // different thread than the Python thread, in which case - // _Py_ThreadCanHandleSignals() is wrong. Recompute eval_breaker in the - // current Python thread with the correct _Py_ThreadCanHandleSignals() - // value. It prevents to interrupt the eval loop at every instruction if - // the current Python thread cannot handle signals (if - // _Py_ThreadCanHandleSignals() is false). - COMPUTE_EVAL_BREAKER(tstate->interp, ceval, ceval2); -#endif - - return 0; -} - - /* Computed GOTOs, or the-optimization-commonly-but-improperly-known-as-"threaded code" using gcc's labels-as-values extension @@ -1750,7 +1110,7 @@ handle_eval_breaker: * All loops should include a check of the eval breaker. * We also check on return from any builtin function. 
*/ - if (eval_frame_handle_pending(tstate) != 0) { + if (_Py_HandlePending(tstate) != 0) { goto error; } DISPATCH(); diff --git a/Python/ceval_gil.c b/Python/ceval_gil.c new file mode 100644 index 0000000..a679086 --- /dev/null +++ b/Python/ceval_gil.c @@ -0,0 +1,986 @@ + +#include "Python.h" +#include "pycore_atomic.h" // _Py_atomic_int +#include "pycore_ceval.h" // _PyEval_SignalReceived() +#include "pycore_pyerrors.h" // _PyErr_Fetch() +#include "pycore_pylifecycle.h" // _PyErr_Print() +#include "pycore_initconfig.h" // _PyStatus_OK() +#include "pycore_pymem.h" // _PyMem_IsPtrFreed() + +/* + Notes about the implementation: + + - The GIL is just a boolean variable (locked) whose access is protected + by a mutex (gil_mutex), and whose changes are signalled by a condition + variable (gil_cond). gil_mutex is taken for short periods of time, + and therefore mostly uncontended. + + - In the GIL-holding thread, the main loop (PyEval_EvalFrameEx) must be + able to release the GIL on demand by another thread. A volatile boolean + variable (gil_drop_request) is used for that purpose, which is checked + at every turn of the eval loop. That variable is set after a wait of + `interval` microseconds on `gil_cond` has timed out. + + [Actually, another volatile boolean variable (eval_breaker) is used + which ORs several conditions into one. Volatile booleans are + sufficient as inter-thread signalling means since Python is run + on cache-coherent architectures only.] + + - A thread wanting to take the GIL will first let pass a given amount of + time (`interval` microseconds) before setting gil_drop_request. This + encourages a defined switching period, but doesn't enforce it since + opcodes can take an arbitrary time to execute. + + The `interval` value is available for the user to read and modify + using the Python API `sys.{get,set}switchinterval()`. + + - When a thread releases the GIL and gil_drop_request is set, that thread + ensures that another GIL-awaiting thread gets scheduled. + It does so by waiting on a condition variable (switch_cond) until + the value of last_holder is changed to something else than its + own thread state pointer, indicating that another thread was able to + take the GIL. + + This is meant to prohibit the latency-adverse behaviour on multi-core + machines where one thread would speculatively release the GIL, but still + run and end up being the first to re-acquire it, making the "timeslices" + much longer than expected. + (Note: this mechanism is enabled with FORCE_SWITCHING above) +*/ + +// GH-89279: Force inlining by using a macro. +#if defined(_MSC_VER) && SIZEOF_INT == 4 +#define _Py_atomic_load_relaxed_int32(ATOMIC_VAL) (assert(sizeof((ATOMIC_VAL)->_value) == 4), *((volatile int*)&((ATOMIC_VAL)->_value))) +#else +#define _Py_atomic_load_relaxed_int32(ATOMIC_VAL) _Py_atomic_load_relaxed(ATOMIC_VAL) +#endif + +/* This can set eval_breaker to 0 even though gil_drop_request became + 1. We believe this is all right because the eval loop will release + the GIL eventually anyway. 
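A concrete interleaving of the benign race: thread A runs SET_GIL_DROP_REQUEST() below and stores gil_drop_request=1; meanwhile thread B, inside e.g. UNSIGNAL_PENDING_CALLS(), has already loaded gil_drop_request as 0 and now stores the recomputed eval_breaker=0, overwriting the eval_breaker=1 that A also stored. Because gil_drop_request itself is still 1, the waiter in take_gil() times out on gil_cond again and re-issues SET_GIL_DROP_REQUEST(), so the drop request is delayed by at most one switch interval rather than lost.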
*/ +static inline void +COMPUTE_EVAL_BREAKER(PyInterpreterState *interp, + struct _ceval_runtime_state *ceval, + struct _ceval_state *ceval2) +{ + _Py_atomic_store_relaxed(&ceval2->eval_breaker, + _Py_atomic_load_relaxed_int32(&ceval2->gil_drop_request) + | (_Py_atomic_load_relaxed_int32(&ceval->signals_pending) + && _Py_ThreadCanHandleSignals(interp)) + | (_Py_atomic_load_relaxed_int32(&ceval2->pending.calls_to_do) + && _Py_ThreadCanHandlePendingCalls()) + | ceval2->pending.async_exc); +} + + +static inline void +SET_GIL_DROP_REQUEST(PyInterpreterState *interp) +{ + struct _ceval_state *ceval2 = &interp->ceval; + _Py_atomic_store_relaxed(&ceval2->gil_drop_request, 1); + _Py_atomic_store_relaxed(&ceval2->eval_breaker, 1); +} + + +static inline void +RESET_GIL_DROP_REQUEST(PyInterpreterState *interp) +{ + struct _ceval_runtime_state *ceval = &interp->runtime->ceval; + struct _ceval_state *ceval2 = &interp->ceval; + _Py_atomic_store_relaxed(&ceval2->gil_drop_request, 0); + COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); +} + + +static inline void +SIGNAL_PENDING_CALLS(PyInterpreterState *interp) +{ + struct _ceval_runtime_state *ceval = &interp->runtime->ceval; + struct _ceval_state *ceval2 = &interp->ceval; + _Py_atomic_store_relaxed(&ceval2->pending.calls_to_do, 1); + COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); +} + + +static inline void +UNSIGNAL_PENDING_CALLS(PyInterpreterState *interp) +{ + struct _ceval_runtime_state *ceval = &interp->runtime->ceval; + struct _ceval_state *ceval2 = &interp->ceval; + _Py_atomic_store_relaxed(&ceval2->pending.calls_to_do, 0); + COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); +} + + +static inline void +SIGNAL_PENDING_SIGNALS(PyInterpreterState *interp, int force) +{ + struct _ceval_runtime_state *ceval = &interp->runtime->ceval; + struct _ceval_state *ceval2 = &interp->ceval; + _Py_atomic_store_relaxed(&ceval->signals_pending, 1); + if (force) { + _Py_atomic_store_relaxed(&ceval2->eval_breaker, 1); + } + else { + /* eval_breaker is not set to 1 if thread_can_handle_signals() is false */ + COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); + } +} + + +static inline void +UNSIGNAL_PENDING_SIGNALS(PyInterpreterState *interp) +{ + struct _ceval_runtime_state *ceval = &interp->runtime->ceval; + struct _ceval_state *ceval2 = &interp->ceval; + _Py_atomic_store_relaxed(&ceval->signals_pending, 0); + COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); +} + + +static inline void +SIGNAL_ASYNC_EXC(PyInterpreterState *interp) +{ + struct _ceval_state *ceval2 = &interp->ceval; + ceval2->pending.async_exc = 1; + _Py_atomic_store_relaxed(&ceval2->eval_breaker, 1); +} + + +static inline void +UNSIGNAL_ASYNC_EXC(PyInterpreterState *interp) +{ + struct _ceval_runtime_state *ceval = &interp->runtime->ceval; + struct _ceval_state *ceval2 = &interp->ceval; + ceval2->pending.async_exc = 0; + COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); +} + +#ifndef NDEBUG +/* Ensure that tstate is valid */ +static int +is_tstate_valid(PyThreadState *tstate) +{ + assert(!_PyMem_IsPtrFreed(tstate)); + assert(!_PyMem_IsPtrFreed(tstate->interp)); + return 1; +} +#endif + +/* + * Implementation of the Global Interpreter Lock (GIL). 
+ */ + +#include <stdlib.h> +#include <errno.h> + +#include "pycore_atomic.h" + + +#include "condvar.h" + +#define MUTEX_INIT(mut) \ + if (PyMUTEX_INIT(&(mut))) { \ + Py_FatalError("PyMUTEX_INIT(" #mut ") failed"); }; +#define MUTEX_FINI(mut) \ + if (PyMUTEX_FINI(&(mut))) { \ + Py_FatalError("PyMUTEX_FINI(" #mut ") failed"); }; +#define MUTEX_LOCK(mut) \ + if (PyMUTEX_LOCK(&(mut))) { \ + Py_FatalError("PyMUTEX_LOCK(" #mut ") failed"); }; +#define MUTEX_UNLOCK(mut) \ + if (PyMUTEX_UNLOCK(&(mut))) { \ + Py_FatalError("PyMUTEX_UNLOCK(" #mut ") failed"); }; + +#define COND_INIT(cond) \ + if (PyCOND_INIT(&(cond))) { \ + Py_FatalError("PyCOND_INIT(" #cond ") failed"); }; +#define COND_FINI(cond) \ + if (PyCOND_FINI(&(cond))) { \ + Py_FatalError("PyCOND_FINI(" #cond ") failed"); }; +#define COND_SIGNAL(cond) \ + if (PyCOND_SIGNAL(&(cond))) { \ + Py_FatalError("PyCOND_SIGNAL(" #cond ") failed"); }; +#define COND_WAIT(cond, mut) \ + if (PyCOND_WAIT(&(cond), &(mut))) { \ + Py_FatalError("PyCOND_WAIT(" #cond ") failed"); }; +#define COND_TIMED_WAIT(cond, mut, microseconds, timeout_result) \ + { \ + int r = PyCOND_TIMEDWAIT(&(cond), &(mut), (microseconds)); \ + if (r < 0) \ + Py_FatalError("PyCOND_WAIT(" #cond ") failed"); \ + if (r) /* 1 == timeout, 2 == impl. can't say, so assume timeout */ \ + timeout_result = 1; \ + else \ + timeout_result = 0; \ + } \ + + +#define DEFAULT_INTERVAL 5000 + +static void _gil_initialize(struct _gil_runtime_state *gil) +{ + _Py_atomic_int uninitialized = {-1}; + gil->locked = uninitialized; + gil->interval = DEFAULT_INTERVAL; +} + +static int gil_created(struct _gil_runtime_state *gil) +{ + return (_Py_atomic_load_explicit(&gil->locked, _Py_memory_order_acquire) >= 0); +} + +static void create_gil(struct _gil_runtime_state *gil) +{ + MUTEX_INIT(gil->mutex); +#ifdef FORCE_SWITCHING + MUTEX_INIT(gil->switch_mutex); +#endif + COND_INIT(gil->cond); +#ifdef FORCE_SWITCHING + COND_INIT(gil->switch_cond); +#endif + _Py_atomic_store_relaxed(&gil->last_holder, 0); + _Py_ANNOTATE_RWLOCK_CREATE(&gil->locked); + _Py_atomic_store_explicit(&gil->locked, 0, _Py_memory_order_release); +} + +static void destroy_gil(struct _gil_runtime_state *gil) +{ + /* some pthread-like implementations tie the mutex to the cond + * and must have the cond destroyed first. + */ + COND_FINI(gil->cond); + MUTEX_FINI(gil->mutex); +#ifdef FORCE_SWITCHING + COND_FINI(gil->switch_cond); + MUTEX_FINI(gil->switch_mutex); +#endif + _Py_atomic_store_explicit(&gil->locked, -1, + _Py_memory_order_release); + _Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked); +} + +#ifdef HAVE_FORK +static void recreate_gil(struct _gil_runtime_state *gil) +{ + _Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked); + /* XXX should we destroy the old OS resources here? */ + create_gil(gil); +} +#endif + +static void +drop_gil(struct _ceval_runtime_state *ceval, struct _ceval_state *ceval2, + PyThreadState *tstate) +{ + struct _gil_runtime_state *gil = &ceval->gil; + if (!_Py_atomic_load_relaxed(&gil->locked)) { + Py_FatalError("drop_gil: GIL is not locked"); + } + + /* tstate is allowed to be NULL (early interpreter init) */ + if (tstate != NULL) { + /* Sub-interpreter support: threads might have been switched + under our feet using PyThreadState_Swap(). Fix the GIL last + holder variable so that our heuristics work. 
*/ + _Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate); + } + + MUTEX_LOCK(gil->mutex); + _Py_ANNOTATE_RWLOCK_RELEASED(&gil->locked, /*is_write=*/1); + _Py_atomic_store_relaxed(&gil->locked, 0); + COND_SIGNAL(gil->cond); + MUTEX_UNLOCK(gil->mutex); + +#ifdef FORCE_SWITCHING + if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request) && tstate != NULL) { + MUTEX_LOCK(gil->switch_mutex); + /* Not switched yet => wait */ + if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) == tstate) + { + assert(is_tstate_valid(tstate)); + RESET_GIL_DROP_REQUEST(tstate->interp); + /* NOTE: if COND_WAIT does not atomically start waiting when + releasing the mutex, another thread can run through, take + the GIL and drop it again, and reset the condition + before we even had a chance to wait for it. */ + COND_WAIT(gil->switch_cond, gil->switch_mutex); + } + MUTEX_UNLOCK(gil->switch_mutex); + } +#endif +} + + +/* Check if a Python thread must exit immediately, rather than taking the GIL + if Py_Finalize() has been called. + + When this function is called by a daemon thread after Py_Finalize() has been + called, the GIL does no longer exist. + + tstate must be non-NULL. */ +static inline int +tstate_must_exit(PyThreadState *tstate) +{ + /* bpo-39877: Access _PyRuntime directly rather than using + tstate->interp->runtime to support calls from Python daemon threads. + After Py_Finalize() has been called, tstate can be a dangling pointer: + point to PyThreadState freed memory. */ + PyThreadState *finalizing = _PyRuntimeState_GetFinalizing(&_PyRuntime); + return (finalizing != NULL && finalizing != tstate); +} + + +/* Take the GIL. + + The function saves errno at entry and restores its value at exit. + + tstate must be non-NULL. */ +static void +take_gil(PyThreadState *tstate) +{ + int err = errno; + + assert(tstate != NULL); + + if (tstate_must_exit(tstate)) { + /* bpo-39877: If Py_Finalize() has been called and tstate is not the + thread which called Py_Finalize(), exit immediately the thread. + + This code path can be reached by a daemon thread after Py_Finalize() + completes. In this case, tstate is a dangling pointer: points to + PyThreadState freed memory. */ + PyThread_exit_thread(); + } + + assert(is_tstate_valid(tstate)); + PyInterpreterState *interp = tstate->interp; + struct _ceval_runtime_state *ceval = &interp->runtime->ceval; + struct _ceval_state *ceval2 = &interp->ceval; + struct _gil_runtime_state *gil = &ceval->gil; + + /* Check that _PyEval_InitThreads() was called to create the lock */ + assert(gil_created(gil)); + + MUTEX_LOCK(gil->mutex); + + if (!_Py_atomic_load_relaxed(&gil->locked)) { + goto _ready; + } + + while (_Py_atomic_load_relaxed(&gil->locked)) { + unsigned long saved_switchnum = gil->switch_number; + + unsigned long interval = (gil->interval >= 1 ? gil->interval : 1); + int timed_out = 0; + COND_TIMED_WAIT(gil->cond, gil->mutex, interval, timed_out); + + /* If we timed out and no switch occurred in the meantime, it is time + to ask the GIL-holding thread to drop it. */ + if (timed_out && + _Py_atomic_load_relaxed(&gil->locked) && + gil->switch_number == saved_switchnum) + { + if (tstate_must_exit(tstate)) { + MUTEX_UNLOCK(gil->mutex); + PyThread_exit_thread(); + } + assert(is_tstate_valid(tstate)); + + SET_GIL_DROP_REQUEST(interp); + } + } + +_ready: +#ifdef FORCE_SWITCHING + /* This mutex must be taken before modifying gil->last_holder: + see drop_gil(). 
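Otherwise drop_gil() could observe last_holder still pointing at the dropping thread, decide to wait on switch_cond, and miss the COND_SIGNAL issued below if the store and signal landed between its check and its COND_WAIT; holding switch_mutex makes drop_gil()'s check-then-wait atomic with respect to this store-then-signal.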
*/ + MUTEX_LOCK(gil->switch_mutex); +#endif + /* We now hold the GIL */ + _Py_atomic_store_relaxed(&gil->locked, 1); + _Py_ANNOTATE_RWLOCK_ACQUIRED(&gil->locked, /*is_write=*/1); + + if (tstate != (PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) { + _Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate); + ++gil->switch_number; + } + +#ifdef FORCE_SWITCHING + COND_SIGNAL(gil->switch_cond); + MUTEX_UNLOCK(gil->switch_mutex); +#endif + + if (tstate_must_exit(tstate)) { + /* bpo-36475: If Py_Finalize() has been called and tstate is not + the thread which called Py_Finalize(), exit immediately the + thread. + + This code path can be reached by a daemon thread which was waiting + in take_gil() while the main thread called + wait_for_thread_shutdown() from Py_Finalize(). */ + MUTEX_UNLOCK(gil->mutex); + drop_gil(ceval, ceval2, tstate); + PyThread_exit_thread(); + } + assert(is_tstate_valid(tstate)); + + if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request)) { + RESET_GIL_DROP_REQUEST(interp); + } + else { + /* bpo-40010: eval_breaker should be recomputed to be set to 1 if there + is a pending signal: signal received by another thread which cannot + handle signals. + + Note: RESET_GIL_DROP_REQUEST() calls COMPUTE_EVAL_BREAKER(). */ + COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); + } + + /* Don't access tstate if the thread must exit */ + if (tstate->async_exc != NULL) { + _PyEval_SignalAsyncExc(tstate->interp); + } + + MUTEX_UNLOCK(gil->mutex); + + errno = err; +} + +void _PyEval_SetSwitchInterval(unsigned long microseconds) +{ + struct _gil_runtime_state *gil = &_PyRuntime.ceval.gil; + gil->interval = microseconds; +} + +unsigned long _PyEval_GetSwitchInterval() +{ + struct _gil_runtime_state *gil = &_PyRuntime.ceval.gil; + return gil->interval; +} + + +int +_PyEval_ThreadsInitialized(_PyRuntimeState *runtime) +{ + return gil_created(&runtime->ceval.gil); +} + +int +PyEval_ThreadsInitialized(void) +{ + _PyRuntimeState *runtime = &_PyRuntime; + return _PyEval_ThreadsInitialized(runtime); +} + +PyStatus +_PyEval_InitGIL(PyThreadState *tstate) +{ + if (!_Py_IsMainInterpreter(tstate->interp)) { + /* Currently, the GIL is shared by all interpreters, + and only the main interpreter is responsible to create + and destroy it. */ + return _PyStatus_OK(); + } + + struct _gil_runtime_state *gil = &tstate->interp->runtime->ceval.gil; + assert(!gil_created(gil)); + + PyThread_init_thread(); + create_gil(gil); + + take_gil(tstate); + + assert(gil_created(gil)); + return _PyStatus_OK(); +} + +void +_PyEval_FiniGIL(PyInterpreterState *interp) +{ + if (!_Py_IsMainInterpreter(interp)) { + /* Currently, the GIL is shared by all interpreters, + and only the main interpreter is responsible to create + and destroy it. */ + return; + } + + struct _gil_runtime_state *gil = &interp->runtime->ceval.gil; + if (!gil_created(gil)) { + /* First Py_InitializeFromConfig() call: the GIL doesn't exist + yet: do nothing. 
*/ + return; + } + + destroy_gil(gil); + assert(!gil_created(gil)); +} + +void +PyEval_InitThreads(void) +{ + /* Do nothing: kept for backward compatibility */ +} + +void +_PyEval_Fini(void) +{ +#ifdef Py_STATS + _Py_PrintSpecializationStats(1); +#endif +} +void +PyEval_AcquireLock(void) +{ + _PyRuntimeState *runtime = &_PyRuntime; + PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime); + _Py_EnsureTstateNotNULL(tstate); + + take_gil(tstate); +} + +void +PyEval_ReleaseLock(void) +{ + _PyRuntimeState *runtime = &_PyRuntime; + PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime); + /* This function must succeed when the current thread state is NULL. + We therefore avoid PyThreadState_Get() which dumps a fatal error + in debug mode. */ + struct _ceval_runtime_state *ceval = &runtime->ceval; + struct _ceval_state *ceval2 = &tstate->interp->ceval; + drop_gil(ceval, ceval2, tstate); +} + +void +_PyEval_ReleaseLock(PyThreadState *tstate) +{ + struct _ceval_runtime_state *ceval = &tstate->interp->runtime->ceval; + struct _ceval_state *ceval2 = &tstate->interp->ceval; + drop_gil(ceval, ceval2, tstate); +} + +void +PyEval_AcquireThread(PyThreadState *tstate) +{ + _Py_EnsureTstateNotNULL(tstate); + + take_gil(tstate); + + struct _gilstate_runtime_state *gilstate = &tstate->interp->runtime->gilstate; + if (_PyThreadState_Swap(gilstate, tstate) != NULL) { + Py_FatalError("non-NULL old thread state"); + } +} + +void +PyEval_ReleaseThread(PyThreadState *tstate) +{ + assert(is_tstate_valid(tstate)); + + _PyRuntimeState *runtime = tstate->interp->runtime; + PyThreadState *new_tstate = _PyThreadState_Swap(&runtime->gilstate, NULL); + if (new_tstate != tstate) { + Py_FatalError("wrong thread state"); + } + struct _ceval_runtime_state *ceval = &runtime->ceval; + struct _ceval_state *ceval2 = &tstate->interp->ceval; + drop_gil(ceval, ceval2, tstate); +} + +#ifdef HAVE_FORK +/* This function is called from PyOS_AfterFork_Child to destroy all threads + which are not running in the child process, and clear internal locks + which might be held by those threads. */ +PyStatus +_PyEval_ReInitThreads(PyThreadState *tstate) +{ + _PyRuntimeState *runtime = tstate->interp->runtime; + + struct _gil_runtime_state *gil = &runtime->ceval.gil; + if (!gil_created(gil)) { + return _PyStatus_OK(); + } + recreate_gil(gil); + + take_gil(tstate); + + struct _pending_calls *pending = &tstate->interp->ceval.pending; + if (_PyThread_at_fork_reinit(&pending->lock) < 0) { + return _PyStatus_ERR("Can't reinitialize pending calls lock"); + } + + /* Destroy all threads except the current one */ + _PyThreadState_DeleteExcept(runtime, tstate); + return _PyStatus_OK(); +} +#endif + +/* This function is used to signal that async exceptions are waiting to be + raised. 
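The usual route into this path from C is the public PyThreadState_SetAsyncExc() API, which stores the exception on the target thread state and then calls _PyEval_SignalAsyncExc(). A minimal sketch (worker_tid is a hypothetical unsigned long holding the target thread's ident, e.g. obtained via PyThread_get_thread_ident() in that thread):

      int n = PyThreadState_SetAsyncExc(worker_tid, PyExc_KeyboardInterrupt);
      if (n == 0) {
          // no thread state matched worker_tid; nothing was scheduled
      }

   The eval loop then notices the exception at its next eval-breaker check, in _Py_HandlePending() below.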
*/ + +void +_PyEval_SignalAsyncExc(PyInterpreterState *interp) +{ + SIGNAL_ASYNC_EXC(interp); +} + +PyThreadState * +PyEval_SaveThread(void) +{ + _PyRuntimeState *runtime = &_PyRuntime; + PyThreadState *tstate = _PyThreadState_Swap(&runtime->gilstate, NULL); + _Py_EnsureTstateNotNULL(tstate); + + struct _ceval_runtime_state *ceval = &runtime->ceval; + struct _ceval_state *ceval2 = &tstate->interp->ceval; + assert(gil_created(&ceval->gil)); + drop_gil(ceval, ceval2, tstate); + return tstate; +} + +void +PyEval_RestoreThread(PyThreadState *tstate) +{ + _Py_EnsureTstateNotNULL(tstate); + + take_gil(tstate); + + struct _gilstate_runtime_state *gilstate = &tstate->interp->runtime->gilstate; + _PyThreadState_Swap(gilstate, tstate); +} + + +/* Mechanism whereby asynchronously executing callbacks (e.g. UNIX + signal handlers or Mac I/O completion routines) can schedule calls + to a function to be called synchronously. + The synchronous function is called with one void* argument. + It should return 0 for success or -1 for failure -- failure should + be accompanied by an exception. + + If registry succeeds, the registry function returns 0; if it fails + (e.g. due to too many pending calls) it returns -1 (without setting + an exception condition). + + Note that because registry may occur from within signal handlers, + or other asynchronous events, calling malloc() is unsafe! + + Any thread can schedule pending calls, but only the main thread + will execute them. + There is no facility to schedule calls to a particular thread, but + that should be easy to change, should that ever be required. In + that case, the static variables here should go into the python + threadstate. +*/ + +void +_PyEval_SignalReceived(PyInterpreterState *interp) +{ +#ifdef MS_WINDOWS + // bpo-42296: On Windows, _PyEval_SignalReceived() is called from a signal + // handler which can run in a thread different than the Python thread, in + // which case _Py_ThreadCanHandleSignals() is wrong. Ignore + // _Py_ThreadCanHandleSignals() and always set eval_breaker to 1. + // + // The next eval_frame_handle_pending() call will call + // _Py_ThreadCanHandleSignals() to recompute eval_breaker. + int force = 1; +#else + int force = 0; +#endif + /* bpo-30703: Function called when the C signal handler of Python gets a + signal. We cannot queue a callback using _PyEval_AddPendingCall() since + that function is not async-signal-safe. */ + SIGNAL_PENDING_SIGNALS(interp, force); +} + +/* Push one item onto the queue while holding the lock. */ +static int +_push_pending_call(struct _pending_calls *pending, + int (*func)(void *), void *arg) +{ + int i = pending->last; + int j = (i + 1) % NPENDINGCALLS; + if (j == pending->first) { + return -1; /* Queue full */ + } + pending->calls[i].func = func; + pending->calls[i].arg = arg; + pending->last = j; + return 0; +} + +/* Pop one item off the queue while holding the lock. */ +static void +_pop_pending_call(struct _pending_calls *pending, + int (**func)(void *), void **arg) +{ + int i = pending->first; + if (i == pending->last) { + return; /* Queue empty */ + } + + *func = pending->calls[i].func; + *arg = pending->calls[i].arg; + pending->first = (i + 1) % NPENDINGCALLS; +} + +/* This implementation is thread-safe. It allows + scheduling to be made from any thread, and even from an executing + callback. 
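For instance, a C extension's worker thread might hand a notification back to the main thread like this (a rough sketch; report_done() and its argument are hypothetical):

      static int
      report_done(void *arg)
      {
          // runs later in the main thread, with the GIL held;
          // return 0 on success, -1 (with an exception set) on error
          PySys_WriteStdout("done: %s\n", (const char *)arg);
          return 0;
      }

      // from any thread, with or without the GIL:
      if (Py_AddPendingCall(report_done, (void *)"task-1") < 0) {
          // queue full: nothing was scheduled; the caller may retry
      }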
+ */ + +int +_PyEval_AddPendingCall(PyInterpreterState *interp, + int (*func)(void *), void *arg) +{ + struct _pending_calls *pending = &interp->ceval.pending; + /* Ensure that _PyEval_InitState() was called + and that _PyEval_FiniState() is not called yet. */ + assert(pending->lock != NULL); + + PyThread_acquire_lock(pending->lock, WAIT_LOCK); + int result = _push_pending_call(pending, func, arg); + PyThread_release_lock(pending->lock); + + /* signal main loop */ + SIGNAL_PENDING_CALLS(interp); + return result; +} + +int +Py_AddPendingCall(int (*func)(void *), void *arg) +{ + /* Best-effort to support subinterpreters and calls with the GIL released. + + First attempt _PyThreadState_GET() since it supports subinterpreters. + + If the GIL is released, _PyThreadState_GET() returns NULL . In this + case, use PyGILState_GetThisThreadState() which works even if the GIL + is released. + + Sadly, PyGILState_GetThisThreadState() doesn't support subinterpreters: + see bpo-10915 and bpo-15751. + + Py_AddPendingCall() doesn't require the caller to hold the GIL. */ + PyThreadState *tstate = _PyThreadState_GET(); + if (tstate == NULL) { + tstate = PyGILState_GetThisThreadState(); + } + + PyInterpreterState *interp; + if (tstate != NULL) { + interp = tstate->interp; + } + else { + /* Last resort: use the main interpreter */ + interp = _PyInterpreterState_Main(); + } + return _PyEval_AddPendingCall(interp, func, arg); +} + +static int +handle_signals(PyThreadState *tstate) +{ + assert(is_tstate_valid(tstate)); + if (!_Py_ThreadCanHandleSignals(tstate->interp)) { + return 0; + } + + UNSIGNAL_PENDING_SIGNALS(tstate->interp); + if (_PyErr_CheckSignalsTstate(tstate) < 0) { + /* On failure, re-schedule a call to handle_signals(). */ + SIGNAL_PENDING_SIGNALS(tstate->interp, 0); + return -1; + } + return 0; +} + +static int +make_pending_calls(PyInterpreterState *interp) +{ + /* only execute pending calls on main thread */ + if (!_Py_ThreadCanHandlePendingCalls()) { + return 0; + } + + /* don't perform recursive pending calls */ + static int busy = 0; + if (busy) { + return 0; + } + busy = 1; + + /* unsignal before starting to call callbacks, so that any callback + added in-between re-signals */ + UNSIGNAL_PENDING_CALLS(interp); + int res = 0; + + /* perform a bounded number of calls, in case of recursion */ + struct _pending_calls *pending = &interp->ceval.pending; + for (int i=0; i<NPENDINGCALLS; i++) { + int (*func)(void *) = NULL; + void *arg = NULL; + + /* pop one item off the queue while holding the lock */ + PyThread_acquire_lock(pending->lock, WAIT_LOCK); + _pop_pending_call(pending, &func, &arg); + PyThread_release_lock(pending->lock); + + /* having released the lock, perform the callback */ + if (func == NULL) { + break; + } + res = func(arg); + if (res) { + goto error; + } + } + + busy = 0; + return res; + +error: + busy = 0; + SIGNAL_PENDING_CALLS(interp); + return res; +} + +void +_Py_FinishPendingCalls(PyThreadState *tstate) +{ + assert(PyGILState_Check()); + assert(is_tstate_valid(tstate)); + + struct _pending_calls *pending = &tstate->interp->ceval.pending; + + if (!_Py_atomic_load_relaxed_int32(&(pending->calls_to_do))) { + return; + } + + if (make_pending_calls(tstate->interp) < 0) { + PyObject *exc, *val, *tb; + _PyErr_Fetch(tstate, &exc, &val, &tb); + PyErr_BadInternalCall(); + _PyErr_ChainExceptions(exc, val, tb); + _PyErr_Print(tstate); + } +} + +/* Py_MakePendingCalls() is a simple wrapper for the sake + of backward-compatibility. 
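An embedding application that spends long stretches in native code without re-entering the eval loop can service the queue (and Python-level signal handlers) itself; a hedged sketch, with do_native_work() standing in for the host's own event loop, and the GIL held as the assert below requires:

      do_native_work();
      if (Py_MakePendingCalls() != 0) {
          PyErr_Print();   // a pending callback or signal handler failed
      }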
*/ +int +Py_MakePendingCalls(void) +{ + assert(PyGILState_Check()); + + PyThreadState *tstate = _PyThreadState_GET(); + assert(is_tstate_valid(tstate)); + + /* Python signal handler doesn't really queue a callback: it only signals + that a signal was received, see _PyEval_SignalReceived(). */ + int res = handle_signals(tstate); + if (res != 0) { + return res; + } + + res = make_pending_calls(tstate->interp); + if (res != 0) { + return res; + } + + return 0; +} + +/* The interpreter's recursion limit */ + +void +_PyEval_InitRuntimeState(struct _ceval_runtime_state *ceval) +{ + _gil_initialize(&ceval->gil); +} + +void +_PyEval_InitState(struct _ceval_state *ceval, PyThread_type_lock pending_lock) +{ + struct _pending_calls *pending = &ceval->pending; + assert(pending->lock == NULL); + + pending->lock = pending_lock; +} + +void +_PyEval_FiniState(struct _ceval_state *ceval) +{ + struct _pending_calls *pending = &ceval->pending; + if (pending->lock != NULL) { + PyThread_free_lock(pending->lock); + pending->lock = NULL; + } +} + +/* Handle signals, pending calls, GIL drop request + and asynchronous exception */ +int +_Py_HandlePending(PyThreadState *tstate) +{ + _PyRuntimeState * const runtime = &_PyRuntime; + struct _ceval_runtime_state *ceval = &runtime->ceval; + + /* Pending signals */ + if (_Py_atomic_load_relaxed_int32(&ceval->signals_pending)) { + if (handle_signals(tstate) != 0) { + return -1; + } + } + + /* Pending calls */ + struct _ceval_state *ceval2 = &tstate->interp->ceval; + if (_Py_atomic_load_relaxed_int32(&ceval2->pending.calls_to_do)) { + if (make_pending_calls(tstate->interp) != 0) { + return -1; + } + } + + /* GIL drop request */ + if (_Py_atomic_load_relaxed_int32(&ceval2->gil_drop_request)) { + /* Give another thread a chance */ + if (_PyThreadState_Swap(&runtime->gilstate, NULL) != tstate) { + Py_FatalError("tstate mix-up"); + } + drop_gil(ceval, ceval2, tstate); + + /* Other threads may run now */ + + take_gil(tstate); + + if (_PyThreadState_Swap(&runtime->gilstate, tstate) != NULL) { + Py_FatalError("orphan tstate"); + } + } + + /* Check for asynchronous exception. */ + if (tstate->async_exc != NULL) { + PyObject *exc = tstate->async_exc; + tstate->async_exc = NULL; + UNSIGNAL_ASYNC_EXC(tstate->interp); + _PyErr_SetNone(tstate, exc); + Py_DECREF(exc); + return -1; + } + +#ifdef MS_WINDOWS + // bpo-42296: On Windows, _PyEval_SignalReceived() can be called in a + // different thread than the Python thread, in which case + // _Py_ThreadCanHandleSignals() is wrong. Recompute eval_breaker in the + // current Python thread with the correct _Py_ThreadCanHandleSignals() + // value. It prevents to interrupt the eval loop at every instruction if + // the current Python thread cannot handle signals (if + // _Py_ThreadCanHandleSignals() is false). + COMPUTE_EVAL_BREAKER(tstate->interp, ceval, ceval2); +#endif + + return 0; +} + diff --git a/Python/ceval_gil.h b/Python/ceval_gil.h deleted file mode 100644 index 4c71edd..0000000 --- a/Python/ceval_gil.h +++ /dev/null @@ -1,333 +0,0 @@ -/* - * Implementation of the Global Interpreter Lock (GIL). - */ - -#include <stdlib.h> -#include <errno.h> - -#include "pycore_atomic.h" - - -/* - Notes about the implementation: - - - The GIL is just a boolean variable (locked) whose access is protected - by a mutex (gil_mutex), and whose changes are signalled by a condition - variable (gil_cond). gil_mutex is taken for short periods of time, - and therefore mostly uncontended. 
- - - In the GIL-holding thread, the main loop (PyEval_EvalFrameEx) must be - able to release the GIL on demand by another thread. A volatile boolean - variable (gil_drop_request) is used for that purpose, which is checked - at every turn of the eval loop. That variable is set after a wait of - `interval` microseconds on `gil_cond` has timed out. - - [Actually, another volatile boolean variable (eval_breaker) is used - which ORs several conditions into one. Volatile booleans are - sufficient as inter-thread signalling means since Python is run - on cache-coherent architectures only.] - - - A thread wanting to take the GIL will first let pass a given amount of - time (`interval` microseconds) before setting gil_drop_request. This - encourages a defined switching period, but doesn't enforce it since - opcodes can take an arbitrary time to execute. - - The `interval` value is available for the user to read and modify - using the Python API `sys.{get,set}switchinterval()`. - - - When a thread releases the GIL and gil_drop_request is set, that thread - ensures that another GIL-awaiting thread gets scheduled. - It does so by waiting on a condition variable (switch_cond) until - the value of last_holder is changed to something else than its - own thread state pointer, indicating that another thread was able to - take the GIL. - - This is meant to prohibit the latency-adverse behaviour on multi-core - machines where one thread would speculatively release the GIL, but still - run and end up being the first to re-acquire it, making the "timeslices" - much longer than expected. - (Note: this mechanism is enabled with FORCE_SWITCHING above) -*/ - -#include "condvar.h" - -#define MUTEX_INIT(mut) \ - if (PyMUTEX_INIT(&(mut))) { \ - Py_FatalError("PyMUTEX_INIT(" #mut ") failed"); }; -#define MUTEX_FINI(mut) \ - if (PyMUTEX_FINI(&(mut))) { \ - Py_FatalError("PyMUTEX_FINI(" #mut ") failed"); }; -#define MUTEX_LOCK(mut) \ - if (PyMUTEX_LOCK(&(mut))) { \ - Py_FatalError("PyMUTEX_LOCK(" #mut ") failed"); }; -#define MUTEX_UNLOCK(mut) \ - if (PyMUTEX_UNLOCK(&(mut))) { \ - Py_FatalError("PyMUTEX_UNLOCK(" #mut ") failed"); }; - -#define COND_INIT(cond) \ - if (PyCOND_INIT(&(cond))) { \ - Py_FatalError("PyCOND_INIT(" #cond ") failed"); }; -#define COND_FINI(cond) \ - if (PyCOND_FINI(&(cond))) { \ - Py_FatalError("PyCOND_FINI(" #cond ") failed"); }; -#define COND_SIGNAL(cond) \ - if (PyCOND_SIGNAL(&(cond))) { \ - Py_FatalError("PyCOND_SIGNAL(" #cond ") failed"); }; -#define COND_WAIT(cond, mut) \ - if (PyCOND_WAIT(&(cond), &(mut))) { \ - Py_FatalError("PyCOND_WAIT(" #cond ") failed"); }; -#define COND_TIMED_WAIT(cond, mut, microseconds, timeout_result) \ - { \ - int r = PyCOND_TIMEDWAIT(&(cond), &(mut), (microseconds)); \ - if (r < 0) \ - Py_FatalError("PyCOND_WAIT(" #cond ") failed"); \ - if (r) /* 1 == timeout, 2 == impl. 
can't say, so assume timeout */ \ - timeout_result = 1; \ - else \ - timeout_result = 0; \ - } \ - - -#define DEFAULT_INTERVAL 5000 - -static void _gil_initialize(struct _gil_runtime_state *gil) -{ - _Py_atomic_int uninitialized = {-1}; - gil->locked = uninitialized; - gil->interval = DEFAULT_INTERVAL; -} - -static int gil_created(struct _gil_runtime_state *gil) -{ - return (_Py_atomic_load_explicit(&gil->locked, _Py_memory_order_acquire) >= 0); -} - -static void create_gil(struct _gil_runtime_state *gil) -{ - MUTEX_INIT(gil->mutex); -#ifdef FORCE_SWITCHING - MUTEX_INIT(gil->switch_mutex); -#endif - COND_INIT(gil->cond); -#ifdef FORCE_SWITCHING - COND_INIT(gil->switch_cond); -#endif - _Py_atomic_store_relaxed(&gil->last_holder, 0); - _Py_ANNOTATE_RWLOCK_CREATE(&gil->locked); - _Py_atomic_store_explicit(&gil->locked, 0, _Py_memory_order_release); -} - -static void destroy_gil(struct _gil_runtime_state *gil) -{ - /* some pthread-like implementations tie the mutex to the cond - * and must have the cond destroyed first. - */ - COND_FINI(gil->cond); - MUTEX_FINI(gil->mutex); -#ifdef FORCE_SWITCHING - COND_FINI(gil->switch_cond); - MUTEX_FINI(gil->switch_mutex); -#endif - _Py_atomic_store_explicit(&gil->locked, -1, - _Py_memory_order_release); - _Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked); -} - -#ifdef HAVE_FORK -static void recreate_gil(struct _gil_runtime_state *gil) -{ - _Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked); - /* XXX should we destroy the old OS resources here? */ - create_gil(gil); -} -#endif - -static void -drop_gil(struct _ceval_runtime_state *ceval, struct _ceval_state *ceval2, - PyThreadState *tstate) -{ - struct _gil_runtime_state *gil = &ceval->gil; - if (!_Py_atomic_load_relaxed(&gil->locked)) { - Py_FatalError("drop_gil: GIL is not locked"); - } - - /* tstate is allowed to be NULL (early interpreter init) */ - if (tstate != NULL) { - /* Sub-interpreter support: threads might have been switched - under our feet using PyThreadState_Swap(). Fix the GIL last - holder variable so that our heuristics work. */ - _Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate); - } - - MUTEX_LOCK(gil->mutex); - _Py_ANNOTATE_RWLOCK_RELEASED(&gil->locked, /*is_write=*/1); - _Py_atomic_store_relaxed(&gil->locked, 0); - COND_SIGNAL(gil->cond); - MUTEX_UNLOCK(gil->mutex); - -#ifdef FORCE_SWITCHING - if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request) && tstate != NULL) { - MUTEX_LOCK(gil->switch_mutex); - /* Not switched yet => wait */ - if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) == tstate) - { - assert(is_tstate_valid(tstate)); - RESET_GIL_DROP_REQUEST(tstate->interp); - /* NOTE: if COND_WAIT does not atomically start waiting when - releasing the mutex, another thread can run through, take - the GIL and drop it again, and reset the condition - before we even had a chance to wait for it. */ - COND_WAIT(gil->switch_cond, gil->switch_mutex); - } - MUTEX_UNLOCK(gil->switch_mutex); - } -#endif -} - - -/* Check if a Python thread must exit immediately, rather than taking the GIL - if Py_Finalize() has been called. - - When this function is called by a daemon thread after Py_Finalize() has been - called, the GIL does no longer exist. - - tstate must be non-NULL. */ -static inline int -tstate_must_exit(PyThreadState *tstate) -{ - /* bpo-39877: Access _PyRuntime directly rather than using - tstate->interp->runtime to support calls from Python daemon threads. - After Py_Finalize() has been called, tstate can be a dangling pointer: - point to PyThreadState freed memory. 
*/ - PyThreadState *finalizing = _PyRuntimeState_GetFinalizing(&_PyRuntime); - return (finalizing != NULL && finalizing != tstate); -} - - -/* Take the GIL. - - The function saves errno at entry and restores its value at exit. - - tstate must be non-NULL. */ -static void -take_gil(PyThreadState *tstate) -{ - int err = errno; - - assert(tstate != NULL); - - if (tstate_must_exit(tstate)) { - /* bpo-39877: If Py_Finalize() has been called and tstate is not the - thread which called Py_Finalize(), exit immediately the thread. - - This code path can be reached by a daemon thread after Py_Finalize() - completes. In this case, tstate is a dangling pointer: points to - PyThreadState freed memory. */ - PyThread_exit_thread(); - } - - assert(is_tstate_valid(tstate)); - PyInterpreterState *interp = tstate->interp; - struct _ceval_runtime_state *ceval = &interp->runtime->ceval; - struct _ceval_state *ceval2 = &interp->ceval; - struct _gil_runtime_state *gil = &ceval->gil; - - /* Check that _PyEval_InitThreads() was called to create the lock */ - assert(gil_created(gil)); - - MUTEX_LOCK(gil->mutex); - - if (!_Py_atomic_load_relaxed(&gil->locked)) { - goto _ready; - } - - while (_Py_atomic_load_relaxed(&gil->locked)) { - unsigned long saved_switchnum = gil->switch_number; - - unsigned long interval = (gil->interval >= 1 ? gil->interval : 1); - int timed_out = 0; - COND_TIMED_WAIT(gil->cond, gil->mutex, interval, timed_out); - - /* If we timed out and no switch occurred in the meantime, it is time - to ask the GIL-holding thread to drop it. */ - if (timed_out && - _Py_atomic_load_relaxed(&gil->locked) && - gil->switch_number == saved_switchnum) - { - if (tstate_must_exit(tstate)) { - MUTEX_UNLOCK(gil->mutex); - PyThread_exit_thread(); - } - assert(is_tstate_valid(tstate)); - - SET_GIL_DROP_REQUEST(interp); - } - } - -_ready: -#ifdef FORCE_SWITCHING - /* This mutex must be taken before modifying gil->last_holder: - see drop_gil(). */ - MUTEX_LOCK(gil->switch_mutex); -#endif - /* We now hold the GIL */ - _Py_atomic_store_relaxed(&gil->locked, 1); - _Py_ANNOTATE_RWLOCK_ACQUIRED(&gil->locked, /*is_write=*/1); - - if (tstate != (PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) { - _Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate); - ++gil->switch_number; - } - -#ifdef FORCE_SWITCHING - COND_SIGNAL(gil->switch_cond); - MUTEX_UNLOCK(gil->switch_mutex); -#endif - - if (tstate_must_exit(tstate)) { - /* bpo-36475: If Py_Finalize() has been called and tstate is not - the thread which called Py_Finalize(), exit immediately the - thread. - - This code path can be reached by a daemon thread which was waiting - in take_gil() while the main thread called - wait_for_thread_shutdown() from Py_Finalize(). */ - MUTEX_UNLOCK(gil->mutex); - drop_gil(ceval, ceval2, tstate); - PyThread_exit_thread(); - } - assert(is_tstate_valid(tstate)); - - if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request)) { - RESET_GIL_DROP_REQUEST(interp); - } - else { - /* bpo-40010: eval_breaker should be recomputed to be set to 1 if there - is a pending signal: signal received by another thread which cannot - handle signals. - - Note: RESET_GIL_DROP_REQUEST() calls COMPUTE_EVAL_BREAKER(). 
*/ - COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); - } - - /* Don't access tstate if the thread must exit */ - if (tstate->async_exc != NULL) { - _PyEval_SignalAsyncExc(tstate->interp); - } - - MUTEX_UNLOCK(gil->mutex); - - errno = err; -} - -void _PyEval_SetSwitchInterval(unsigned long microseconds) -{ - struct _gil_runtime_state *gil = &_PyRuntime.ceval.gil; - gil->interval = microseconds; -} - -unsigned long _PyEval_GetSwitchInterval() -{ - struct _gil_runtime_state *gil = &_PyRuntime.ceval.gil; - return gil->interval; -} diff --git a/Tools/c-analyzer/cpython/_parser.py b/Tools/c-analyzer/cpython/_parser.py index 992d2e5..dc8423b 100644 --- a/Tools/c-analyzer/cpython/_parser.py +++ b/Tools/c-analyzer/cpython/_parser.py @@ -174,7 +174,6 @@ Objects/stringlib/codecs.h Py_BUILD_CORE 1 Objects/stringlib/unicode_format.h Py_BUILD_CORE 1 Parser/string_parser.h Py_BUILD_CORE 1 Parser/pegen.h Py_BUILD_CORE 1 -Python/ceval_gil.h Py_BUILD_CORE 1 Python/condvar.h Py_BUILD_CORE 1 Modules/_json.c Py_BUILD_CORE_BUILTIN 1 diff --git a/Tools/gdb/libpython.py b/Tools/gdb/libpython.py index 899cb6c..303409c 100755 --- a/Tools/gdb/libpython.py +++ b/Tools/gdb/libpython.py @@ -1752,7 +1752,7 @@ class Frame(object): def is_waiting_for_gil(self): '''Is this frame waiting on the GIL?''' - # This assumes the _POSIX_THREADS version of Python/ceval_gil.h: + # This assumes the _POSIX_THREADS version of Python/ceval_gil.c: name = self._gdbframe.name() if name: return (name == 'take_gil') -- cgit v0.12
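A note on the most common consumers of the APIs moved in this patch: extension and embedding code releases the GIL around blocking work with the save/restore pair that now lives in ceval_gil.c. A minimal sketch, assuming do_blocking_io() is some hypothetical native call that touches no Python objects:

    PyThreadState *save = PyEval_SaveThread();  /* drop_gil(); tstate detached */
    do_blocking_io();                           /* other Python threads may run */
    PyEval_RestoreThread(save);                 /* take_gil(); tstate reattached */

This is what the Py_BEGIN_ALLOW_THREADS / Py_END_ALLOW_THREADS macros expand to. The `interval` described in the implementation notes is the value exposed as sys.getswitchinterval() / sys.setswitchinterval(), which funnel into _PyEval_GetSwitchInterval() / _PyEval_SetSwitchInterval() above (seconds at the Python level, microseconds internally).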