author    Victor Stinner <vstinner@python.org>   2020-03-19 01:41:21 (GMT)
committer GitHub <noreply@github.com>            2020-03-19 01:41:21 (GMT)
commit    50e6e991781db761c496561a995541ca8d83ff87 (patch)
tree      3380890960438581d8262ad24e2c8b346d664c87 /Python/ceval_gil.h
parent    3cde88439d542ed8ca6395acc8dfffd174ecca18 (diff)
bpo-39984: Move pending calls to PyInterpreterState (GH-19066)
If Py_AddPendingCall() is called in a subinterpreter, the function is
now scheduled to be called from the subinterpreter, rather than being
called from the main interpreter. Each subinterpreter now has its own
list of scheduled calls.

* Move the pending and eval_breaker fields from _PyRuntimeState.ceval
  to PyInterpreterState.ceval.
* new_interpreter() now calls _PyEval_InitThreads() to create the
  pending calls lock.
* Fix Py_AddPendingCall() for subinterpreters. It now calls
  _PyThreadState_GET(), which works in a subinterpreter if the caller
  holds the GIL, and only falls back on PyGILState_GetThisThreadState()
  if _PyThreadState_GET() returns NULL.
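For context, Py_AddPendingCall() is the public C-API entry point this
commit fixes; the sketch below shows how an extension might schedule a
pending call. The callback name and payload are illustrative, not part
of this commit.

    #include <Python.h>
    #include <stdio.h>

    /* Illustrative callback: per this commit, it is scheduled on the
       calling (sub)interpreter's own pending-call list rather than
       always on the main interpreter's. Returning 0 means success;
       returning -1 with an exception set signals failure. */
    static int
    my_pending_callback(void *arg)
    {
        printf("pending call ran: %s\n", (const char *)arg);
        return 0;
    }

    /* Schedule the callback. Py_AddPendingCall() returns 0 on success
       and -1 if the call could not be queued. */
    static void
    schedule_example(void)
    {
        if (Py_AddPendingCall(my_pending_callback, (void *)"hello") < 0) {
            /* Could not queue the call; the caller may retry later. */
        }
    }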
Diffstat (limited to 'Python/ceval_gil.h')
-rw-r--r--  Python/ceval_gil.h  10

1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/Python/ceval_gil.h b/Python/ceval_gil.h
index 3e9f405..b359e3c 100644
--- a/Python/ceval_gil.h
+++ b/Python/ceval_gil.h
@@ -141,7 +141,8 @@ static void recreate_gil(struct _gil_runtime_state *gil)
 }
 
 static void
-drop_gil(struct _ceval_runtime_state *ceval, PyThreadState *tstate)
+drop_gil(struct _ceval_runtime_state *ceval, struct _ceval_state *ceval2,
+         PyThreadState *tstate)
 {
     struct _gil_runtime_state *gil = &ceval->gil;
     if (!_Py_atomic_load_relaxed(&gil->locked)) {
@@ -168,7 +169,7 @@ drop_gil(struct _ceval_runtime_state *ceval, PyThreadState *tstate)
         /* Not switched yet => wait */
         if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) == tstate)
         {
-            RESET_GIL_DROP_REQUEST(ceval);
+            RESET_GIL_DROP_REQUEST(ceval, ceval2);
             /* NOTE: if COND_WAIT does not atomically start waiting when
                releasing the mutex, another thread can run through, take
                the GIL and drop it again, and reset the condition
@@ -230,6 +231,7 @@ take_gil(PyThreadState *tstate)
 
     struct _ceval_runtime_state *ceval = &tstate->interp->runtime->ceval;
     struct _gil_runtime_state *gil = &ceval->gil;
+    struct _ceval_state *ceval2 = &tstate->interp->ceval;
 
     /* Check that _PyEval_InitThreads() was called to create the lock */
     assert(gil_created(gil));
@@ -279,7 +281,7 @@ _ready:
     MUTEX_UNLOCK(gil->switch_mutex);
 #endif
     if (_Py_atomic_load_relaxed(&ceval->gil_drop_request)) {
-        RESET_GIL_DROP_REQUEST(ceval);
+        RESET_GIL_DROP_REQUEST(ceval, ceval2);
     }
 
     int must_exit = tstate_must_exit(tstate);
@@ -299,7 +301,7 @@ _ready:
            This code path can be reached by a daemon thread which was waiting
            in take_gil() while the main thread called
            wait_for_thread_shutdown() from Py_Finalize(). */
-        drop_gil(ceval, tstate);
+        drop_gil(ceval, ceval2, tstate);
         PyThread_exit_thread();
     }
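The lookup order the commit message describes for Py_AddPendingCall()
can be sketched as below. This is an illustrative reconstruction under
that description, not the verbatim implementation; the helper name is
hypothetical, and the internal header location is an assumption.

    #include <Python.h>
    #include "pycore_pystate.h"   /* assumed internal header providing _PyThreadState_GET() */

    /* Hypothetical helper sketching the described lookup order:
       _PyThreadState_GET() works in a subinterpreter if the caller
       holds the GIL; fall back on PyGILState_GetThisThreadState()
       only when it returns NULL. The resulting tstate->interp then
       selects which interpreter's pending-call list receives the
       scheduled call. */
    static PyThreadState *
    get_tstate_for_pending_call(void)
    {
        PyThreadState *tstate = _PyThreadState_GET();
        if (tstate == NULL) {
            tstate = PyGILState_GetThisThreadState();
        }
        return tstate;
    }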