author     Victor Stinner <vstinner@python.org>    2020-05-05 14:14:31 (GMT)
committer  GitHub <noreply@github.com>             2020-05-05 14:14:31 (GMT)
commit     0b1e3307e24b0af45787ab6456535b8346e0239a
tree       a1dc53188efc9af1478f3c9fe707bbaac02f1224 /Python
parent     4e01946cafca0cf49f796c3118e0d65237bcad69
bpo-40513: Per-interpreter gil_drop_request (GH-19927)
Move gil_drop_request member from _PyRuntimeState.ceval to
PyInterpreterState.ceval.
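
For orientation, here is a simplified sketch of where the moved field ends up. The struct bodies are abbreviated and illustrative, showing only the members this commit touches; they are not the full CPython definitions.

/* Illustrative sketch only: abbreviated forms of the two ceval state
 * structs, showing which side gil_drop_request lives on after this
 * commit.  Omitted members and exact layout follow the real CPython
 * headers only approximately. */
struct _ceval_runtime_state {
    /* Process-wide state: the GIL itself stays here. */
    struct _gil_runtime_state gil;
    /* ... */
};

struct _ceval_state {
    /* Per-interpreter state: gil_drop_request now sits next to the
     * other inputs that COMPUTE_EVAL_BREAKER() combines. */
    _Py_atomic_int eval_breaker;
    _Py_atomic_int gil_drop_request;   /* moved here from _ceval_runtime_state */
    _Py_atomic_int signals_pending;
    struct _pending_calls pending;
    /* ... */
};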
Diffstat (limited to 'Python')
-rw-r--r--  Python/ceval.c      75
-rw-r--r--  Python/ceval_gil.h  13
2 files changed, 43 insertions(+), 45 deletions(-)
diff --git a/Python/ceval.c b/Python/ceval.c
index addc026..6b00273 100644
--- a/Python/ceval.c
+++ b/Python/ceval.c
@@ -143,77 +143,70 @@ is_tstate_valid(PyThreadState *tstate)
    the GIL eventually anyway. */
 static inline void
 COMPUTE_EVAL_BREAKER(PyInterpreterState *interp,
-                     struct _ceval_runtime_state *ceval,
-                     struct _ceval_state *ceval2)
+                     struct _ceval_state *ceval)
 {
-    _Py_atomic_store_relaxed(&ceval2->eval_breaker,
+    _Py_atomic_store_relaxed(&ceval->eval_breaker,
         _Py_atomic_load_relaxed(&ceval->gil_drop_request)
-        | (_Py_atomic_load_relaxed(&ceval2->signals_pending)
+        | (_Py_atomic_load_relaxed(&ceval->signals_pending)
            && _Py_ThreadCanHandleSignals(interp))
-        | (_Py_atomic_load_relaxed(&ceval2->pending.calls_to_do)
+        | (_Py_atomic_load_relaxed(&ceval->pending.calls_to_do)
            && _Py_ThreadCanHandlePendingCalls())
-        | ceval2->pending.async_exc);
+        | ceval->pending.async_exc);
 }
 
 
 static inline void
 SET_GIL_DROP_REQUEST(PyInterpreterState *interp)
 {
-    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
-    struct _ceval_state *ceval2 = &interp->ceval;
+    struct _ceval_state *ceval = &interp->ceval;
     _Py_atomic_store_relaxed(&ceval->gil_drop_request, 1);
-    _Py_atomic_store_relaxed(&ceval2->eval_breaker, 1);
+    _Py_atomic_store_relaxed(&ceval->eval_breaker, 1);
 }
 
 
 static inline void
 RESET_GIL_DROP_REQUEST(PyInterpreterState *interp)
 {
-    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
-    struct _ceval_state *ceval2 = &interp->ceval;
+    struct _ceval_state *ceval = &interp->ceval;
     _Py_atomic_store_relaxed(&ceval->gil_drop_request, 0);
-    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
+    COMPUTE_EVAL_BREAKER(interp, ceval);
 }
 
 
 static inline void
 SIGNAL_PENDING_CALLS(PyInterpreterState *interp)
 {
-    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
-    struct _ceval_state *ceval2 = &interp->ceval;
-    _Py_atomic_store_relaxed(&ceval2->pending.calls_to_do, 1);
-    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
+    struct _ceval_state *ceval = &interp->ceval;
+    _Py_atomic_store_relaxed(&ceval->pending.calls_to_do, 1);
+    COMPUTE_EVAL_BREAKER(interp, ceval);
 }
 
 
 static inline void
 UNSIGNAL_PENDING_CALLS(PyInterpreterState *interp)
 {
-    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
-    struct _ceval_state *ceval2 = &interp->ceval;
-    _Py_atomic_store_relaxed(&ceval2->pending.calls_to_do, 0);
-    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
+    struct _ceval_state *ceval = &interp->ceval;
+    _Py_atomic_store_relaxed(&ceval->pending.calls_to_do, 0);
+    COMPUTE_EVAL_BREAKER(interp, ceval);
 }
 
 
 static inline void
 SIGNAL_PENDING_SIGNALS(PyInterpreterState *interp)
 {
-    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
-    struct _ceval_state *ceval2 = &interp->ceval;
-    _Py_atomic_store_relaxed(&ceval2->signals_pending, 1);
+    struct _ceval_state *ceval = &interp->ceval;
+    _Py_atomic_store_relaxed(&ceval->signals_pending, 1);
     /* eval_breaker is not set to 1 if thread_can_handle_signals() is false */
-    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
+    COMPUTE_EVAL_BREAKER(interp, ceval);
 }
 
 
 static inline void
 UNSIGNAL_PENDING_SIGNALS(PyInterpreterState *interp)
 {
-    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
-    struct _ceval_state *ceval2 = &interp->ceval;
-    _Py_atomic_store_relaxed(&ceval2->signals_pending, 0);
-    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
+    struct _ceval_state *ceval = &interp->ceval;
+    _Py_atomic_store_relaxed(&ceval->signals_pending, 0);
+    COMPUTE_EVAL_BREAKER(interp, ceval);
 }
 
 
@@ -229,10 +222,9 @@ SIGNAL_ASYNC_EXC(PyInterpreterState *interp)
 static inline void
 UNSIGNAL_ASYNC_EXC(PyInterpreterState *interp)
 {
-    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
-    struct _ceval_state *ceval2 = &interp->ceval;
-    ceval2->pending.async_exc = 0;
-    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
+    struct _ceval_state *ceval = &interp->ceval;
+    ceval->pending.async_exc = 0;
+    COMPUTE_EVAL_BREAKER(interp, ceval);
 }
 
 
@@ -357,17 +349,19 @@ PyEval_ReleaseLock(void)
 {
     _PyRuntimeState *runtime = &_PyRuntime;
     PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
+    struct _ceval_state *ceval2 = &tstate->interp->ceval;
     /* This function must succeed when the current thread state is NULL.
        We therefore avoid PyThreadState_Get() which dumps a fatal error
        in debug mode. */
-    drop_gil(&runtime->ceval, tstate);
+    drop_gil(&runtime->ceval, ceval2, tstate);
 }
 
 void
 _PyEval_ReleaseLock(PyThreadState *tstate)
 {
     struct _ceval_runtime_state *ceval = &tstate->interp->runtime->ceval;
-    drop_gil(ceval, tstate);
+    struct _ceval_state *ceval2 = &tstate->interp->ceval;
+    drop_gil(ceval, ceval2, tstate);
 }
 
 void
@@ -393,7 +387,9 @@ PyEval_ReleaseThread(PyThreadState *tstate)
     if (new_tstate != tstate) {
         Py_FatalError("wrong thread state");
     }
-    drop_gil(&runtime->ceval, tstate);
+    struct _ceval_runtime_state *ceval = &runtime->ceval;
+    struct _ceval_state *ceval2 = &tstate->interp->ceval;
+    drop_gil(ceval, ceval2, tstate);
 }
 
 #ifdef HAVE_FORK
@@ -439,13 +435,14 @@ PyThreadState *
 PyEval_SaveThread(void)
 {
     _PyRuntimeState *runtime = &_PyRuntime;
-    struct _ceval_runtime_state *ceval = &runtime->ceval;
 
     PyThreadState *tstate = _PyThreadState_Swap(&runtime->gilstate, NULL);
     ensure_tstate_not_null(__func__, tstate);
 
+    struct _ceval_runtime_state *ceval = &runtime->ceval;
+    struct _ceval_state *ceval2 = &tstate->interp->ceval;
     assert(gil_created(&ceval->gil));
-    drop_gil(ceval, tstate);
+    drop_gil(ceval, ceval2, tstate);
     return tstate;
 }
 
@@ -847,12 +844,12 @@ eval_frame_handle_pending(PyThreadState *tstate)
     }
 
     /* GIL drop request */
-    if (_Py_atomic_load_relaxed(&ceval->gil_drop_request)) {
+    if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request)) {
         /* Give another thread a chance */
         if (_PyThreadState_Swap(&runtime->gilstate, NULL) != tstate) {
             Py_FatalError("tstate mix-up");
         }
-        drop_gil(ceval, tstate);
+        drop_gil(ceval, ceval2, tstate);
 
         /* Other threads may run now */
 
diff --git a/Python/ceval_gil.h b/Python/ceval_gil.h
index a025a9f..db47077 100644
--- a/Python/ceval_gil.h
+++ b/Python/ceval_gil.h
@@ -141,7 +141,8 @@ static void recreate_gil(struct _gil_runtime_state *gil)
 }
 
 static void
-drop_gil(struct _ceval_runtime_state *ceval, PyThreadState *tstate)
+drop_gil(struct _ceval_runtime_state *ceval, struct _ceval_state *ceval2,
+         PyThreadState *tstate)
 {
     struct _gil_runtime_state *gil = &ceval->gil;
     if (!_Py_atomic_load_relaxed(&gil->locked)) {
@@ -163,7 +164,7 @@ drop_gil(struct _ceval_runtime_state *ceval, PyThreadState *tstate)
     MUTEX_UNLOCK(gil->mutex);
 
 #ifdef FORCE_SWITCHING
-    if (_Py_atomic_load_relaxed(&ceval->gil_drop_request) && tstate != NULL) {
+    if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request) && tstate != NULL) {
         MUTEX_LOCK(gil->switch_mutex);
         /* Not switched yet => wait */
         if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) == tstate)
@@ -226,6 +227,7 @@ take_gil(PyThreadState *tstate)
     assert(is_tstate_valid(tstate));
     PyInterpreterState *interp = tstate->interp;
     struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
+    struct _ceval_state *ceval2 = &interp->ceval;
     struct _gil_runtime_state *gil = &ceval->gil;
 
     /* Check that _PyEval_InitThreads() was called to create the lock */
@@ -289,12 +291,12 @@ _ready:
            in take_gil() while the main thread called
           wait_for_thread_shutdown() from Py_Finalize(). */
         MUTEX_UNLOCK(gil->mutex);
-        drop_gil(ceval, tstate);
+        drop_gil(ceval, ceval2, tstate);
         PyThread_exit_thread();
     }
     assert(is_tstate_valid(tstate));
 
-    if (_Py_atomic_load_relaxed(&ceval->gil_drop_request)) {
+    if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request)) {
         RESET_GIL_DROP_REQUEST(interp);
     }
     else {
@@ -303,8 +305,7 @@ _ready:
            handle signals.
 
           Note: RESET_GIL_DROP_REQUEST() calls COMPUTE_EVAL_BREAKER(). */
-        struct _ceval_state *ceval2 = &interp->ceval;
-        COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
+        COMPUTE_EVAL_BREAKER(interp, ceval2);
     }
 
     /* Don't access tstate if the thread must exit */
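
Taken together, the new calling convention is that every drop_gil() caller passes both the runtime-wide ceval state (which still owns the GIL itself) and the per-interpreter ceval state (which now owns gil_drop_request). A minimal sketch of that pattern, mirroring _PyEval_ReleaseLock() above; the helper name release_gil_for() is hypothetical, and the sketch assumes a valid, non-NULL tstate:

/* Sketch of the post-change calling convention (not a new API).
 * Assumes `tstate` is a valid, non-NULL PyThreadState. */
static void
release_gil_for(PyThreadState *tstate)
{
    /* Runtime-wide ceval state: still holds the GIL structure. */
    struct _ceval_runtime_state *ceval = &tstate->interp->runtime->ceval;
    /* Per-interpreter ceval state: now holds gil_drop_request. */
    struct _ceval_state *ceval2 = &tstate->interp->ceval;
    drop_gil(ceval, ceval2, tstate);
}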