author     Eric Snow <ericsnowcurrently@gmail.com>    2017-09-08 05:51:28 (GMT)
committer  GitHub <noreply@github.com>                2017-09-08 05:51:28 (GMT)
commit     2ebc5ce42a8a9e047e790aefbf9a94811569b2b6 (patch)
tree       f8c483f24e0d1ee43ac5cc9ad82d2ee7cccf69d2 /Python/ceval.c
parent     bab21faded31c70b142776b9a6075a4cda055d7f (diff)
bpo-30860: Consolidate stateful runtime globals. (#3397)
* group the (stateful) runtime globals into various topical structs
* consolidate the topical structs under a single top-level _PyRuntimeState struct
* add a check-c-globals.py script that helps identify runtime globals

Other globals are excluded (see globals.txt and check-c-globals.py).
Diffstat (limited to 'Python/ceval.c')
-rw-r--r--  Python/ceval.c  156
1 file changed, 76 insertions, 80 deletions
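
For orientation before the diff itself: every file-static removed below reappears as a field under _PyRuntime.ceval. A rough sketch of that state, reconstructed from the field accesses in this diff (the authoritative definitions live in the new internal headers pulled in by the "internal/pystate.h" include, and may differ in naming and layout):

/* Inferred from the accesses in this diff; not the literal header. */
struct _pending_calls {
    unsigned long main_thread;          /* was: static main_thread */
    PyThread_type_lock lock;            /* was: static pending_lock */
    _Py_atomic_int calls_to_do;         /* was: static pendingcalls_to_do */
    int async_exc;                      /* was: static pending_async_exc */
    struct {
        int (*func)(void *);
        void *arg;
    } calls[NPENDINGCALLS];             /* was: static pendingcalls[] */
    int first;                          /* was: static pendingfirst */
    int last;                           /* was: static pendinglast */
};

struct _ceval_runtime_state {
    int recursion_limit;                /* was: static recursion_limit */
    int tracing_possible;               /* was: static _Py_TracingPossible */
    _Py_atomic_int eval_breaker;        /* was: static eval_breaker */
    _Py_atomic_int gil_drop_request;    /* was: static gil_drop_request */
    struct _pending_calls pending;
    struct _gil_runtime_state gil;      /* seeded via _gil_initialize() */
};

_PyEval_Initialize(), added near the end of this diff, initializes recursion_limit and the GIL state for this struct.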
diff --git a/Python/ceval.c b/Python/ceval.c
index a072a5f..e583e27 100644
--- a/Python/ceval.c
+++ b/Python/ceval.c
@@ -10,6 +10,7 @@
#define PY_LOCAL_AGGRESSIVE
#include "Python.h"
+#include "internal/pystate.h"
#include "code.h"
#include "dictobject.h"
@@ -36,7 +37,8 @@ extern int _PyObject_GetMethod(PyObject *, PyObject *, PyObject **);
typedef PyObject *(*callproc)(PyObject *, PyObject *, PyObject *);
/* Forward declarations */
-Py_LOCAL_INLINE(PyObject *) call_function(PyObject ***, Py_ssize_t, PyObject *);
+Py_LOCAL_INLINE(PyObject *) call_function(PyObject ***, Py_ssize_t,
+ PyObject *);
static PyObject * do_call_core(PyObject *, PyObject *, PyObject *);
#ifdef LLTRACE
@@ -52,13 +54,15 @@ static int call_trace_protected(Py_tracefunc, PyObject *,
static void call_exc_trace(Py_tracefunc, PyObject *,
PyThreadState *, PyFrameObject *);
static int maybe_call_line_trace(Py_tracefunc, PyObject *,
- PyThreadState *, PyFrameObject *, int *, int *, int *);
+ PyThreadState *, PyFrameObject *,
+ int *, int *, int *);
static void maybe_dtrace_line(PyFrameObject *, int *, int *, int *);
static void dtrace_function_entry(PyFrameObject *);
static void dtrace_function_return(PyFrameObject *);
static PyObject * cmp_outcome(int, PyObject *, PyObject *);
-static PyObject * import_name(PyFrameObject *, PyObject *, PyObject *, PyObject *);
+static PyObject * import_name(PyFrameObject *, PyObject *, PyObject *,
+ PyObject *);
static PyObject * import_from(PyObject *, PyObject *);
static int import_all_from(PyObject *, PyObject *);
static void format_exc_check_arg(PyObject *, const char *, PyObject *);
@@ -87,72 +91,60 @@ static long dxp[256];
#endif
#endif
-#define GIL_REQUEST _Py_atomic_load_relaxed(&gil_drop_request)
+#define GIL_REQUEST _Py_atomic_load_relaxed(&_PyRuntime.ceval.gil_drop_request)
/* This can set eval_breaker to 0 even though gil_drop_request became
1. We believe this is all right because the eval loop will release
the GIL eventually anyway. */
#define COMPUTE_EVAL_BREAKER() \
_Py_atomic_store_relaxed( \
- &eval_breaker, \
+ &_PyRuntime.ceval.eval_breaker, \
GIL_REQUEST | \
- _Py_atomic_load_relaxed(&pendingcalls_to_do) | \
- pending_async_exc)
+ _Py_atomic_load_relaxed(&_PyRuntime.ceval.pending.calls_to_do) | \
+ _PyRuntime.ceval.pending.async_exc)
#define SET_GIL_DROP_REQUEST() \
do { \
- _Py_atomic_store_relaxed(&gil_drop_request, 1); \
- _Py_atomic_store_relaxed(&eval_breaker, 1); \
+ _Py_atomic_store_relaxed(&_PyRuntime.ceval.gil_drop_request, 1); \
+ _Py_atomic_store_relaxed(&_PyRuntime.ceval.eval_breaker, 1); \
} while (0)
#define RESET_GIL_DROP_REQUEST() \
do { \
- _Py_atomic_store_relaxed(&gil_drop_request, 0); \
+ _Py_atomic_store_relaxed(&_PyRuntime.ceval.gil_drop_request, 0); \
COMPUTE_EVAL_BREAKER(); \
} while (0)
/* Pending calls are only modified under pending_lock */
#define SIGNAL_PENDING_CALLS() \
do { \
- _Py_atomic_store_relaxed(&pendingcalls_to_do, 1); \
- _Py_atomic_store_relaxed(&eval_breaker, 1); \
+ _Py_atomic_store_relaxed(&_PyRuntime.ceval.pending.calls_to_do, 1); \
+ _Py_atomic_store_relaxed(&_PyRuntime.ceval.eval_breaker, 1); \
} while (0)
#define UNSIGNAL_PENDING_CALLS() \
do { \
- _Py_atomic_store_relaxed(&pendingcalls_to_do, 0); \
+ _Py_atomic_store_relaxed(&_PyRuntime.ceval.pending.calls_to_do, 0); \
COMPUTE_EVAL_BREAKER(); \
} while (0)
#define SIGNAL_ASYNC_EXC() \
do { \
- pending_async_exc = 1; \
- _Py_atomic_store_relaxed(&eval_breaker, 1); \
+ _PyRuntime.ceval.pending.async_exc = 1; \
+ _Py_atomic_store_relaxed(&_PyRuntime.ceval.eval_breaker, 1); \
} while (0)
#define UNSIGNAL_ASYNC_EXC() \
- do { pending_async_exc = 0; COMPUTE_EVAL_BREAKER(); } while (0)
-
+ do { \
+ _PyRuntime.ceval.pending.async_exc = 0; \
+ COMPUTE_EVAL_BREAKER(); \
+ } while (0)
-/* This single variable consolidates all requests to break out of the fast path
- in the eval loop. */
-static _Py_atomic_int eval_breaker = {0};
-/* Request for running pending calls. */
-static _Py_atomic_int pendingcalls_to_do = {0};
-/* Request for looking at the `async_exc` field of the current thread state.
- Guarded by the GIL. */
-static int pending_async_exc = 0;
#ifdef HAVE_ERRNO_H
#include <errno.h>
#endif
#include "pythread.h"
-
-static PyThread_type_lock pending_lock = 0; /* for pending calls */
-static unsigned long main_thread = 0;
-/* Request for dropping the GIL */
-static _Py_atomic_int gil_drop_request = {0};
-
#include "ceval_gil.h"
int
@@ -168,9 +160,9 @@ PyEval_InitThreads(void)
return;
create_gil();
take_gil(PyThreadState_GET());
- main_thread = PyThread_get_thread_ident();
- if (!pending_lock)
- pending_lock = PyThread_allocate_lock();
+ _PyRuntime.ceval.pending.main_thread = PyThread_get_thread_ident();
+ if (!_PyRuntime.ceval.pending.lock)
+ _PyRuntime.ceval.pending.lock = PyThread_allocate_lock();
}
void
@@ -238,9 +230,9 @@ PyEval_ReInitThreads(void)
if (!gil_created())
return;
recreate_gil();
- pending_lock = PyThread_allocate_lock();
+ _PyRuntime.ceval.pending.lock = PyThread_allocate_lock();
take_gil(current_tstate);
- main_thread = PyThread_get_thread_ident();
+ _PyRuntime.ceval.pending.main_thread = PyThread_get_thread_ident();
/* Destroy all threads except the current one */
_PyThreadState_DeleteExcept(current_tstate);
@@ -279,7 +271,7 @@ PyEval_RestoreThread(PyThreadState *tstate)
int err = errno;
take_gil(tstate);
/* _Py_Finalizing is protected by the GIL */
- if (_Py_Finalizing && tstate != _Py_Finalizing) {
+ if (_Py_IsFinalizing() && !_Py_CURRENTLY_FINALIZING(tstate)) {
drop_gil(tstate);
PyThread_exit_thread();
assert(0); /* unreachable */
@@ -326,19 +318,11 @@ _PyEval_SignalReceived(void)
callback.
*/
-#define NPENDINGCALLS 32
-static struct {
- int (*func)(void *);
- void *arg;
-} pendingcalls[NPENDINGCALLS];
-static int pendingfirst = 0;
-static int pendinglast = 0;
-
int
Py_AddPendingCall(int (*func)(void *), void *arg)
{
int i, j, result=0;
- PyThread_type_lock lock = pending_lock;
+ PyThread_type_lock lock = _PyRuntime.ceval.pending.lock;
/* try a few times for the lock. Since this mechanism is used
* for signal handling (on the main thread), there is a (slim)
@@ -360,14 +344,14 @@ Py_AddPendingCall(int (*func)(void *), void *arg)
return -1;
}
- i = pendinglast;
+ i = _PyRuntime.ceval.pending.last;
j = (i + 1) % NPENDINGCALLS;
- if (j == pendingfirst) {
+ if (j == _PyRuntime.ceval.pending.first) {
result = -1; /* Queue full */
} else {
- pendingcalls[i].func = func;
- pendingcalls[i].arg = arg;
- pendinglast = j;
+ _PyRuntime.ceval.pending.calls[i].func = func;
+ _PyRuntime.ceval.pending.calls[i].arg = arg;
+ _PyRuntime.ceval.pending.last = j;
}
/* signal main loop */
SIGNAL_PENDING_CALLS();
@@ -385,16 +369,19 @@ Py_MakePendingCalls(void)
assert(PyGILState_Check());
- if (!pending_lock) {
+ if (!_PyRuntime.ceval.pending.lock) {
/* initial allocation of the lock */
- pending_lock = PyThread_allocate_lock();
- if (pending_lock == NULL)
+ _PyRuntime.ceval.pending.lock = PyThread_allocate_lock();
+ if (_PyRuntime.ceval.pending.lock == NULL)
return -1;
}
/* only service pending calls on main thread */
- if (main_thread && PyThread_get_thread_ident() != main_thread)
+ if (_PyRuntime.ceval.pending.main_thread &&
+ PyThread_get_thread_ident() != _PyRuntime.ceval.pending.main_thread)
+ {
return 0;
+ }
/* don't perform recursive pending calls */
if (busy)
return 0;
@@ -416,16 +403,16 @@ Py_MakePendingCalls(void)
void *arg = NULL;
/* pop one item off the queue while holding the lock */
- PyThread_acquire_lock(pending_lock, WAIT_LOCK);
- j = pendingfirst;
- if (j == pendinglast) {
+ PyThread_acquire_lock(_PyRuntime.ceval.pending.lock, WAIT_LOCK);
+ j = _PyRuntime.ceval.pending.first;
+ if (j == _PyRuntime.ceval.pending.last) {
func = NULL; /* Queue empty */
} else {
- func = pendingcalls[j].func;
- arg = pendingcalls[j].arg;
- pendingfirst = (j + 1) % NPENDINGCALLS;
+ func = _PyRuntime.ceval.pending.calls[j].func;
+ arg = _PyRuntime.ceval.pending.calls[j].arg;
+ _PyRuntime.ceval.pending.first = (j + 1) % NPENDINGCALLS;
}
- PyThread_release_lock(pending_lock);
+ PyThread_release_lock(_PyRuntime.ceval.pending.lock);
/* having released the lock, perform the callback */
if (func == NULL)
break;
@@ -444,26 +431,33 @@ error:
return -1;
}
-
/* The interpreter's recursion limit */
#ifndef Py_DEFAULT_RECURSION_LIMIT
#define Py_DEFAULT_RECURSION_LIMIT 1000
#endif
-static int recursion_limit = Py_DEFAULT_RECURSION_LIMIT;
+
int _Py_CheckRecursionLimit = Py_DEFAULT_RECURSION_LIMIT;
+void
+_PyEval_Initialize(struct _ceval_runtime_state *state)
+{
+ state->recursion_limit = Py_DEFAULT_RECURSION_LIMIT;
+ _Py_CheckRecursionLimit = Py_DEFAULT_RECURSION_LIMIT;
+ _gil_initialize(&state->gil);
+}
+
int
Py_GetRecursionLimit(void)
{
- return recursion_limit;
+ return _PyRuntime.ceval.recursion_limit;
}
void
Py_SetRecursionLimit(int new_limit)
{
- recursion_limit = new_limit;
- _Py_CheckRecursionLimit = recursion_limit;
+ _PyRuntime.ceval.recursion_limit = new_limit;
+ _Py_CheckRecursionLimit = _PyRuntime.ceval.recursion_limit;
}
/* the macro Py_EnterRecursiveCall() only calls _Py_CheckRecursiveCall()
@@ -475,6 +469,7 @@ int
_Py_CheckRecursiveCall(const char *where)
{
PyThreadState *tstate = PyThreadState_GET();
+ int recursion_limit = _PyRuntime.ceval.recursion_limit;
#ifdef USE_STACKCHECK
if (PyOS_CheckStack()) {
@@ -522,13 +517,7 @@ static void restore_and_clear_exc_state(PyThreadState *, PyFrameObject *);
static int do_raise(PyObject *, PyObject *);
static int unpack_iterable(PyObject *, int, int, PyObject **);
-/* Records whether tracing is on for any thread. Counts the number of
- threads for which tstate->c_tracefunc is non-NULL, so if the value
- is 0, we know we don't have to check this thread's c_tracefunc.
- This speeds up the if statement in PyEval_EvalFrameEx() after
- fast_next_opcode*/
-static int _Py_TracingPossible = 0;
-
+#define _Py_TracingPossible _PyRuntime.ceval.tracing_possible
PyObject *
@@ -659,7 +648,7 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
#define DISPATCH() \
{ \
- if (!_Py_atomic_load_relaxed(&eval_breaker)) { \
+ if (!_Py_atomic_load_relaxed(&_PyRuntime.ceval.eval_breaker)) { \
FAST_DISPATCH(); \
} \
continue; \
@@ -707,7 +696,8 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
/* Code access macros */
/* The integer overflow is checked by an assertion below. */
-#define INSTR_OFFSET() (sizeof(_Py_CODEUNIT) * (int)(next_instr - first_instr))
+#define INSTR_OFFSET() \
+ (sizeof(_Py_CODEUNIT) * (int)(next_instr - first_instr))
#define NEXTOPARG() do { \
_Py_CODEUNIT word = *next_instr; \
opcode = _Py_OPCODE(word); \
@@ -960,7 +950,7 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
async I/O handler); see Py_AddPendingCall() and
Py_MakePendingCalls() above. */
- if (_Py_atomic_load_relaxed(&eval_breaker)) {
+ if (_Py_atomic_load_relaxed(&_PyRuntime.ceval.eval_breaker)) {
if (_Py_OPCODE(*next_instr) == SETUP_FINALLY ||
_Py_OPCODE(*next_instr) == YIELD_FROM) {
/* Two cases where we skip running signal handlers and other
@@ -977,11 +967,15 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
*/
goto fast_next_opcode;
}
- if (_Py_atomic_load_relaxed(&pendingcalls_to_do)) {
+ if (_Py_atomic_load_relaxed(
+ &_PyRuntime.ceval.pending.calls_to_do))
+ {
if (Py_MakePendingCalls() < 0)
goto error;
}
- if (_Py_atomic_load_relaxed(&gil_drop_request)) {
+ if (_Py_atomic_load_relaxed(
+ &_PyRuntime.ceval.gil_drop_request))
+ {
/* Give another thread a chance */
if (PyThreadState_Swap(NULL) != tstate)
Py_FatalError("ceval: tstate mix-up");
@@ -992,7 +986,9 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
take_gil(tstate);
/* Check if we should make a quick exit. */
- if (_Py_Finalizing && _Py_Finalizing != tstate) {
+ if (_Py_IsFinalizing() &&
+ !_Py_CURRENTLY_FINALIZING(tstate))
+ {
drop_gil(tstate);
PyThread_exit_thread();
}
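
Aside on the queue mechanics carried over unchanged above: Py_AddPendingCall() and Py_MakePendingCalls() still treat calls[NPENDINGCALLS] as a ring buffer; only the storage moved into _PyRuntime.ceval.pending. The queue is full when advancing the write index would collide with the read index, and empty when the two are equal, so at most NPENDINGCALLS - 1 entries are usable. A minimal standalone sketch of that discipline (hypothetical demo names, not CPython code):

#include <stdio.h>

#define NQUEUE 32  /* mirrors NPENDINGCALLS; demo only */

static int first = 0, last = 0;   /* demo ring-buffer indices */
static int slots[NQUEUE];         /* demo payload */

static int enqueue(int v)
{
    int j = (last + 1) % NQUEUE;
    if (j == first)
        return -1;                /* full: same check as Py_AddPendingCall */
    slots[last] = v;
    last = j;
    return 0;
}

static int dequeue(int *v)
{
    if (first == last)
        return -1;                /* empty: same check as Py_MakePendingCalls */
    *v = slots[first];
    first = (first + 1) % NQUEUE;
    return 0;
}

int main(void)
{
    int v;
    enqueue(42);
    while (dequeue(&v) == 0)
        printf("%d\n", v);        /* prints 42 */
    return 0;
}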