author    Guido van Rossum <guido@python.org>  2023-11-01 20:13:02 (GMT)
committer GitHub <noreply@github.com>          2023-11-01 20:13:02 (GMT)
commit 7e135a48d619407cd4b2a6d80a4ce204b2f5f938 (patch)
tree   e0f063e3993696fc700092f50a1cee81f97974ff /Python/optimizer.c
parent 5d6db168b9cda58b4897763041a6109b93e421cb (diff)
gh-111520: Integrate the Tier 2 interpreter in the Tier 1 interpreter (#111428)
- There is no longer a separate Python/executor.c file.
- Conventions in Python/bytecodes.c are slightly different -- don't use `goto error`; use `GOTO_ERROR(error)` instead (same for others like `unused_local_error`). A hedged sketch of such a macro follows this list.
- The `TIER_ONE` and `TIER_TWO` symbols are only valid in the generated (.c.h) files.
- In Lib/test/support/__init__.py, `Py_C_RECURSION_LIMIT` is imported from `_testcapi`.
- On Windows, in debug mode, stack allocation grows from 8MiB to 12MiB.
- **Beware!** This changes the env vars that enable uops and their debugging to `PYTHON_UOPS` and `PYTHON_LLTRACE`.
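The `GOTO_ERROR(error)` rule exists because the same bytecodes.c source is expanded into both tiers, and the error exit may need to land on a different label in the Tier 2 generated code. The following is only a hedged sketch of what such a tier-dispatching macro could look like; it is not the actual CPython definition, and the `_tier_two` label suffix is an assumption for illustration.

    /* Hypothetical sketch only -- not the real CPython macros.
     * TIER_ONE / TIER_TWO are only defined in the generated (.c.h) files,
     * so each expansion can pick the error label that exists in that file. */
    #if defined(TIER_ONE)
    #  define GOTO_ERROR(LABEL) goto LABEL
    #elif defined(TIER_TWO)
    #  define GOTO_ERROR(LABEL) goto LABEL ## _tier_two  /* assumed label suffix */
    #endif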
Diffstat (limited to 'Python/optimizer.c')
-rw-r--r--  Python/optimizer.c  21
1 file changed, 15 insertions, 6 deletions
diff --git a/Python/optimizer.c b/Python/optimizer.c
index c239b03..0e5b437 100644
--- a/Python/optimizer.c
+++ b/Python/optimizer.c
@@ -433,10 +433,10 @@ translate_bytecode_to_trace(
int trace_stack_depth = 0;
#ifdef Py_DEBUG
- char *uop_debug = Py_GETENV("PYTHONUOPSDEBUG");
+ char *python_lltrace = Py_GETENV("PYTHON_LLTRACE");
int lltrace = 0;
- if (uop_debug != NULL && *uop_debug >= '0') {
- lltrace = *uop_debug - '0'; // TODO: Parse an int and all that
+ if (python_lltrace != NULL && *python_lltrace >= '0') {
+ lltrace = *python_lltrace - '0'; // TODO: Parse an int and all that
}
#endif
@@ -881,10 +881,10 @@ remove_unneeded_uops(_PyUOpInstruction *trace, int trace_length)
if (dest < last_instr) {
int new_trace_length = move_stubs(trace, dest, last_instr, trace_length);
#ifdef Py_DEBUG
- char *uop_debug = Py_GETENV("PYTHONUOPSDEBUG");
+ char *python_lltrace = Py_GETENV("PYTHON_LLTRACE");
int lltrace = 0;
- if (uop_debug != NULL && *uop_debug >= '0') {
- lltrace = *uop_debug - '0'; // TODO: Parse an int and all that
+ if (python_lltrace != NULL && *python_lltrace >= '0') {
+ lltrace = *python_lltrace - '0'; // TODO: Parse an int and all that
}
if (lltrace >= 2) {
printf("Optimized trace (length %d+%d = %d, saved %d):\n",
@@ -939,6 +939,15 @@ uop_optimize(
return 1;
}
+/* Dummy execute() function for Uop Executor.
+ * The actual implementation is inlined in ceval.c,
+ * in _PyEval_EvalFrameDefault(). */
+_PyInterpreterFrame *
+_PyUopExecute(_PyExecutorObject *executor, _PyInterpreterFrame *frame, PyObject **stack_pointer)
+{
+ Py_FatalError("Tier 2 is now inlined into Tier 1");
+}
+
static void
uop_opt_dealloc(PyObject *self) {
PyObject_Free(self);
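
The `// TODO: Parse an int and all that` comment left in both hunks could eventually be replaced by parsing the whole `PYTHON_LLTRACE` value rather than only its first digit. Below is a minimal sketch of one way to do that, assuming only a small non-negative level is wanted; `parse_lltrace_level` is a hypothetical helper for illustration, not part of this commit.

    /* Hedged sketch: parse the entire PYTHON_LLTRACE value with strtol
     * instead of reading just the first digit. Helper name is hypothetical. */
    #include <limits.h>
    #include <stdlib.h>

    static int
    parse_lltrace_level(const char *python_lltrace)
    {
        if (python_lltrace == NULL || *python_lltrace == '\0') {
            return 0;                       /* unset or empty: tracing off */
        }
        char *end = NULL;
        long level = strtol(python_lltrace, &end, 10);
        if (end == python_lltrace || level < 0) {
            return 0;                       /* not a number, or negative: off */
        }
        return level > INT_MAX ? INT_MAX : (int)level;
    }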