path: root/Python/optimizer.c
author     Guido van Rossum <guido@python.org>    2023-06-28 18:28:07 (GMT)
committer  GitHub <noreply@github.com>            2023-06-28 18:28:07 (GMT)
commit     11731434df2d7d29b4260e5ad65b993cea775c36 (patch)
tree       ef3afdb759174a31e607534a0daa3d8a4606bd28 /Python/optimizer.c
parent     c283a0cff5603540f06d9017e484b3602cc62e7c (diff)
gh-104584: Emit macro expansions to opcode_metadata.h (#106163)
This produces longer traces (superblocks?).

Also improved debug output (uop names are now printed instead of numeric opcodes). This would be simpler if the numeric opcode values were generated by generate_cases.py, but that's another project.

Refactored some code in generate_cases.py so the essential algorithm for cache effects is only run once. (Deciding which effects are used and what the total cache size is, regardless of what's used.)
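To illustrate the debug-output change (see the first hunk of the diff below): the name lookup now falls back to the uop name table instead of an empty string for opcodes at or above 256. The standalone C sketch below mirrors that ternary; the table contents, the example uop numbers, and the main() driver are illustrative placeholders, not CPython's generated definitions.

/* Sketch only: CPython's real tables are generated into opcode_metadata.h;
 * the entries and uop ids below are placeholders for illustration. */
#include <stdio.h>

#define MAX_BYTECODE 256   /* regular bytecode opcodes occupy 0..255 */

static const char *_PyOpcode_OpName[MAX_BYTECODE] = {
    [0] = "CACHE", [1] = "POP_TOP",              /* ... */
};
static const char *_PyOpcode_uop_name[512] = {
    [300] = "EXIT_TRACE", [301] = "SAVE_IP",     /* ... placeholder ids */
};

/* Resolve an opcode to a printable name, mirroring the ternary in
 * ADD_TO_TRACE: bytecode opcodes use one table, uops the other. */
static const char *
opcode_name(int opcode)
{
    return opcode < MAX_BYTECODE ? _PyOpcode_OpName[opcode]
                                 : _PyOpcode_uop_name[opcode];
}

int
main(void)
{
    /* Prints "ADD_TO_TRACE(SAVE_IP, 0)" with the placeholder tables above. */
    printf("ADD_TO_TRACE(%s, %llu)\n", opcode_name(301), 0ULL);
    return 0;
}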
Diffstat (limited to 'Python/optimizer.c')
-rw-r--r--  Python/optimizer.c  8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/Python/optimizer.c b/Python/optimizer.c
index 0a6cc5c..9d77ab4 100644
--- a/Python/optimizer.c
+++ b/Python/optimizer.c
@@ -325,8 +325,8 @@ translate_bytecode_to_trace(
}
#define ADD_TO_TRACE(OPCODE, OPERAND) \
if (lltrace >= 2) { \
- const char *opname = (OPCODE) < 256 ? _PyOpcode_OpName[(OPCODE)] : ""; \
- fprintf(stderr, " ADD_TO_TRACE(%s %d, %" PRIu64 ")\n", opname, (OPCODE), (uint64_t)(OPERAND)); \
+ const char *opname = (OPCODE) < 256 ? _PyOpcode_OpName[(OPCODE)] : _PyOpcode_uop_name[(OPCODE)]; \
+ fprintf(stderr, " ADD_TO_TRACE(%s, %" PRIu64 ")\n", opname, (uint64_t)(OPERAND)); \
} \
trace[trace_length].opcode = (OPCODE); \
trace[trace_length].operand = (OPERAND); \
@@ -474,6 +474,8 @@ PyUnstable_Optimizer_NewUOpOptimizer(void)
}
opt->optimize = uop_optimize;
opt->resume_threshold = UINT16_MAX;
- opt->backedge_threshold = 0;
+ // Need at least 3 iterations to settle specializations.
+ // A few lower bits of the counter are reserved for other flags.
+ opt->backedge_threshold = 3 << OPTIMIZER_BITS_IN_COUNTER;
return (PyObject *)opt;
}
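For context on the backedge_threshold change above: the per-backedge counter keeps a few flag bits in its low positions, so the threshold is expressed as an iteration count shifted past those bits. The sketch below illustrates that layout; the value chosen for OPTIMIZER_BITS_IN_COUNTER and the counter arithmetic are assumptions for illustration, not CPython's actual definitions.

#include <stdint.h>
#include <stdio.h>

/* Assumed value for illustration; the real constant is defined in CPython. */
#define OPTIMIZER_BITS_IN_COUNTER 4

int
main(void)
{
    /* Threshold as set in the diff: the iteration count 3, shifted past the
     * reserved low flag bits. */
    uint16_t backedge_threshold = 3 << OPTIMIZER_BITS_IN_COUNTER;

    /* A hypothetical counter after three backedges, with some flag bits set
     * in the low OPTIMIZER_BITS_IN_COUNTER positions. */
    uint16_t counter = (3 << OPTIMIZER_BITS_IN_COUNTER) | 0x5;

    printf("threshold = %u, counter = %u, iteration part = %u\n",
           backedge_threshold, counter,
           counter >> OPTIMIZER_BITS_IN_COUNTER);
    return 0;
}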