 Lib/test/test_compile.py | 14
 Python/compile.c         | 88
 2 files changed, 62 insertions(+), 40 deletions(-)
diff --git a/Lib/test/test_compile.py b/Lib/test/test_compile.py
index 21dcc1a..8bf8470 100644
--- a/Lib/test/test_compile.py
+++ b/Lib/test/test_compile.py
@@ -670,10 +670,22 @@ if 1:
self.assertIs(f1.__code__.co_linetable, f2.__code__.co_linetable)
+ @support.cpython_only
+ def test_strip_unused_consts(self):
+ def f():
+ "docstring"
+ if True:
+ return "used"
+ else:
+ return "unused"
+
+ self.assertEqual(f.__code__.co_consts,
+ ("docstring", True, "used"))
+
# Stripping unused constants is not a strict requirement for the
# Python semantics, it's more of an implementation detail.
@support.cpython_only
- def test_strip_unused_consts(self):
+ def test_strip_unused_None(self):
# Python 3.10rc1 appended None to co_consts when None is not used
# at all. See bpo-45056.
def f1():
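
The stripping the new test pins down can be observed from Python directly. A minimal sketch, assuming an interpreter built with this change (the function and its constants below are illustrative):

    import dis

    def g():
        if True:
            return "live"
        else:
            return "dead"   # unreachable once the branch is folded

    # With trim_unused_consts() running on the optimized CFG, the dead
    # constant should no longer survive in the constants tuple.
    assert "dead" not in g.__code__.co_consts
    dis.dis(g)   # no LOAD_CONST for "dead" appears in the output
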
diff --git a/Python/compile.c b/Python/compile.c
index 507fd04..2da36d0 100644
--- a/Python/compile.c
+++ b/Python/compile.c
@@ -912,6 +912,19 @@ cfg_builder_use_label(cfg_builder *g, jump_target_label lbl)
return cfg_builder_maybe_start_new_block(g);
}
+static inline int
+basicblock_append_instructions(basicblock *target, basicblock *source)
+{
+ for (int i = 0; i < source->b_iused; i++) {
+ int n = basicblock_next_instr(target);
+ if (n < 0) {
+ return -1;
+ }
+ target->b_instr[n] = source->b_instr[i];
+ }
+ return 0;
+}
+
static basicblock *
copy_basicblock(cfg_builder *g, basicblock *block)
{
@@ -923,12 +936,8 @@ copy_basicblock(cfg_builder *g, basicblock *block)
if (result == NULL) {
return NULL;
}
- for (int i = 0; i < block->b_iused; i++) {
- int n = basicblock_next_instr(result);
- if (n < 0) {
- return NULL;
- }
- result->b_instr[n] = block->b_instr[i];
+ if (basicblock_append_instructions(result, block) < 0) {
+ return NULL;
}
return result;
}
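
basicblock_append_instructions() factors out the copy loop that copy_basicblock() here and inline_small_exit_blocks() further down both open-coded. A rough Python model of the helper and its two call sites, with a basic block reduced to a plain list of instructions (names are illustrative, not the C API):

    def append_instructions(target, source):
        # basicblock_append_instructions(): copy every instruction of
        # `source` onto the end of `target` (the C version returns -1
        # if growing the target's instruction array fails).
        target.extend(source)

    def copy_basicblock(block):
        result = []                        # cfg_builder_new_block()
        append_instructions(result, block)
        return result

    def inline_small_exit_block(bb, exit_block):
        bb[-1] = "NOP"                     # neutralize the trailing jump
        append_instructions(bb, exit_block)
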
@@ -7080,15 +7089,14 @@ stackdepth(basicblock *entryblock, int code_flags)
if (new_depth > maxdepth) {
maxdepth = new_depth;
}
- assert(depth >= 0); /* invalid code or bug in stackdepth() */
if (HAS_TARGET(instr->i_opcode)) {
effect = stack_effect(instr->i_opcode, instr->i_oparg, 1);
assert(effect != PY_INVALID_STACK_EFFECT);
int target_depth = depth + effect;
+ assert(target_depth >= 0); /* invalid code or bug in stackdepth() */
if (target_depth > maxdepth) {
maxdepth = target_depth;
}
- assert(target_depth >= 0); /* invalid code or bug in stackdepth() */
stackdepth_push(&sp, instr->i_target, target_depth);
}
depth = new_depth;
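
For context: stackdepth() computes the maximum operand-stack depth by walking the CFG with an explicit worklist, and the patch only moves the target-depth assertion ahead of the maxdepth update, so an invalid depth is caught before it is used. A simplified Python model (one stack effect per instruction and no fall-through edges; the real pass distinguishes jump and fall-through effects):

    def stackdepth(blocks, entry):
        # blocks: {name: [(stack_effect, jump_target_or_None), ...]}
        start_depth = {name: -1 for name in blocks}    # -1 = unvisited
        maxdepth, worklist = 0, [(entry, 0)]
        while worklist:
            name, depth = worklist.pop()
            if start_depth[name] >= 0:
                continue                   # block already scheduled
            start_depth[name] = depth
            for effect, target in blocks[name]:
                depth += effect
                assert depth >= 0, "invalid code or bug in stackdepth()"
                maxdepth = max(maxdepth, depth)
                if target is not None:
                    worklist.append((target, depth))
        return maxdepth

    # A push in block A, popped by the jump target B:
    print(stackdepth({"A": [(1, "B")], "B": [(-1, None)]}, "A"))   # 1
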
@@ -7487,6 +7495,9 @@ convert_exception_handlers_to_nops(basicblock *entryblock) {
}
}
}
+ for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
+ remove_redundant_nops(b);
+ }
}
static inline void
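
convert_exception_handlers_to_nops() now finishes by scrubbing the redundant NOPs it just created, instead of leaving that to a later loop in assemble() (see the hunk removing that loop below). Roughly what the scrub does, with an instruction modeled as an (opcode, lineno) pair; this is a coarse approximation, since the real remove_redundant_nops() keeps any NOP still needed to carry line-number information:

    def remove_redundant_nops(block):
        kept, prev_lineno = [], None
        for opcode, lineno in block:
            if opcode == "NOP" and (lineno is None or lineno == prev_lineno):
                continue                   # adds no new line info: drop it
            kept.append((opcode, lineno))
            if lineno is not None:
                prev_lineno = lineno
        block[:] = kept
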
@@ -7964,8 +7975,8 @@ scan_block_for_local(int target, basicblock *b, bool unsafe_to_start,
#undef MAYBE_PUSH
static int
-add_checks_for_loads_of_unknown_variables(basicblock *entryblock,
- struct compiler *c)
+add_checks_for_loads_of_uninitialized_variables(basicblock *entryblock,
+ struct compiler *c)
{
basicblock **stack = make_cfg_traversal_stack(entryblock);
if (stack == NULL) {
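
The new name describes what the pass actually guards against: loads of locals that may still be unbound on some path (in current CPython, the LOAD_FAST vs. LOAD_FAST_CHECK distinction). The Python-level behavior it backs, as a hypothetical demo that is not part of the patch:

    def demo(flag):
        if flag:
            x = 1
        return x       # on the flag=False path, x was never bound

    try:
        demo(False)
    except UnboundLocalError as exc:
        print(exc)     # the checked load raises instead of misbehaving
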
@@ -8291,7 +8302,7 @@ dump_basicblock(const basicblock *b)
static int
-calculate_jump_targets(basicblock *entryblock);
+translate_jump_labels_to_targets(basicblock *entryblock);
static int
optimize_cfg(cfg_builder *g, PyObject *consts, PyObject *const_cache);
@@ -8628,11 +8639,9 @@ assemble(struct compiler *c, int addNone)
}
nlocalsplus -= numdropped;
- consts = consts_dict_keys_inorder(c->u->u_consts);
- if (consts == NULL) {
- goto error;
- }
- if (calculate_jump_targets(g->g_entryblock)) {
+ /** Preprocessing **/
+ /* Map labels to targets and mark exception handlers */
+ if (translate_jump_labels_to_targets(g->g_entryblock)) {
goto error;
}
if (mark_except_handlers(g->g_entryblock) < 0) {
@@ -8641,18 +8650,31 @@ assemble(struct compiler *c, int addNone)
if (label_exception_targets(g->g_entryblock)) {
goto error;
}
+
+ /** Optimization **/
+ consts = consts_dict_keys_inorder(c->u->u_consts);
+ if (consts == NULL) {
+ goto error;
+ }
if (optimize_cfg(g, consts, c->c_const_cache)) {
goto error;
}
- if (trim_unused_consts(g->g_entryblock, consts)) {
+ if (add_checks_for_loads_of_uninitialized_variables(g->g_entryblock, c) < 0) {
goto error;
}
+
+ /** Line numbers (TODO: move this before the optimization stage) **/
if (duplicate_exits_without_lineno(g) < 0) {
goto error;
}
propagate_line_numbers(g->g_entryblock);
guarantee_lineno_for_exits(g->g_entryblock, c->u->u_firstlineno);
+ if (push_cold_blocks_to_end(g, code_flags) < 0) {
+ goto error;
+ }
+
+ /** Assembly **/
int maxdepth = stackdepth(g->g_entryblock, code_flags);
if (maxdepth < 0) {
goto error;
@@ -8661,27 +8683,19 @@ assemble(struct compiler *c, int addNone)
convert_exception_handlers_to_nops(g->g_entryblock);
- if (push_cold_blocks_to_end(g, code_flags) < 0) {
- goto error;
- }
- for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
- remove_redundant_nops(b);
- }
-
/* Order of basic blocks must have been determined by now */
if (normalize_jumps(g) < 0) {
goto error;
}
- if (add_checks_for_loads_of_unknown_variables(g->g_entryblock, c) < 0) {
- goto error;
- }
-
assert(no_redundant_jumps(g));
/* Can't modify the bytecode after computing jump offsets. */
assemble_jump_offsets(g->g_entryblock);
+ if (trim_unused_consts(g->g_entryblock, consts)) {
+ goto error;
+ }
/* Create assembler */
if (!assemble_init(&a, c->u->u_firstlineno))
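
Taken together, the assemble() hunks pin down an explicit stage order: label-to-target translation and exception-handler marking up front, constants materialized only once optimization needs them, cold blocks pushed to the end while line numbers are still being fixed up, and trim_unused_consts() deferred until after jump offsets freeze the bytecode. A condensed restatement of the post-patch order (pass names as in the diff; error handling elided):

    ASSEMBLE_PASS_ORDER = [
        # Preprocessing
        "translate_jump_labels_to_targets",
        "mark_except_handlers",
        "label_exception_targets",
        # Optimization
        "consts_dict_keys_inorder",
        "optimize_cfg",
        "add_checks_for_loads_of_uninitialized_variables",
        # Line numbers (TODO in the patch: move before optimization)
        "duplicate_exits_without_lineno",
        "propagate_line_numbers",
        "guarantee_lineno_for_exits",
        "push_cold_blocks_to_end",
        # Assembly
        "stackdepth",
        "convert_exception_handlers_to_nops",   # now also strips NOPs
        "normalize_jumps",                      # block order is final here
        "assemble_jump_offsets",                # bytecode frozen after this
        "trim_unused_consts",
    ]
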
@@ -9265,12 +9279,8 @@ inline_small_exit_blocks(basicblock *bb) {
basicblock *target = last->i_target;
if (basicblock_exits_scope(target) && target->b_iused <= MAX_COPY_SIZE) {
last->i_opcode = NOP;
- for (int i = 0; i < target->b_iused; i++) {
- int index = basicblock_next_instr(bb);
- if (index < 0) {
- return -1;
- }
- bb->b_instr[index] = target->b_instr[i];
+ if (basicblock_append_instructions(bb, target) < 0) {
+ return -1;
}
return 1;
}
@@ -9456,7 +9466,7 @@ propagate_line_numbers(basicblock *entryblock) {
/* Calculate the actual jump target from the target_label */
static int
-calculate_jump_targets(basicblock *entryblock)
+translate_jump_labels_to_targets(basicblock *entryblock)
{
int max_label = -1;
for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
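
The definition gets the same rename. The job is a two-pass rewrite: record which block carries each label, then point every jump instruction at that block directly. A small self-contained model, with dataclasses standing in for basicblock and instr (illustrative, not the C layout):

    from dataclasses import dataclass, field

    @dataclass
    class Instr:
        opcode: str
        target_label: int = -1      # label id on jump instructions
        target: "Block" = None      # resolved by the pass below

    @dataclass
    class Block:
        label: int = -1             # -1: block carries no label
        instrs: list = field(default_factory=list)

    def translate_jump_labels_to_targets(blocks):
        by_label = {b.label: b for b in blocks if b.label >= 0}
        for b in blocks:
            for i in b.instrs:
                if i.target_label >= 0:
                    i.target = by_label[i.target_label]
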
@@ -9599,12 +9609,14 @@ is_exit_without_lineno(basicblock *b) {
static int
duplicate_exits_without_lineno(cfg_builder *g)
{
+ assert(no_empty_basic_blocks(g));
/* Copy all exit blocks without line number that are targets of a jump.
*/
basicblock *entryblock = g->g_entryblock;
for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
struct instr *last = basicblock_last_instr(b);
- if (last != NULL && is_jump(last)) {
+ assert(last != NULL);
+ if (is_jump(last)) {
basicblock *target = last->i_target;
if (is_exit_without_lineno(target) && target->b_predecessors > 1) {
basicblock *new_target = copy_basicblock(g, target);
@@ -9621,8 +9633,6 @@ duplicate_exits_without_lineno(cfg_builder *g)
}
}
- assert(no_empty_basic_blocks(g));
-
/* Any remaining reachable exit blocks without line number can only be reached by
* fall through, and thus can only have a single predecessor */
for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
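
With empty blocks asserted away on entry, every block has a last instruction, so the old NULL check becomes an assert. The duplication itself, modeled loosely with blocks as dicts (field names here are illustrative): when a jump lands on a line-less exit block shared by several predecessors, the block is cloned so the copy can take the jump's own line number.

    import copy

    def duplicate_exits_without_lineno(blocks):
        # blocks: list of dicts such as {"instrs": [...], "exits_scope": True,
        # "lineno": None, "predecessors": 2}
        for b in list(blocks):              # iterate a snapshot; we append
            last = b["instrs"][-1]          # no empty blocks: always present
            t = last.get("target")          # None unless `last` is a jump
            if (t and t["exits_scope"] and t["lineno"] is None
                    and t["predecessors"] > 1):
                clone = copy.deepcopy(t)    # copy_basicblock()
                clone["lineno"] = last["lineno"]   # inherit the jump's line
                clone["predecessors"] = 1
                t["predecessors"] -= 1
                last["target"] = clone
                blocks.append(clone)        # real code links it via b_next
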
@@ -9775,7 +9785,7 @@ _PyCompile_OptimizeCfg(PyObject *instructions, PyObject *consts)
if (const_cache == NULL) {
goto error;
}
- if (calculate_jump_targets(g.g_entryblock)) {
+ if (translate_jump_labels_to_targets(g.g_entryblock)) {
goto error;
}
if (optimize_cfg(&g, consts, const_cache) < 0) {