author     Benjamin Peterson <benjamin@python.org>   2009-11-20 02:15:50 (GMT)
committer  Benjamin Peterson <benjamin@python.org>   2009-11-20 02:15:50 (GMT)
commit     cef9782810ab6b4e271d55633e8d766525b9d673 (patch)
tree       4514727ae049e619ad9ba0f32708810bded5a99f /Python
parent     009b89d22a02af5120e07fb1e8970bf35f45926f (diff)
turn goto into do while loop
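
The change below is the classic rewrite of a retry goto into a structured loop: a backward goto that re-runs a pass until a counter stops changing becomes a do-while whose condition is the convergence test. A minimal, self-contained sketch of the same transformation, where one_pass() is a hypothetical stand-in for the offset/oparg recomputation in assemble_jump_offsets() (none of these names are CPython APIs):

#include <stdio.h>

static int calls;

/* Hypothetical pass; pretends to converge after three runs. */
static int one_pass(void)
{
    return ++calls < 3 ? calls : 3;
}

/* Before: a label and a backward goto drive the retries. */
static int fixpoint_goto(void)
{
    int count, last_count = 0;
start:
    count = one_pass();
    if (last_count != count) {
        last_count = count;
        goto start;
    }
    return count;
}

/* After: the same retries as a structured do-while.  Note the
 * initialization swaps, as in this commit's declaration change:
 * count starts at 0 and last_count is loaded at the top of each
 * iteration, so the loop condition replaces the compare-and-goto. */
static int fixpoint_do_while(void)
{
    int count = 0, last_count;
    do {
        last_count = count;
        count = one_pass();
    } while (last_count != count);
    return count;
}

int main(void)
{
    calls = 0;
    printf("goto:     %d\n", fixpoint_goto());
    calls = 0;
    printf("do-while: %d\n", fixpoint_do_while());
    return 0;
}

Both versions run the pass at least once and stop when two consecutive passes agree, which is exactly the behavior the diff preserves.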
Diffstat (limited to 'Python')
-rw-r--r--  Python/compile.c  67
1 file changed, 31 insertions(+), 36 deletions(-)
diff --git a/Python/compile.c b/Python/compile.c
index a35cda1..35ee48b 100644
--- a/Python/compile.c
+++ b/Python/compile.c
@@ -3586,49 +3586,47 @@ static void
 assemble_jump_offsets(struct assembler *a, struct compiler *c)
 {
     basicblock *b;
-    int bsize, totsize, extended_arg_count, last_extended_arg_count = 0;
+    int bsize, totsize, extended_arg_count = 0, last_extended_arg_count;
     int i;
 
     /* Compute the size of each block and fixup jump args.
        Replace block pointer with position in bytecode. */
-start:
-    totsize = 0;
-    for (i = a->a_nblocks - 1; i >= 0; i--) {
-        b = a->a_postorder[i];
-        bsize = blocksize(b);
-        b->b_offset = totsize;
-        totsize += bsize;
-    }
-    extended_arg_count = 0;
-    for (b = c->u->u_blocks; b != NULL; b = b->b_list) {
-        bsize = b->b_offset;
-        for (i = 0; i < b->b_iused; i++) {
-            struct instr *instr = &b->b_instr[i];
-            /* Relative jumps are computed relative to
-               the instruction pointer after fetching
-               the jump instruction.
-            */
-            bsize += instrsize(instr);
-            if (instr->i_jabs)
-                instr->i_oparg = instr->i_target->b_offset;
-            else if (instr->i_jrel) {
-                int delta = instr->i_target->b_offset - bsize;
-                instr->i_oparg = delta;
+    do {
+        totsize = 0;
+        for (i = a->a_nblocks - 1; i >= 0; i--) {
+            b = a->a_postorder[i];
+            bsize = blocksize(b);
+            b->b_offset = totsize;
+            totsize += bsize;
+        }
+        last_extended_arg_count = extended_arg_count;
+        extended_arg_count = 0;
+        for (b = c->u->u_blocks; b != NULL; b = b->b_list) {
+            bsize = b->b_offset;
+            for (i = 0; i < b->b_iused; i++) {
+                struct instr *instr = &b->b_instr[i];
+                /* Relative jumps are computed relative to
+                   the instruction pointer after fetching
+                   the jump instruction.
+                */
+                bsize += instrsize(instr);
+                if (instr->i_jabs)
+                    instr->i_oparg = instr->i_target->b_offset;
+                else if (instr->i_jrel) {
+                    int delta = instr->i_target->b_offset - bsize;
+                    instr->i_oparg = delta;
+                }
+                else
+                    continue;
+                if (instr->i_oparg > 0xffff)
+                    extended_arg_count++;
             }
-            else
-                continue;
-            if (instr->i_oparg > 0xffff)
-                extended_arg_count++;
         }
-    }
 
     /* XXX: This is an awful hack that could hurt performance, but
         on the bright side it should work until we come up
         with a better solution.
-
-        In the meantime, should the goto be dropped in favor
-        of a loop?
 
         The issue is that in the first loop blocksize() is called
         which calls instrsize() which requires i_oparg be set
         appropriately.  There is a bootstrap problem because
@@ -3639,10 +3637,7 @@ start:
         ones in jump instructions.  So this should converge
         fairly quickly.
     */
-    if (last_extended_arg_count != extended_arg_count) {
-        last_extended_arg_count = extended_arg_count;
-        goto start;
-    }
+    } while (last_extended_arg_count != extended_arg_count);
 }
 
 static PyObject *
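
The XXX comment explains why the assembler iterates at all: blocksize() sums instrsize() over each block, instrsize() needs every i_oparg, and jump opargs in turn depend on block offsets, so sizes and offsets are mutually dependent. A rough sketch of that size dependency, assuming the 16-bit-argument bytecode format of this era (sketch_instrsize is a hypothetical stand-in, not the real instrsize()):

/* Encoded size depends on the operand value: an oparg above 0xffff
 * needs an EXTENDED_ARG prefix.  Widening one jump can push targets
 * further away, growing other jumps' opargs and forcing another pass. */
static int sketch_instrsize(int has_arg, int oparg)
{
    if (!has_arg)
        return 1;   /* bare opcode */
    if (oparg > 0xffff)
        return 6;   /* EXTENDED_ARG + high word, then opcode + low word */
    return 3;       /* opcode + 16-bit argument */
}

Since only jump instructions can gain EXTENDED_ARG prefixes this way, and each added prefix only pushes offsets further in the same direction, the per-pass count stabilizes after a few iterations, which is the convergence argument the comment makes.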