author		Raymond Hettinger <python@rcn.com>	2003-04-24 05:45:23 (GMT)
committer	Raymond Hettinger <python@rcn.com>	2003-04-24 05:45:23 (GMT)
commit		f4cf76dd5eabd8af5d2a866da1f75fa745aa6f29 (patch)
tree		706a194da1728cb6e6907e4a74351df2bcccfc39 /Python
parent		7d618c731c4505ab92f085f97faa475ac3ad72d4 (diff)
Revert the previous enhancement to the bytecode optimizer.
The additional code complexity and new NOP opcode were not worth it.
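
For context: even after this revert, the optimizer still rewrites the two- and three-element tuple-swap patterns in place. Because the code string may not change length and the NOP opcode is gone, the shorter replacement is padded with bytes that a JUMP_FORWARD skips. Below is a minimal standalone C sketch of that trick on a toy code array; the opcode numbers are hypothetical, and only the two-byte little-endian oparg layout mirrors compile.c's SETARG.

    #include <stdio.h>

    /* Hypothetical opcode numbers, for illustration only; the real
       values live in CPython's Include/opcode.h. */
    enum {
        BUILD_TUPLE = 1, UNPACK_SEQUENCE, ROT_TWO, JUMP_FORWARD,
        DUP_TOP, POP_TOP, RETURN_VALUE
    };

    /* Same shape as compile.c's SETARG: a 16-bit oparg stored
       little-endian in the two bytes after the opcode. */
    #define SETARG(arr, i, val) \
        ((arr)[(i)+2] = (val) >> 8, (arr)[(i)+1] = (val) & 255)

    int main(void)
    {
        /* a, b = b, a compiles to BUILD_TUPLE 2; UNPACK_SEQUENCE 2
           (six bytes before the final RETURN_VALUE). */
        unsigned char code[] = {BUILD_TUPLE, 2, 0,
                                UNPACK_SEQUENCE, 2, 0, RETURN_VALUE};

        /* The in-place rewrite kept by this commit: ROT_TWO, then
           JUMP_FORWARD 2 over two filler bytes that are never executed
           (DUP_TOP/POP_TOP stand in for the removed NOP). */
        code[0] = ROT_TWO;
        code[1] = JUMP_FORWARD;
        SETARG(code, 1, 2);
        code[4] = DUP_TOP;
        code[5] = POP_TOP;

        for (size_t i = 0; i < sizeof code; i++)
            printf("%u ", code[i]);   /* prints: 3 4 2 0 5 6 7 */
        putchar('\n');
        return 0;
    }

Either way the code string keeps its original length, so no jump targets or line-number entries need fixing up; the revert simply trades the dedicated NOP opcode for filler bytes that control flow never reaches.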
Diffstat (limited to 'Python')
-rw-r--r--	Python/ceval.c   |  3
-rw-r--r--	Python/compile.c | 92
2 files changed, 9 insertions, 86 deletions
diff --git a/Python/ceval.c b/Python/ceval.c
index 7f8f654..3ea1bdc 100644
--- a/Python/ceval.c
+++ b/Python/ceval.c
@@ -873,9 +873,6 @@ eval_frame(PyFrameObject *f)
 
 		/* case STOP_CODE: this is an error! */
 
-		case NOP:
-			goto fast_next_opcode;
-
 		case LOAD_FAST:
 			x = GETLOCAL(oparg);
 			if (x != NULL) {
diff --git a/Python/compile.c b/Python/compile.c
index 4afd0eb..57f0edb 100644
--- a/Python/compile.c
+++ b/Python/compile.c
@@ -328,43 +328,6 @@ intern_strings(PyObject *tuple)
 #define ABSOLUTE_JUMP(op) (op==JUMP_ABSOLUTE || op==CONTINUE_LOOP)
 #define GETJUMPTGT(arr, i) (GETARG(arr,i) + (ABSOLUTE_JUMP(arr[i]) ? 0 : i+3))
 #define SETARG(arr, i, val) arr[i+2] = val>>8; arr[i+1] = val & 255
-#define CODESIZE(op) (HAS_ARG(op) ? 3 : 1)
-#define ISBASICBLOCK(blocks, start, bytes) (blocks[start]==blocks[start+bytes-1])
-
-static unsigned int *
-markblocks(unsigned char *code, int len)
-{
-	unsigned int *blocks = PyMem_Malloc(len*sizeof(int));
-	int i,j, opcode, oldblock, newblock, blockcnt = 0;
-
-	if (blocks == NULL)
-		return NULL;
-	memset(blocks, 0, len*sizeof(int));
-	for (i=0 ; i<len ; i+=CODESIZE(opcode)) {
-		opcode = code[i];
-		switch (opcode) {
-			case FOR_ITER:
-			case JUMP_FORWARD:
-			case JUMP_IF_FALSE:
-			case JUMP_IF_TRUE:
-			case JUMP_ABSOLUTE:
-			case CONTINUE_LOOP:
-			case SETUP_LOOP:
-			case SETUP_EXCEPT:
-			case SETUP_FINALLY:
-				j = GETJUMPTGT(code, i);
-				oldblock = blocks[j];
-				newblock = ++blockcnt;
-				for (; j<len ; j++) {
-					if (blocks[j] != (unsigned)oldblock)
-						break;
-					blocks[j] = newblock;
-				}
-				break;
-		}
-	}
-	return blocks;
-}
 
 static PyObject *
 optimize_code(PyObject *code, PyObject* consts)
@@ -372,24 +335,18 @@ optimize_code(PyObject *code, PyObject* consts)
 	int i, j, codelen;
 	int tgt, tgttgt, opcode;
 	unsigned char *codestr;
-	unsigned int *blocks;
 
 	/* Make a modifiable copy of the code string */
 	if (!PyString_Check(code))
 		goto exitUnchanged;
 	codelen = PyString_Size(code);
 	codestr = PyMem_Malloc(codelen);
-	if (codestr == NULL) 
+	if (codestr == NULL)
 		goto exitUnchanged;
 	codestr = memcpy(codestr, PyString_AS_STRING(code), codelen);
-	blocks = markblocks(codestr, codelen);
-	if (blocks == NULL) {
-		PyMem_Free(codestr);
-		goto exitUnchanged;
-	}
 	assert(PyTuple_Check(consts));
 
-	for (i=0 ; i<codelen ; i += CODESIZE(codestr[i])) {
+	for (i=0 ; i<codelen-7 ; i += HAS_ARG(codestr[i]) ? 3 : 1) {
 		opcode = codestr[i];
 
 		switch (opcode) {
@@ -406,8 +363,8 @@ optimize_code(PyObject *code, PyObject* consts)
 				SETARG(codestr, i, 4);
 			break;
 
-		/* Replace BUILD_SEQN 2 UNPACK_SEQN 2 with ROT2 JMP+2 NOP NOP.
-		   Replace BUILD_SEQN 3 UNPACK_SEQN 3 with ROT3 ROT2 JMP+1 NOP.
+		/* Replace BUILD_SEQN 2 UNPACK_SEQN 2 with ROT2 JMP+2.
+		   Replace BUILD_SEQN 3 UNPACK_SEQN 3 with ROT3 ROT2 JMP+1.
 		   Note, these opcodes occur together only in assignment
 		   statements.  Accordingly, the unpack opcode is never
 		   a jump target. */
@@ -420,8 +377,8 @@ optimize_code(PyObject *code, PyObject* consts)
 				codestr[i] = ROT_TWO;
 				codestr[i+1] = JUMP_FORWARD;
 				SETARG(codestr, i+1, 2);
-				codestr[i+4] = NOP;
-				codestr[i+5] = NOP;
+				codestr[i+4] = DUP_TOP;	/* Filler codes used as NOPs */
+				codestr[i+5] = POP_TOP;
 				continue;
 			}
 			if (GETARG(codestr, i) == 3 && \
@@ -429,41 +386,11 @@ optimize_code(PyObject *code, PyObject* consts)
 				codestr[i] = ROT_THREE;
 				codestr[i+1] = ROT_TWO;
 				codestr[i+2] = JUMP_FORWARD;
-				SETARG(codestr, i+2, 1); 
-				codestr[i+5] = NOP;
+				SETARG(codestr, i+2, 1);
+				codestr[i+5] = DUP_TOP;
 			}
 			break;
 
-		/* Simplify inverted tests.
-		   Must verify that sequence is a basic block because the jump
-		   can itself be a jump target.  Also, must verify that *both*
-		   jump alternatives go to a POP_TOP.  Otherwise, the code will
-		   expect the stack value to have been inverted. */
-		case UNARY_NOT:
-			if (codestr[i+1] != JUMP_IF_FALSE  || \
-			    codestr[i+4] != POP_TOP  || \
-			    !ISBASICBLOCK(blocks,i,5))
-				continue;
-			tgt = GETJUMPTGT(codestr, (i+1));
-			if (codestr[tgt] != POP_TOP)
-				continue;
-			codestr[i] = NOP;
-			codestr[i+1] = JUMP_IF_TRUE;
-			break;
-
-		/* not a is b -->  a is not b
-		   not a in b -->  a not in b
-		   not a is not b -->  a is b
-		   not a not in b -->  a in b */
-		case COMPARE_OP:
-			j = GETARG(codestr, i);
-			if (codestr[i+3] != UNARY_NOT  ||  j < 6  || \
-			    j > 9  ||  !ISBASICBLOCK(blocks,i,4))
-				continue;
-			SETARG(codestr, i, (j^1));
-			codestr[i+3] = NOP;
-			break;
-
 		/* Replace jumps to unconditional jumps */
 		case FOR_ITER:
 		case JUMP_FORWARD:
@@ -475,7 +402,7 @@ optimize_code(PyObject *code, PyObject* consts)
 		case SETUP_EXCEPT:
 		case SETUP_FINALLY:
 			tgt = GETJUMPTGT(codestr, i);
-			if (!UNCONDITIONAL_JUMP(codestr[tgt])) 
+			if (!UNCONDITIONAL_JUMP(codestr[tgt]))
 				continue;
 			tgttgt = GETJUMPTGT(codestr, tgt);
 			if (opcode == JUMP_FORWARD) /* JMP_ABS can go backwards */
@@ -495,7 +422,6 @@ optimize_code(PyObject *code, PyObject* consts)
 	}
 	code = PyString_FromStringAndSize(codestr, codelen);
 	PyMem_Free(codestr);
-	PyMem_Free(blocks);
 	return code;
 
 exitUnchanged:
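
Among the cases that survive the revert is "Replace jumps to unconditional jumps" (jump threading) near the end of the diff above. Here is a minimal sketch under the same toy encoding; the opcode numbers are hypothetical, while the GETARG/SETARG/GETJUMPTGT macros are reproduced in the shape compile.c uses.

    #include <stdio.h>

    /* Hypothetical opcode numbers for illustration only. */
    enum { STOP = 0, JUMP_FORWARD = 1, JUMP_ABSOLUTE = 2 };

    /* Macros in the same shape as compile.c's, for a standalone demo. */
    #define GETARG(arr, i) ((int)(((arr)[(i)+2] << 8) + (arr)[(i)+1]))
    #define SETARG(arr, i, val) \
        ((arr)[(i)+2] = (val) >> 8, (arr)[(i)+1] = (val) & 255)
    #define UNCONDITIONAL_JUMP(op) ((op) == JUMP_ABSOLUTE || (op) == JUMP_FORWARD)
    #define ABSOLUTE_JUMP(op) ((op) == JUMP_ABSOLUTE)
    #define GETJUMPTGT(arr, i) \
        (GETARG(arr, i) + (ABSOLUTE_JUMP((arr)[i]) ? 0 : (i) + 3))

    int main(void)
    {
        /* 0: JUMP_FORWARD +0   (lands on offset 3)
           3: JUMP_ABSOLUTE 9   (an unconditional jump to offset 9)
           6: STOP ...
           9: STOP */
        unsigned char code[12] = {0};
        code[0] = JUMP_FORWARD;  SETARG(code, 0, 0);
        code[3] = JUMP_ABSOLUTE; SETARG(code, 3, 9);

        int tgt = GETJUMPTGT(code, 0);            /* 3 */
        if (UNCONDITIONAL_JUMP(code[tgt])) {
            int tgttgt = GETJUMPTGT(code, tgt);   /* 9 */
            /* JUMP_FORWARD stores a relative target, so rebias as the
               original does: tgttgt -= i + 3. */
            SETARG(code, 0, tgttgt - (0 + 3));
        }
        printf("jump at 0 now lands at %d\n", GETJUMPTGT(code, 0));  /* 9 */
        return 0;
    }

As the original's comment "JMP_ABS can go backwards" notes, only the relative JUMP_FORWARD needs the i+3 rebias before storing; an absolute jump can be threaded to any offset, including an earlier one.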