Diffstat (limited to 'Python')
-rw-r--r--  Python/ceval.c     | 203
-rw-r--r--  Python/compile.c   |  45
-rw-r--r--  Python/frozen.c    |  12
-rw-r--r--  Python/import.c    |   6
-rw-r--r--  Python/traceback.c |  11
5 files changed, 199 insertions, 78 deletions
diff --git a/Python/ceval.c b/Python/ceval.c index 50ea9c3..6985846 100644 --- a/Python/ceval.c +++ b/Python/ceval.c @@ -51,6 +51,9 @@ static int call_trace(Py_tracefunc, PyObject *, PyFrameObject *, static void call_trace_protected(Py_tracefunc, PyObject *, PyFrameObject *, int); static void call_exc_trace(Py_tracefunc, PyObject *, PyFrameObject *); +static void maybe_call_line_trace(int, Py_tracefunc, PyObject *, + PyFrameObject *, int *, int *); + static PyObject *apply_slice(PyObject *, PyObject *, PyObject *); static int assign_slice(PyObject *, PyObject *, PyObject *, PyObject *); @@ -499,6 +502,16 @@ eval_frame(PyFrameObject *f) PyObject *retval = NULL; /* Return value */ PyThreadState *tstate = PyThreadState_GET(); PyCodeObject *co; + + /* when tracing we set things up so that + + not (instr_lb <= current_bytecode_offset < instr_ub) + + is true when the line being executed has changed. The + initial values are such as to make this false the first + time it is tested. */ + int instr_ub = -1, instr_lb = 0; + unsigned char *first_instr; PyObject *names; PyObject *consts; @@ -586,7 +599,12 @@ eval_frame(PyFrameObject *f) fastlocals = f->f_localsplus; freevars = f->f_localsplus + f->f_nlocals; _PyCode_GETCODEPTR(co, &first_instr); - next_instr = first_instr + f->f_lasti; + if (f->f_lasti < 0) { + next_instr = first_instr; + } + else { + next_instr = first_instr + f->f_lasti; + } stack_pointer = f->f_stacktop; assert(stack_pointer != NULL); f->f_stacktop = NULL; /* remains NULL unless yield suspends frame */ @@ -637,8 +655,9 @@ eval_frame(PyFrameObject *f) w = NULL; for (;;) { - assert(stack_pointer >= f->f_valuestack); /* else underflow */ - assert(STACK_LEVEL() <= f->f_stacksize); /* else overflow */ + assert(stack_pointer >= f->f_valuestack); /* else underflow */ + assert(STACK_LEVEL() <= f->f_stacksize); /* else overflow */ + /* Do periodic things. Doing this every time through the loop would add too much overhead, so we do it only every Nth instruction. We also do it if @@ -658,8 +677,8 @@ eval_frame(PyFrameObject *f) #if !defined(HAVE_SIGNAL_H) || defined(macintosh) /* If we have true signals, the signal handler will call Py_AddPendingCall() so we don't - have to call sigcheck(). On the Mac and - DOS, alas, we have to call it. */ + have to call PyErr_CheckSignals(). On the + Mac and DOS, alas, we have to call it. */ if (PyErr_CheckSignals()) { why = WHY_EXCEPTION; goto on_error; @@ -686,9 +705,7 @@ eval_frame(PyFrameObject *f) fast_next_opcode: /* Extract opcode and argument */ -#if defined(Py_DEBUG) || defined(LLTRACE) f->f_lasti = INSTR_OFFSET(); -#endif opcode = NEXTOP(); if (HAS_ARG(opcode)) @@ -708,15 +725,26 @@ eval_frame(PyFrameObject *f) if (lltrace) { if (HAS_ARG(opcode)) { printf("%d: %d, %d\n", - (int) (INSTR_OFFSET() - 3), - opcode, oparg); + f->f_lasti, opcode, oparg); } else { printf("%d: %d\n", - (int) (INSTR_OFFSET() - 1), opcode); + f->f_lasti, opcode); } } #endif + + /* line-by-line tracing support */ + + if (tstate->c_tracefunc != NULL && !tstate->tracing) { + /* see maybe_call_line_trace + for expository comments */ + maybe_call_line_trace(opcode, + tstate->c_tracefunc, + tstate->c_traceobj, + f, &instr_lb, &instr_ub); + } + /* Main switch on opcode */ switch (opcode) { @@ -728,26 +756,6 @@ eval_frame(PyFrameObject *f) /* case STOP_CODE: this is an error! 
*/ - case SET_LINENO: -#ifdef LLTRACE - if (lltrace) - printf("--- %s:%d \n", filename, oparg); -#endif - f->f_lineno = oparg; - if (tstate->c_tracefunc == NULL || tstate->tracing) - goto fast_next_opcode; - /* Trace each line of code reached */ - f->f_lasti = INSTR_OFFSET(); - /* Inline call_trace() for performance: */ - tstate->tracing++; - tstate->use_tracing = 0; - err = (tstate->c_tracefunc)(tstate->c_traceobj, f, - PyTrace_LINE, Py_None); - tstate->use_tracing = (tstate->c_tracefunc - || tstate->c_profilefunc); - tstate->tracing--; - break; - case LOAD_FAST: x = GETLOCAL(oparg); if (x != NULL) { @@ -1504,9 +1512,17 @@ eval_frame(PyFrameObject *f) why = WHY_RETURN; break; + case RETURN_NONE: + retval = Py_None; + Py_INCREF(retval); + why = WHY_RETURN; + break; + case YIELD_VALUE: retval = POP(); f->f_stacktop = stack_pointer; + /* abuse the lasti field: here it points to + the *next* instruction */ f->f_lasti = INSTR_OFFSET(); why = WHY_YIELD; break; @@ -1954,7 +1970,6 @@ eval_frame(PyFrameObject *f) int n = na + 2 * nk; PyObject **pfunc = stack_pointer - n - 1; PyObject *func = *pfunc; - f->f_lasti = INSTR_OFFSET() - 3; /* For tracing */ /* Always dispatch PyCFunction first, because these are presumed to be the most frequent @@ -2022,7 +2037,6 @@ eval_frame(PyFrameObject *f) n++; pfunc = stack_pointer - n - 1; func = *pfunc; - f->f_lasti = INSTR_OFFSET() - 3; /* For tracing */ if (PyMethod_Check(func) && PyMethod_GET_SELF(func) != NULL) { @@ -2134,7 +2148,8 @@ eval_frame(PyFrameObject *f) default: fprintf(stderr, "XXX lineno: %d, opcode: %d\n", - f->f_lineno, opcode); + PyCode_Addr2Line(f->f_code, f->f_lasti), + opcode); PyErr_SetString(PyExc_SystemError, "unknown opcode"); why = WHY_EXCEPTION; break; @@ -2189,9 +2204,6 @@ eval_frame(PyFrameObject *f) /* Log traceback info if this is a real exception */ if (why == WHY_EXCEPTION) { - f->f_lasti = INSTR_OFFSET() - 1; - if (HAS_ARG(opcode)) - f->f_lasti -= 2; PyTraceBack_Here(f); if (tstate->c_tracefunc != NULL) @@ -2875,6 +2887,125 @@ call_trace(Py_tracefunc func, PyObject *obj, PyFrameObject *frame, return result; } +static void +maybe_call_line_trace(int opcode, Py_tracefunc func, PyObject *obj, + PyFrameObject *frame, int *instr_lb, int *instr_ub) +{ + /* The theory of SET_LINENO-less tracing. + + In a nutshell, we use the co_lnotab field of the code object + to tell when execution has moved onto a different line. + + As mentioned above, the basic idea is so set things up so + that + + *instr_lb <= frame->f_lasti < *instr_ub + + is true so long as execution does not change lines. + + This is all fairly simple. Digging the information out of + co_lnotab takes some work, but is conceptually clear. + + Somewhat harder to explain is why we don't call the line + trace function when executing a POP_TOP or RETURN_NONE + opcodes. An example probably serves best. + + Consider this code: + + 1: def f(a): + 2: if a: + 3: print 1 + 4: else: + 5: print 2 + + which compiles to this: + + 2 0 LOAD_FAST 0 (a) + 3 JUMP_IF_FALSE 9 (to 15) + 6 POP_TOP + + 3 7 LOAD_CONST 1 (1) + 10 PRINT_ITEM + 11 PRINT_NEWLINE + 12 JUMP_FORWARD 6 (to 21) + >> 15 POP_TOP + + 5 16 LOAD_CONST 2 (2) + 19 PRINT_ITEM + 20 PRINT_NEWLINE + >> 21 RETURN_NONE + + If a is false, execution will jump to instruction at offset + 15 and the co_lnotab will claim that execution has moved to + line 3. This is at best misleading. In this case we could + associate the POP_TOP with line 4, but that doesn't make + sense in all cases (I think). 
+ + On the other hand, if a is true, execution will jump from + instruction offset 12 to offset 21. Then the co_lnotab would + imply that execution has moved to line 5, which is again + misleading. + + This is why it is important that RETURN_NONE is *only* used + for the "falling off the end of the function" form of + returning None -- using it for code like + + 1: def f(): + 2: return + + would, once again, lead to misleading tracing behaviour. + + It is also worth mentioning that getting tracing behaviour + right is the *entire* motivation for adding the RETURN_NONE + opcode. + */ + + if (opcode != POP_TOP && opcode != RETURN_NONE && + (frame->f_lasti < *instr_lb || frame->f_lasti > *instr_ub)) { + PyCodeObject* co = frame->f_code; + int size, addr; + unsigned char* p; + + call_trace(func, obj, frame, PyTrace_LINE, Py_None); + + size = PyString_Size(co->co_lnotab) / 2; + p = (unsigned char*)PyString_AsString(co->co_lnotab); + + /* possible optimization: if f->f_lasti == instr_ub + (likely to be a common case) then we already know + instr_lb -- if we stored the matching value of p + somwhere we could skip the first while loop. */ + + addr = 0; + + /* see comments in compile.c for the description of + co_lnotab. A point to remember: increments to p + should come in pairs -- although we don't care about + the line increments here, treating them as byte + increments gets confusing, to say the least. */ + + while (size >= 0) { + if (addr + *p > frame->f_lasti) + break; + addr += *p++; + p++; + --size; + } + *instr_lb = addr; + if (size > 0) { + while (--size >= 0) { + addr += *p++; + if (*p++) + break; + } + *instr_ub = addr; + } + else { + *instr_ub = INT_MAX; + } + } +} + void PyEval_SetProfile(Py_tracefunc func, PyObject *arg) { diff --git a/Python/compile.c b/Python/compile.c index 5b4e8e6..ff8f4a5 100644 --- a/Python/compile.c +++ b/Python/compile.c @@ -407,9 +407,10 @@ PyCode_New(int argcount, int nlocals, int stacksize, int flags, /* All about c_lnotab. -c_lnotab is an array of unsigned bytes disguised as a Python string. In -O -mode, SET_LINENO opcodes aren't generated, and bytecode offsets are mapped -to source code line #s (when needed for tracebacks) via c_lnotab instead. +c_lnotab is an array of unsigned bytes disguised as a Python string. Since +version 2.3, SET_LINENO opcodes are never generated and bytecode offsets are +mapped to source code line #s via c_lnotab instead. + The array is conceptually a list of (bytecode offset increment, line number increment) pairs. 
The details are important and delicate, best illustrated by example: @@ -830,11 +831,6 @@ static void com_addoparg(struct compiling *c, int op, int arg) { int extended_arg = arg >> 16; - if (op == SET_LINENO) { - com_set_lineno(c, arg); - if (Py_OptimizeFlag) - return; - } if (extended_arg){ com_addbyte(c, EXTENDED_ARG); com_addint(c, extended_arg); @@ -1738,7 +1734,7 @@ com_call_function(struct compiling *c, node *n) break; if (ch->n_lineno != lineno) { lineno = ch->n_lineno; - com_addoparg(c, SET_LINENO, lineno); + com_set_lineno(c, lineno); } com_argument(c, ch, &keywords); if (keywords == NULL) @@ -3168,7 +3164,7 @@ com_if_stmt(struct compiling *c, node *n) continue; } if (i > 0) - com_addoparg(c, SET_LINENO, ch->n_lineno); + com_set_lineno(c, ch->n_lineno); com_node(c, ch); com_addfwref(c, JUMP_IF_FALSE, &a); com_addbyte(c, POP_TOP); @@ -3195,7 +3191,7 @@ com_while_stmt(struct compiling *c, node *n) com_addfwref(c, SETUP_LOOP, &break_anchor); block_push(c, SETUP_LOOP); c->c_begin = c->c_nexti; - com_addoparg(c, SET_LINENO, n->n_lineno); + com_set_lineno(c, n->n_lineno); com_node(c, CHILD(n, 1)); com_addfwref(c, JUMP_IF_FALSE, &anchor); com_addbyte(c, POP_TOP); @@ -3228,7 +3224,7 @@ com_for_stmt(struct compiling *c, node *n) com_node(c, CHILD(n, 3)); com_addbyte(c, GET_ITER); c->c_begin = c->c_nexti; - com_addoparg(c, SET_LINENO, n->n_lineno); + com_set_lineno(c, n->n_lineno); com_addfwref(c, FOR_ITER, &anchor); com_push(c, 1); com_assign(c, CHILD(n, 1), OP_ASSIGN, NULL); @@ -3339,7 +3335,7 @@ com_try_except(struct compiling *c, node *n) } except_anchor = 0; com_push(c, 3); /* tb, val, exc pushed by exception */ - com_addoparg(c, SET_LINENO, ch->n_lineno); + com_set_lineno(c, ch->n_lineno); if (NCH(ch) > 1) { com_addbyte(c, DUP_TOP); com_push(c, 1); @@ -3401,7 +3397,7 @@ com_try_finally(struct compiling *c, node *n) com_push(c, 3); com_backpatch(c, finally_anchor); ch = CHILD(n, NCH(n)-1); - com_addoparg(c, SET_LINENO, ch->n_lineno); + com_set_lineno(c, ch->n_lineno); com_node(c, ch); com_addbyte(c, END_FINALLY); block_pop(c, END_FINALLY); @@ -3727,7 +3723,7 @@ com_node(struct compiling *c, node *n) case simple_stmt: /* small_stmt (';' small_stmt)* [';'] NEWLINE */ - com_addoparg(c, SET_LINENO, n->n_lineno); + com_set_lineno(c, n->n_lineno); { int i; for (i = 0; i < NCH(n)-1; i += 2) @@ -3736,7 +3732,7 @@ com_node(struct compiling *c, node *n) break; case compound_stmt: - com_addoparg(c, SET_LINENO, n->n_lineno); + com_set_lineno(c, n->n_lineno); n = CHILD(n, 0); goto loop; @@ -3990,10 +3986,7 @@ compile_funcdef(struct compiling *c, node *n) c->c_infunction = 1; com_node(c, CHILD(n, 4)); c->c_infunction = 0; - com_addoparg(c, LOAD_CONST, com_addconst(c, Py_None)); - com_push(c, 1); - com_addbyte(c, RETURN_VALUE); - com_pop(c, 1); + com_addbyte(c, RETURN_NONE); } static void @@ -4050,7 +4043,7 @@ compile_classdef(struct compiling *c, node *n) static void compile_node(struct compiling *c, node *n) { - com_addoparg(c, SET_LINENO, n->n_lineno); + com_set_lineno(c, n->n_lineno); switch (TYPE(n)) { @@ -4060,19 +4053,13 @@ compile_node(struct compiling *c, node *n) n = CHILD(n, 0); if (TYPE(n) != NEWLINE) com_node(c, n); - com_addoparg(c, LOAD_CONST, com_addconst(c, Py_None)); - com_push(c, 1); - com_addbyte(c, RETURN_VALUE); - com_pop(c, 1); + com_addbyte(c, RETURN_NONE); c->c_interactive--; break; case file_input: /* A whole file, or built-in function exec() */ com_file_input(c, n); - com_addoparg(c, LOAD_CONST, com_addconst(c, Py_None)); - com_push(c, 1); - com_addbyte(c, RETURN_VALUE); - 
com_pop(c, 1); + com_addbyte(c, RETURN_NONE); break; case eval_input: /* Built-in function input() */ diff --git a/Python/frozen.c b/Python/frozen.c index 069ce3c..946d626 100644 --- a/Python/frozen.c +++ b/Python/frozen.c @@ -13,12 +13,12 @@ static unsigned char M___hello__[] = { 99,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0, - 0,115,15,0,0,0,127,0,0,127,1,0,100,0,0,71, - 72,100,1,0,83,40,2,0,0,0,115,14,0,0,0,72, - 101,108,108,111,32,119,111,114,108,100,46,46,46,78,40,0, - 0,0,0,40,0,0,0,0,40,0,0,0,0,40,0,0, - 0,0,115,8,0,0,0,104,101,108,108,111,46,112,121,115, - 1,0,0,0,63,1,0,0,0,115,0,0,0,0, + 0,115,9,0,0,0,100,0,0,71,72,100,1,0,83,40, + 2,0,0,0,115,14,0,0,0,72,101,108,108,111,32,119, + 111,114,108,100,46,46,46,78,40,0,0,0,0,40,0,0, + 0,0,40,0,0,0,0,40,0,0,0,0,115,8,0,0, + 0,104,101,108,108,111,46,112,121,115,1,0,0,0,63,1, + 0,0,0,115,0,0,0,0, }; #define SIZE (int)sizeof(M___hello__) diff --git a/Python/import.c b/Python/import.c index 6fd05d8..36ca705 100644 --- a/Python/import.c +++ b/Python/import.c @@ -49,6 +49,9 @@ extern time_t PyOS_GetLastModificationTime(char *, FILE *); algorithm relying on the above scheme. Perhaps we should simply start counting in increments of 10 from now on ?! + MWH, 2002-08-03: Removed SET_LINENO. Couldn't be bothered figuring + out the MAGIC schemes, so just incremented it by 10. + Known values: Python 1.5: 20121 Python 1.5.1: 20121 @@ -60,8 +63,9 @@ extern time_t PyOS_GetLastModificationTime(char *, FILE *); Python 2.1.2: 60202 Python 2.2: 60717 Python 2.3a0: 62011 + Python 2.3a0: 62021 */ -#define MAGIC (62011 | ((long)'\r'<<16) | ((long)'\n'<<24)) +#define MAGIC (62021 | ((long)'\r'<<16) | ((long)'\n'<<24)) /* Magic word as global; note that _PyImport_Init() can change the value of this global to accommodate for alterations of how the diff --git a/Python/traceback.c b/Python/traceback.c index de918f9..27c69b6 100644 --- a/Python/traceback.c +++ b/Python/traceback.c @@ -103,8 +103,7 @@ PyTypeObject PyTraceBack_Type = { }; static tracebackobject * -newtracebackobject(tracebackobject *next, PyFrameObject *frame, int lasti, - int lineno) +newtracebackobject(tracebackobject *next, PyFrameObject *frame) { tracebackobject *tb; if ((next != NULL && !PyTraceBack_Check(next)) || @@ -118,8 +117,9 @@ newtracebackobject(tracebackobject *next, PyFrameObject *frame, int lasti, tb->tb_next = next; Py_XINCREF(frame); tb->tb_frame = frame; - tb->tb_lasti = lasti; - tb->tb_lineno = lineno; + tb->tb_lasti = frame->f_lasti; + tb->tb_lineno = PyCode_Addr2Line(frame->f_code, + frame->f_lasti); PyObject_GC_Track(tb); } return tb; @@ -130,8 +130,7 @@ PyTraceBack_Here(PyFrameObject *frame) { PyThreadState *tstate = frame->f_tstate; tracebackobject *oldtb = (tracebackobject *) tstate->curexc_traceback; - tracebackobject *tb = newtracebackobject(oldtb, - frame, frame->f_lasti, frame->f_lineno); + tracebackobject *tb = newtracebackobject(oldtb, frame); if (tb == NULL) return -1; tstate->curexc_traceback = (PyObject *)tb; |
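
The lnotab walk that maybe_call_line_trace performs, and the PyCode_Addr2Line lookup that traceback.c now relies on, can be imitated from Python code. The sketch below is illustrative only and is not part of the patch: the helper names (lnotab_pairs, addr2line, addr_bounds) are made up here, and it assumes the classic co_lnotab format documented in compile.c, i.e. unsigned (address increment, line increment) byte pairs with no negative line deltas. On current interpreters dis.findlinestarts() or code.co_lines() are the supported ways to get the same mapping.

    def lnotab_pairs(code):
        # Decode co_lnotab into absolute (bytecode offset, line number)
        # pairs, following the byte-pair encoding described in compile.c.
        addr, line = 0, code.co_firstlineno
        yield addr, line
        tab = code.co_lnotab
        for i in range(0, len(tab) - 1, 2):
            addr += tab[i]
            line += tab[i + 1]
            yield addr, line

    def addr2line(code, lasti):
        # Rough Python analogue of PyCode_Addr2Line(code, lasti): the line
        # recorded for the last table entry whose offset is <= lasti.
        line = code.co_firstlineno
        for addr, lno in lnotab_pairs(code):
            if addr > lasti:
                break
            line = lno
        return line

    def addr_bounds(code, lasti):
        # The [instr_lb, instr_ub) window that eval_frame keeps: the line
        # trace hook need not fire again until f_lasti leaves this range.
        pairs = list(lnotab_pairs(code))
        lb_index = 0
        for i, (addr, _) in enumerate(pairs):
            if addr <= lasti:
                lb_index = i
            else:
                break
        lb_addr, lb_line = pairs[lb_index]
        for addr, line in pairs[lb_index + 1:]:
            if line != lb_line:
                return lb_addr, addr
        return lb_addr, float("inf")   # past the last line; the C code uses INT_MAX

Under that format assumption, addr2line(frame.f_code, frame.f_lasti) reproduces what the new newtracebackobject stores in tb_lineno, and addr_bounds mirrors how instr_lb/instr_ub are refreshed when the current offset falls outside the previous window.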
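What the patch has to preserve, observably, is that sys.settrace() clients still receive a 'line' event whenever execution reaches a new source line, even though no SET_LINENO opcode is ever executed. A minimal demo of that behaviour (the tracer below is written for this note, not taken from the patch):

    import sys

    def f(a):
        if a:
            print(1)
        else:
            print(2)

    def show_lines(frame, event, arg):
        # Report line events for f only; a debugger such as pdb is built
        # on exactly these events.
        if event == "line" and frame.f_code is f.__code__:
            print("line event at line", frame.f_lineno)
        return show_lines

    sys.settrace(show_lines)
    f(False)
    sys.settrace(None)

This should report one line event for the "if a:" test and one for the print in the else branch; the point of the POP_TOP / RETURN_NONE special-casing in maybe_call_line_trace is that the jump into the else branch does not also report a spurious visit to the line of the taken-branch print.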
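The MAGIC bump in import.c is what forces stale .pyc files, whose bytecode may still contain SET_LINENO, to be regenerated rather than loaded. The word is the 16-bit value or-ed with '\r' and '\n' in the upper bytes and written little-endian as the first four bytes of a .pyc. A small sketch of the encoding, not taken from the patch (importlib.util.MAGIC_NUMBER is the modern spelling; imp.get_magic() was the 2.x one):

    import importlib.util
    import struct

    MAGIC_23A0 = 62021 | (ord('\r') << 16) | (ord('\n') << 24)

    # First four bytes of a 2.3a0 .pyc after this change: b'E\xf2\r\n'
    print(struct.pack('<L', MAGIC_23A0))

    # Whatever magic the running interpreter stamps on its own .pyc files.
    print(importlib.util.MAGIC_NUMBER)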