| author | Guido van Rossum <guido@python.org> | 2007-10-10 18:44:39 (GMT) |
|---|---|---|
| committer | Guido van Rossum <guido@python.org> | 2007-10-10 18:44:39 (GMT) |
| commit | 641591cffe09210b46b8f14b62831a17469995cd (patch) | |
| tree | feece0fed8c818d81a91a36ebcf02dcb52a539c2 /Parser | |
| parent | feea0786a2f2d38b99c1867a4e6f53e8e764905b (diff) | |
Fix an issue in PyTokenizer_RestoreEncoding(), which was handling a PyBytes
object with PyString calls and not checking for errors. This caused the display
of syntax errors to be malformed.
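The helper dec_utf8() hands back a bytes object, so its result has to be read with the PyBytes accessors, and a failed decode has to be reported through a NULL return instead of being printed. A minimal sketch of that corrected pattern, outside tokenizer.c (render_line() is a hypothetical name; the PyUnicode_*, PyBytes_*, and PyErr_* calls are the actual C API):

```c
#include <Python.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical helper, not part of tokenizer.c: decode a UTF-8 buffer and
 * re-encode it in the original source encoding -- the same two-step
 * conversion dec_utf8() performs -- then copy the bytes into a C string. */
static char *
render_line(const char *enc, const char *utf8_text, size_t len)
{
    char *copy = NULL;
    PyObject *unicode = PyUnicode_DecodeUTF8(utf8_text, (Py_ssize_t)len,
                                             "replace");
    if (unicode == NULL) {
        PyErr_Clear();               /* report failure via NULL, not stderr */
        return NULL;
    }
    PyObject *bytes = PyUnicode_AsEncodedString(unicode, enc, "replace");
    Py_DECREF(unicode);
    if (bytes == NULL) {
        PyErr_Clear();
        return NULL;
    }

    /* The result is a bytes object: use the PyBytes accessors, not the
     * PyString calls this commit removes. */
    Py_ssize_t n = PyBytes_GET_SIZE(bytes);
    copy = malloc((size_t)n + 1);
    if (copy != NULL) {
        memcpy(copy, PyBytes_AS_STRING(bytes), (size_t)n);
        copy[n] = '\0';
    }
    Py_DECREF(bytes);
    return copy;
}
```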
Diffstat (limited to 'Parser')
-rw-r--r-- | Parser/tokenizer.c | 15 |
1 file changed, 10 insertions, 5 deletions
```diff
diff --git a/Parser/tokenizer.c b/Parser/tokenizer.c
index 0d43381..bb171c3 100644
--- a/Parser/tokenizer.c
+++ b/Parser/tokenizer.c
@@ -1556,7 +1556,10 @@ dec_utf8(const char *enc, const char *text, size_t len) {
                 Py_DECREF(unicode_text);
         }
         if (!ret) {
-                PyErr_Print();
+                PyErr_Clear();
+        }
+        else {
+                assert(PyBytes_Check(ret));
         }
         return ret;
 }
@@ -1569,8 +1572,8 @@ PyTokenizer_RestoreEncoding(struct tok_state* tok, int len, int *offset)
                 /* convert source to original encondig */
                 PyObject *lineobj = dec_utf8(tok->encoding, tok->buf, len);
                 if (lineobj != NULL) {
-                        int linelen = PyString_Size(lineobj);
-                        const char *line = PyString_AsString(lineobj);
+                        int linelen = PyBytes_GET_SIZE(lineobj);
+                        const char *line = PyBytes_AS_STRING(lineobj);
                         text = PyObject_MALLOC(linelen + 1);
                         if (text != NULL && line != NULL) {
                                 if (linelen)
@@ -1582,9 +1585,11 @@ PyTokenizer_RestoreEncoding(struct tok_state* tok, int len, int *offset)
                         /* adjust error offset */
                         if (*offset > 1) {
                                 PyObject *offsetobj = dec_utf8(tok->encoding,
-                                        tok->buf, *offset-1);
+                                        tok->buf,
+                                        *offset-1);
                                 if (offsetobj) {
-                                        *offset = PyString_Size(offsetobj) + 1;
+                                        *offset = 1 +
+                                                PyBytes_GET_SIZE(offsetobj);
                                         Py_DECREF(offsetobj);
                                 }
                         }
```
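The offset arithmetic in the last hunk is the subtle part: tok->buf holds the line as UTF-8, but the syntax-error column has to be expressed in the original source encoding, so the UTF-8 prefix before the error position is re-encoded and its length measured. A small sketch of that remapping, using a hypothetical remap_offset() helper and the same decode/encode steps as dec_utf8():

```c
#include <Python.h>

/* Hypothetical helper, not in tokenizer.c: convert a 1-based column counted
 * in UTF-8 bytes into a column counted in the original source encoding by
 * re-encoding the prefix that precedes the error position. */
static int
remap_offset(const char *enc, const char *utf8_buf, int utf8_offset)
{
    if (utf8_offset <= 1)
        return utf8_offset;

    PyObject *u = PyUnicode_DecodeUTF8(utf8_buf, utf8_offset - 1, "replace");
    if (u == NULL) {
        PyErr_Clear();
        return utf8_offset;          /* fall back to the UTF-8 column */
    }
    PyObject *b = PyUnicode_AsEncodedString(u, enc, "replace");
    Py_DECREF(u);
    if (b == NULL) {
        PyErr_Clear();
        return utf8_offset;
    }
    int offset = 1 + (int)PyBytes_GET_SIZE(b);   /* prefix length + 1 */
    Py_DECREF(b);
    return offset;
}
```

For example, in a Latin-1 line such as "é = 1", the internal UTF-8 copy stores "é" in two bytes, so an error reported at UTF-8 column 4 maps back to column 3 in the Latin-1 source.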