diff options
author | Amaury Forgeot d'Arc <amauryfa@gmail.com> | 2007-11-22 20:53:01 (GMT) |
---|---|---|
committer | Amaury Forgeot d'Arc <amauryfa@gmail.com> | 2007-11-22 20:53:01 (GMT) |
commit | af59346f1ac1d1acf0d17b789d0e69f6d95d6e38 (patch) | |
tree | 973c0c070d66f8cd33611be77824a292a88c2934 /Parser/tokenizer.c | |
parent | 24eac034be17067f4df3a277ae42e30af138441a (diff) | |
download | cpython-af59346f1ac1d1acf0d17b789d0e69f6d95d6e38.zip cpython-af59346f1ac1d1acf0d17b789d0e69f6d95d6e38.tar.gz cpython-af59346f1ac1d1acf0d17b789d0e69f6d95d6e38.tar.bz2 |
Problem found while converting from PyBytes to PyString:
Re-enable (and correct) a test for the BOM at the beginning of a code unit.
And properly "unget" characters when the BOM is incomplete.
Diffstat (limited to 'Parser/tokenizer.c')
-rw-r--r-- | Parser/tokenizer.c | 24 |
1 file changed, 17 insertions, 7 deletions
diff --git a/Parser/tokenizer.c b/Parser/tokenizer.c index 1c2b8e8..2c7da7c 100644 --- a/Parser/tokenizer.c +++ b/Parser/tokenizer.c @@ -323,8 +323,21 @@ check_bom(int get_char(struct tok_state *), if (ch == EOF) { return 1; } else if (ch == 0xEF) { - ch = get_char(tok); if (ch != 0xBB) goto NON_BOM; - ch = get_char(tok); if (ch != 0xBF) goto NON_BOM; + ch = get_char(tok); + if (ch != 0xBB) { + unget_char(ch, tok); + unget_char(0xEF, tok); + /* any token beginning with '\xEF' is a bad token */ + return 1; + } + ch = get_char(tok); + if (ch != 0xBF) { + unget_char(ch, tok); + unget_char(0xBB, tok); + unget_char(0xEF, tok); + /* any token beginning with '\xEF' is a bad token */ + return 1; + } #if 0 /* Disable support for UTF-16 BOMs until a decision is made whether this needs to be supported. */ @@ -344,10 +357,7 @@ check_bom(int get_char(struct tok_state *), if (tok->encoding != NULL) PyMem_FREE(tok->encoding); tok->encoding = new_string("utf-8", 5); /* resulting is in utf-8 */ - return 1; - NON_BOM: - /* any token beginning with '\xEF', '\xFE', '\xFF' is a bad token */ - unget_char(0xFF, tok); /* XXX this will cause a syntax error */ + /* No need to set_readline: input is already utf-8 */ return 1; } @@ -641,7 +651,7 @@ decode_str(const char *str, struct tok_state *tok) utf8 = translate_into_utf8(str, tok->enc); if (utf8 == NULL) return error_ret(tok); - str = PyBytes_AsString(utf8); + str = PyString_AsString(utf8); } for (s = str;; s++) { if (*s == '\0') break; |