| author    | Victor Stinner <victor.stinner@haypocalc.com> | 2010-03-02 23:20:02 (GMT) |
|-----------|-----------------------------------------------|---------------------------|
| committer | Victor Stinner <victor.stinner@haypocalc.com> | 2010-03-02 23:20:02 (GMT) |
| commit    | d23d3930ff1ce72263537bb050824129c6ac74f6      |                           |
| tree      | a742fd5b1cc1593e55c169a3b40919d28ea7dedd      |                           |
| parent    | 0e717addd8a03ed10231a578fce894c07ddb157e      |                           |
Issue #7820: The parser tokenizer restores all bytes in the right order
if the BOM check fails.
Fix an assertion in pydebug mode.
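
The heart of the change is the pushback discipline: check_bom() may read up to three bytes, and when the match fails it now returns each byte it consumed, last read first, instead of jumping to the old NON_BOM label that pushed back a literal 0xFF. Below is a minimal standalone sketch of that pattern. The stream struct and the get_char()/unget_char()/skip_utf8_bom() helpers are simplified stand-ins invented for illustration, not the tokenizer's real machinery, and the assert in unget_char() only approximates the kind of pydebug-mode consistency check the commit message refers to.

```c
#include <assert.h>
#include <stdio.h>

/* Simplified stand-in for the tokenizer's input: a byte buffer with a
   read position. Not CPython code. */
struct stream {
    const unsigned char *buf;
    size_t len;
    size_t pos;
};

static int
get_char(struct stream *s)
{
    return s->pos < s->len ? s->buf[s->pos++] : EOF;
}

static void
unget_char(int ch, struct stream *s)
{
    if (ch == EOF)
        return;
    assert(s->pos > 0);
    s->pos--;
    /* Stand-in for a pydebug-style check: the byte being pushed back must
       be the byte that is actually there. Pushing bytes back out of order,
       or pushing back a byte that was never read (as the old NON_BOM path
       did with 0xFF), would fail here. */
    assert(s->buf[s->pos] == (unsigned char)ch);
}

/* Consume a UTF-8 BOM if present. On a failed match, every byte that was
   read is pushed back, last one first, so the stream is left unchanged. */
static int
skip_utf8_bom(struct stream *s)
{
    int ch1, ch2, ch3;

    ch1 = get_char(s);
    if (ch1 != 0xEF) {
        unget_char(ch1, s);
        return 0;
    }
    ch2 = get_char(s);
    if (ch2 != 0xBB) {
        unget_char(ch2, s);
        unget_char(ch1, s);
        return 0;
    }
    ch3 = get_char(s);
    if (ch3 != 0xBF) {
        unget_char(ch3, s);
        unget_char(ch2, s);
        unget_char(ch1, s);
        return 0;
    }
    return 1;
}

int
main(void)
{
    /* Mirrors the new test case: two bytes in common with the UTF-8 BOM,
       then a byte that breaks the match. */
    const unsigned char src[] = { 0xEF, 0xBB, 0x20 };
    struct stream s = { src, sizeof src, 0 };

    if (!skip_utf8_bom(&s))
        printf("no BOM, position restored to %zu\n", s.pos);
    printf("first byte seen by the tokenizer: 0x%02X\n", get_char(&s));
    return 0;
}
```

Compiled and run, the sketch reports that the position is back at 0 and the next byte read is 0xEF again, which is what the real tokenizer needs: the non-BOM bytes are tokenized normally and, for inputs like the ones in the new test, end up raising a SyntaxError rather than corrupting the buffer.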
| -rw-r--r-- | Lib/test/test_pep263.py | 11 |
| -rw-r--r-- | Misc/NEWS               |  3 |
| -rw-r--r-- | Parser/tokenizer.c      | 54 |

3 files changed, 46 insertions, 22 deletions
diff --git a/Lib/test/test_pep263.py b/Lib/test/test_pep263.py
index e4faa9f..9286467 100644
--- a/Lib/test/test_pep263.py
+++ b/Lib/test/test_pep263.py
@@ -30,6 +30,17 @@ class PEP263Test(unittest.TestCase):
         self.assertEqual(d['a'], d['b'])
         self.assertEqual(len(d['a']), len(d['b']))
 
+    def test_issue7820(self):
+        # Ensure that check_bom() restores all bytes in the right order if
+        # check_bom() fails in pydebug mode: a buffer starts with the first
+        # byte of a valid BOM, but next bytes are different
+
+        # one byte in common with the UTF-16-LE BOM
+        self.assertRaises(SyntaxError, eval, '\xff\x20')
+
+        # two bytes in common with the UTF-8 BOM
+        self.assertRaises(SyntaxError, eval, '\xef\xbb\x20')
+
 def test_main():
     test_support.run_unittest(PEP263Test)
 
diff --git a/Misc/NEWS b/Misc/NEWS
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -12,6 +12,9 @@ What's New in Python 2.7 alpha 4?
 Core and Builtins
 -----------------
 
+- Issue #7820: The parser tokenizer restores all bytes in the right if
+  the BOM check fails.
+
 - Issue #7309: Fix unchecked attribute access when converting
   UnicodeEncodeError, UnicodeDecodeError, and UnicodeTranslateError to
   strings.
diff --git a/Parser/tokenizer.c b/Parser/tokenizer.c
index 04749c8..b881e7c 100644
--- a/Parser/tokenizer.c
+++ b/Parser/tokenizer.c
@@ -312,47 +312,57 @@ check_bom(int get_char(struct tok_state *),
           int set_readline(struct tok_state *, const char *),
           struct tok_state *tok)
 {
-        int ch = get_char(tok);
+        int ch1, ch2, ch3;
+        ch1 = get_char(tok);
         tok->decoding_state = 1;
-        if (ch == EOF) {
+        if (ch1 == EOF) {
                 return 1;
-        } else if (ch == 0xEF) {
-                ch = get_char(tok);
-                if (ch != 0xBB)
-                        goto NON_BOM;
-                ch = get_char(tok);
-                if (ch != 0xBF)
-                        goto NON_BOM;
+        } else if (ch1 == 0xEF) {
+                ch2 = get_char(tok);
+                if (ch2 != 0xBB) {
+                        unget_char(ch2, tok);
+                        unget_char(ch1, tok);
+                        return 1;
+                }
+                ch3 = get_char(tok);
+                if (ch3 != 0xBF) {
+                        unget_char(ch3, tok);
+                        unget_char(ch2, tok);
+                        unget_char(ch1, tok);
+                        return 1;
+                }
 #if 0
         /* Disable support for UTF-16 BOMs until a decision
            is made whether this needs to be supported.  */
-        } else if (ch == 0xFE) {
-                ch = get_char(tok);
-                if (ch != 0xFF)
-                        goto NON_BOM;
+        } else if (ch1 == 0xFE) {
+                ch2 = get_char(tok);
+                if (ch2 != 0xFF) {
+                        unget_char(ch2, tok);
+                        unget_char(ch1, tok);
+                        return 1;
+                }
                 if (!set_readline(tok, "utf-16-be"))
                         return 0;
                 tok->decoding_state = -1;
-        } else if (ch == 0xFF) {
-                ch = get_char(tok);
-                if (ch != 0xFE)
-                        goto NON_BOM;
+        } else if (ch1 == 0xFF) {
+                ch2 = get_char(tok);
+                if (ch2 != 0xFE) {
+                        unget_char(ch2, tok);
+                        unget_char(ch1, tok);
+                        return 1;
+                }
                 if (!set_readline(tok, "utf-16-le"))
                         return 0;
                 tok->decoding_state = -1;
 #endif
         } else {
-                unget_char(ch, tok);
+                unget_char(ch1, tok);
                 return 1;
         }
         if (tok->encoding != NULL)
                 PyMem_FREE(tok->encoding);
         tok->encoding = new_string("utf-8", 5);  /* resulting is in utf-8 */
         return 1;
-  NON_BOM:
-        /* any token beginning with '\xEF', '\xFE', '\xFF' is a bad token */
-        unget_char(0xFF, tok);  /* XXX this will cause a syntax error */
-        return 1;
 }
 
 /* Read a line of text from TOK into S, using the stream in TOK.
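
A note on why the pushback order matters: unget_char() undoes one get_char() at a time, so the consumed bytes have to be returned last-read-first (ch3, then ch2, then ch1) for the tokenizer to see them again in their original order on the next reads. The removed NON_BOM path instead pushed back a single hard-coded 0xFF, deliberately provoking a syntax error (see the XXX comment above) while leaving the buffer out of step with the real input; in a pydebug build that mismatch is presumably what triggered the assertion the commit message mentions.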