author     Benjamin Peterson <benjamin@python.org>   2009-10-09 21:53:27 (GMT)
committer  Benjamin Peterson <benjamin@python.org>   2009-10-09 21:53:27 (GMT)
commit     0c7f9c96f5fbe8c445ccd3e2ee315f2e2f45a3ca
tree       0deb207baf2fc47748cb82897cbb5bb445a26dba /Lib/test/test_tokenize.py
parent     ae2fa6fad26cbbef8ad408b4f5188d2a7a9f8c89
Merged revisions 75299 via svnmerge from
svn+ssh://pythondev@svn.python.org/python/branches/py3k
........
r75299 | benjamin.peterson | 2009-10-09 16:43:09 -0500 (Fri, 09 Oct 2009) | 1 line
normalize latin-1 and utf-8 variant encodings like the builtin tokenizer does
........
Diffstat (limited to 'Lib/test/test_tokenize.py')
-rw-r--r--  Lib/test/test_tokenize.py  30
1 file changed, 29 insertions, 1 deletion
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index f395ed4..ba705ba 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -719,7 +719,7 @@ class TestDetectEncoding(TestCase):
             b'do_something(else)\n'
         )
         encoding, consumed_lines = detect_encoding(self.get_readline(lines))
-        self.assertEquals(encoding, 'latin-1')
+        self.assertEquals(encoding, 'iso-8859-1')
         self.assertEquals(consumed_lines, [b'# -*- coding: latin-1 -*-\n'])
 
     def test_matched_bom_and_cookie_first_line(self):
@@ -775,6 +775,34 @@ class TestDetectEncoding(TestCase):
         readline = self.get_readline(lines)
         self.assertRaises(SyntaxError, detect_encoding, readline)
 
+    def test_latin1_normalization(self):
+        # See get_normal_name() in tokenizer.c.
+        encodings = ("latin-1", "iso-8859-1", "iso-latin-1", "latin-1-unix",
+                     "iso-8859-1-unix", "iso-latin-1-mac")
+        for encoding in encodings:
+            for rep in ("-", "_"):
+                enc = encoding.replace("-", rep)
+                lines = (b"#!/usr/bin/python\n",
+                         b"# coding: " + enc.encode("ascii") + b"\n",
+                         b"print(things)\n",
+                         b"do_something += 4\n")
+                rl = self.get_readline(lines)
+                found, consumed_lines = detect_encoding(rl)
+                self.assertEquals(found, "iso-8859-1")
+
+    def test_utf8_normalization(self):
+        # See get_normal_name() in tokenizer.c.
+        encodings = ("utf-8", "utf-8-mac", "utf-8-unix")
+        for encoding in encodings:
+            for rep in ("-", "_"):
+                enc = encoding.replace("-", rep)
+                lines = (b"#!/usr/bin/python\n",
+                         b"# coding: " + enc.encode("ascii") + b"\n",
+                         b"1 + 3\n")
+                rl = self.get_readline(lines)
+                found, consumed_lines = detect_encoding(rl)
+                self.assertEquals(found, "utf-8")
+
     def test_short_files(self):
         readline = self.get_readline((b'print(something)\n',))
         encoding, consumed_lines = detect_encoding(readline)
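
A minimal sketch (not part of the commit) of the behavior the new tests exercise: tokenize.detect_encoding() normalizes spelling variants of the latin-1 and utf-8 names found in coding cookies to the canonical "iso-8859-1" and "utf-8", mirroring get_normal_name() in tokenizer.c.

    import io
    from tokenize import detect_encoding

    # "iso_latin_1" and similar variants normalize to "iso-8859-1".
    src = b"# coding: iso_latin_1\nprint('hello')\n"
    encoding, consumed = detect_encoding(io.BytesIO(src).readline)
    print(encoding)  # iso-8859-1

    # "utf-8-unix" and other "utf-8-*" variants normalize to "utf-8".
    src = b"# coding: utf-8-unix\n1 + 3\n"
    encoding, consumed = detect_encoding(io.BytesIO(src).readline)
    print(encoding)  # utf-8

Only the returned encoding name is normalized; the consumed lines still carry the cookie's original spelling, as the first hunk's assertEquals(consumed_lines, ...) check shows.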