author | Terry Jan Reedy <tjreedy@udel.edu> | 2014-02-24 04:33:08 (GMT)
---|---|---
committer | Terry Jan Reedy <tjreedy@udel.edu> | 2014-02-24 04:33:08 (GMT)
commit | 9dc3a36c849c15c227a8af218cfb215abe7b3c48 |
tree | 596fc7e9ff8839ade21aa372182affe4defb8f85 /Lib/test |
parent | 938ba685dc9bbd34a598465eabbbee849b00d803 |
Issue #9974: When untokenizing, use row info to insert backslash+newline.
Original patches by A. Kuchling and G. Rees (#12691).
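The code change itself lands in Lib/tokenize.py and is not part of this diff, which is limited to Lib/test. As a rough sketch of what the new tests below exercise: `Untokenizer.add_whitespace` now compares the next token's start row against the row where the previous token ended, and emits one backslash+newline pair per skipped row before padding out columns. The class below is a minimal stand-in, not the real `Untokenizer`; the name and attribute defaults are illustrative.

```python
class UntokenizerSketch:
    """Minimal stand-in for tokenize.Untokenizer's whitespace logic."""

    def __init__(self):
        self.tokens = []      # output source fragments
        self.prev_row = 1     # row where the previous token ended
        self.prev_col = 0     # column where the previous token ended

    def add_whitespace(self, start):
        row, col = start
        if row < self.prev_row or row == self.prev_row and col < self.prev_col:
            raise ValueError("start ({},{}) precedes previous end ({},{})"
                             .format(row, col, self.prev_row, self.prev_col))
        row_offset = row - self.prev_row
        if row_offset:
            # The next token starts on a later row with no NEWLINE/NL token
            # in between, so the gap can only be an explicit line joining:
            # emit backslash+newline for each skipped row, then measure the
            # column offset from the start of the new line.
            self.tokens.append("\\\n" * row_offset)
            self.prev_col = 0
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)
```

With a previous end of (1, 1) and a next start of (2, 0), this appends just `'\\\n'`, which is exactly what the new `test_backslash_continuation` asserts.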
Diffstat (limited to 'Lib/test')
-rw-r--r-- | Lib/test/test_tokenize.py | 17
1 file changed, 16 insertions, 1 deletion
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 7a472b2..38611a7 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -2,7 +2,7 @@ doctests = """
 Tests for the tokenize module.
 
 The tests can be really simple. Given a small fragment of source
-code, print out a table with tokens. The ENDMARK is omitted for
+code, print out a table with tokens. The ENDMARKER is omitted for
 brevity.
 
 >>> dump_tokens("1 + 1")
@@ -1180,6 +1180,7 @@ class TestTokenize(TestCase):
 class UntokenizeTest(TestCase):
 
     def test_bad_input_order(self):
+        # raise if previous row
         u = Untokenizer()
         u.prev_row = 2
         u.prev_col = 2
@@ -1187,8 +1188,22 @@ class UntokenizeTest(TestCase):
             u.add_whitespace((1,3))
         self.assertEqual(cm.exception.args[0],
                 'start (1,3) precedes previous end (2,2)')
+        # raise if previous column in row
         self.assertRaises(ValueError, u.add_whitespace, (2,1))
 
+    def test_backslash_continuation(self):
+        # The problem is that <whitespace>\<newline> leaves no token
+        u = Untokenizer()
+        u.prev_row = 1
+        u.prev_col = 1
+        u.tokens = []
+        u.add_whitespace((2, 0))
+        self.assertEqual(u.tokens, ['\\\n'])
+        u.prev_row = 2
+        u.add_whitespace((4, 4))
+        self.assertEqual(u.tokens, ['\\\n', '\\\n\\\n', '    '])
+        self.assertTrue(roundtrip('a\n  b\n    c\n  \\\n  c\n'))
+
     def test_iter_compat(self):
         u = Untokenizer()
         token = (NAME, 'Hello')
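For context, `roundtrip()` is a helper defined earlier in test_tokenize.py: it checks that untokenizing and re-tokenizing yields the same token stream, not byte-identical source (spacing before a backslash may legitimately be lost). A hand-rolled equivalent of that check, using only the public tokenize API; the `tok` helper is mine, not the test file's:

```python
import io
import tokenize

def tok(src):
    # Compare (type, string) pairs only; positions may shift after a roundtrip.
    return [t[:2] for t in tokenize.generate_tokens(io.StringIO(src).readline)]

source = 'a\n  b\n    c\n  \\\n  c\n'    # the string from the new test
tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
rebuilt = tokenize.untokenize(tokens)     # full 5-tuple mode: positions used
assert tok(rebuilt) == tok(source)        # token stream survives the roundtrip
```

Before this fix, the rebuilt source dropped the backslash continuations entirely, so re-tokenizing it produced a different (or invalid) token stream.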