summaryrefslogtreecommitdiffstats
path: root/Lib/test
diff options
context:
space:
mode:
Diffstat (limited to 'Lib/test')
-rw-r--r--  Lib/test/test_tokenize.py  17
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 7a472b2..38611a7 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -2,7 +2,7 @@ doctests = """
Tests for the tokenize module.
The tests can be really simple. Given a small fragment of source
-code, print out a table with tokens. The ENDMARK is omitted for
+code, print out a table with tokens. The ENDMARKER is omitted for
brevity.
>>> dump_tokens("1 + 1")
@@ -1180,6 +1180,7 @@ class TestTokenize(TestCase):
class UntokenizeTest(TestCase):
def test_bad_input_order(self):
+ # raise if previous row
u = Untokenizer()
u.prev_row = 2
u.prev_col = 2
@@ -1187,8 +1188,22 @@ class UntokenizeTest(TestCase):
u.add_whitespace((1,3))
self.assertEqual(cm.exception.args[0],
'start (1,3) precedes previous end (2,2)')
+ # raise if previous column in row
self.assertRaises(ValueError, u.add_whitespace, (2,1))
+ def test_backslash_continuation(self):
+ # The problem is that <whitespace>\<newline> leaves no token
+ u = Untokenizer()
+ u.prev_row = 1
+ u.prev_col = 1
+ u.tokens = []
+ u.add_whitespace((2, 0))
+ self.assertEqual(u.tokens, ['\\\n'])
+ u.prev_row = 2
+ u.add_whitespace((4, 4))
+ self.assertEqual(u.tokens, ['\\\n', '\\\n\\\n', ' '])
+ self.assertTrue(roundtrip('a\n b\n c\n \\\n c\n'))
+
def test_iter_compat(self):
u = Untokenizer()
token = (NAME, 'Hello')