author     Brett Cannon <brett@python.org>    2016-09-09 21:57:09 (GMT)
committer  Brett Cannon <brett@python.org>    2016-09-09 21:57:09 (GMT)
commit     a721abac299bb6529021000a71847486d531b41a (patch)
tree       8355a69b891cfcdaad8a5fd62870231b7f940696 /Lib/test/test_tokenize.py
parent     ee73a657455a908102379d3c9bc254676418e10c (diff)
Issue #26331: Implement the parsing part of PEP 515.
Thanks to Georg Brandl for the patch.
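
PEP 515 allows underscores as visual separators in numeric literals (1_000_000, 0x_FF, 1_000.000_1, and so on), and this patch teaches the tokenizer to return such a literal as a single NUMBER token. A minimal sketch of the expected behaviour on a build with this patch applied (illustrative only; the variable names are my own, not part of the commit):

    from io import BytesIO
    from tokenize import tokenize, tok_name, NUMBER

    # An underscore-grouped literal should come back as one NUMBER token
    # rather than being split apart at the underscores.
    src = b"budget = 1_000_000\n"
    for tok in tokenize(BytesIO(src).readline):
        if tok.type == NUMBER:
            print(tok_name[tok.type], tok.string)  # NUMBER 1_000_000
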
Diffstat (limited to 'Lib/test/test_tokenize.py')
-rw-r--r--  Lib/test/test_tokenize.py  30
1 file changed, 23 insertions(+), 7 deletions(-)
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 4c469a8..5a81a5f 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -3,7 +3,9 @@ from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP,
                     STRING, ENDMARKER, ENCODING, tok_name, detect_encoding,
                     open as tokenize_open, Untokenizer)
from io import BytesIO
-from unittest import TestCase, mock, main
+from unittest import TestCase, mock
+from test.test_grammar import (VALID_UNDERSCORE_LITERALS,
+                               INVALID_UNDERSCORE_LITERALS)
import os
import token
@@ -185,6 +187,21 @@ def k(x):
    NUMBER     '3.14e159' (1, 4) (1, 12)
    """)
+    def test_underscore_literals(self):
+        def number_token(s):
+            f = BytesIO(s.encode('utf-8'))
+            for toktype, token, start, end, line in tokenize(f.readline):
+                if toktype == NUMBER:
+                    return token
+            return 'invalid token'
+        for lit in VALID_UNDERSCORE_LITERALS:
+            if '(' in lit:
+                # this won't work with compound complex inputs
+                continue
+            self.assertEqual(number_token(lit), lit)
+        for lit in INVALID_UNDERSCORE_LITERALS:
+            self.assertNotEqual(number_token(lit), lit)
+
    def test_string(self):
        # String literals
        self.check_tokenize("x = ''; y = \"\"", """\
@@ -1529,11 +1546,10 @@ class TestRoundtrip(TestCase):
        tempdir = os.path.dirname(fn) or os.curdir
        testfiles = glob.glob(os.path.join(tempdir, "test*.py"))
-        # Tokenize is broken on test_unicode_identifiers.py because regular
-        # expressions are broken on the obscure unicode identifiers in it.
-        # *sigh* With roundtrip extended to test the 5-tuple mode of
-        # untokenize, 7 more testfiles fail. Remove them also until the
-        # failure is diagnosed.
+        # Tokenize is broken on test_pep3131.py because regular expressions are
+        # broken on the obscure unicode identifiers in it. *sigh*
+        # With roundtrip extended to test the 5-tuple mode of untokenize,
+        # 7 more testfiles fail. Remove them also until the failure is diagnosed.
        testfiles.remove(os.path.join(tempdir, "test_unicode_identifiers.py"))
        for f in ('buffer', 'builtin', 'fileio', 'inspect', 'os', 'platform', 'sys'):
@@ -1565,4 +1581,4 @@ class TestRoundtrip(TestCase):
if __name__ == "__main__":
-    main()
+    unittest.main()
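
For context, the "5-tuple mode of untokenize" mentioned in the updated comment refers to untokenize() accepting either full TokenInfo 5-tuples, where token positions are honoured and a full-input round trip reproduces the source exactly, or bare (type, string) pairs, where spacing is regenerated. A rough sketch of the two modes, assuming an interpreter that already accepts PEP 515 literals (not code from this patch):

    from io import BytesIO
    from tokenize import tokenize, untokenize

    source = b"x = 1_0 + 2\n"
    tokens = list(tokenize(BytesIO(source).readline))

    # Full 5-tuple mode: start/end positions are used, so untokenizing
    # the complete token stream reproduces the input bytes exactly.
    assert untokenize(tokens) == source

    # Compatibility 2-tuple mode: only (type, string) survives; the result
    # tokenizes identically, but whitespace may be laid out differently.
    loose = untokenize((tok.type, tok.string) for tok in tokens)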