path: root/Lib/tabnanny.py
author    Marta Gómez Macías <mgmacias@google.com>  2023-05-21 00:03:02 (GMT)
committer GitHub <noreply@github.com>               2023-05-21 00:03:02 (GMT)
commit    6715f91edcf6f379f666e18f57b8a0dcb724bf79 (patch)
tree      25724d6eb5b8ff5e713f7bfd8f6c33e5a6d87f62 /Lib/tabnanny.py
parent    3ed57e4995d9f8583083483f397ddc3131720953 (diff)
gh-102856: Python tokenizer implementation for PEP 701 (#104323)
This commit replaces the Python implementation of the tokenize module with an implementation that reuses the real C tokenizer via a private extension module. The tokenize module now implements a compatibility layer that transforms tokens from the C tokenizer into Python tokenize tokens for backward compatibility.

As the C tokenizer does not emit some tokens that the Python tokenizer provides (such as comments and non-semantic newlines), a new special mode has been added to the C tokenizer that is currently only used via the extension module exposing it to the Python layer. This new mode forces the C tokenizer to emit these extra tokens and add the metadata needed to match the old Python implementation.

Co-authored-by: Pablo Galindo <pablogsal@gmail.com>
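For readers who want to see the difference concretely, here is a minimal sketch using only the public tokenize API (ordinary usage, not code from this commit). It shows the two token kinds the compatibility layer has to reproduce: COMMENT, and NL, the non-semantic newline emitted for blank and comment-only lines.

import io
import tokenize

# Tokenize a snippet with a comment-only line; COMMENT and NL are the
# tokens the C tokenizer's default mode does not emit on its own.
source = "# a comment\nx = 1\n"

for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    print(tokenize.tok_name[tok.type], repr(tok.string))

The first line yields a COMMENT token followed by NL, while the assignment ends with a semantic NEWLINE; these extra tokens are what the new special mode forces the C tokenizer to produce.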
Diffstat (limited to 'Lib/tabnanny.py')
-rwxr-xr-x  Lib/tabnanny.py  10
1 file changed, 10 insertions, 0 deletions
diff --git a/Lib/tabnanny.py b/Lib/tabnanny.py
index 9d2df59..e2ac683 100755
--- a/Lib/tabnanny.py
+++ b/Lib/tabnanny.py
@@ -107,6 +107,10 @@ def check(file):
         errprint("%r: Token Error: %s" % (file, msg))
         return
 
+    except SyntaxError as msg:
+        errprint("%r: Token Error: %s" % (file, msg))
+        return
+
     except IndentationError as msg:
         errprint("%r: Indentation Error: %s" % (file, msg))
         return
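For context, a sketch of the handler shape this hunk produces, as a simplified stand-in rather than tabnanny's actual check() (which also handles NannyNag and prints witnesses): with tokenize backed by the C tokenizer, malformed input can surface as a plain SyntaxError in addition to tokenize.TokenError. In standalone code, note that IndentationError subclasses SyntaxError, so the more specific handler must come first to keep its distinct message.

import tokenize

def drain_tokens(file):
    # Hypothetical helper mirroring the try/except added to check() above.
    with open(file) as f:
        try:
            for _ in tokenize.generate_tokens(f.readline):
                pass
        except tokenize.TokenError as msg:
            print("%r: Token Error: %s" % (file, msg))
        except IndentationError as msg:
            # Subclass of SyntaxError; caught first to keep its message.
            print("%r: Indentation Error: %s" % (file, msg))
        except SyntaxError as msg:
            print("%r: Token Error: %s" % (file, msg))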
@@ -272,6 +276,12 @@ def format_witnesses(w):
     return prefix + " " + ', '.join(firsts)
 
 def process_tokens(tokens):
+    try:
+        _process_tokens(tokens)
+    except TabError as e:
+        raise NannyNag(e.lineno, e.msg, e.text)
+
+def _process_tokens(tokens):
     INDENT = tokenize.INDENT
     DEDENT = tokenize.DEDENT
     NEWLINE = tokenize.NEWLINE
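This wrapper preserves tabnanny's public contract: callers of process_tokens still receive NannyNag, while the C tokenizer reports inconsistent tab/space indentation as TabError. A minimal, illustrative demonstration (compile() is simply an easy way to trigger a TabError; it is not part of the patch) that TabError carries exactly the fields NannyNag's constructor takes:

# TabError is a SyntaxError subclass raised for inconsistent tabs/spaces.
bad_source = "if True:\n\tx = 1\n        y = 2\n"  # tab, then spaces

try:
    compile(bad_source, "<demo>", "exec")
except TabError as e:
    # lineno, msg and text map directly onto NannyNag(lineno, msg, line).
    print(e.lineno, e.msg, repr(e.text))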