author     Marta Gómez Macías <mgmacias@google.com>    2023-05-21 00:03:02 (GMT)
committer  GitHub <noreply@github.com>                 2023-05-21 00:03:02 (GMT)
commit     6715f91edcf6f379f666e18f57b8a0dcb724bf79 (patch)
tree       25724d6eb5b8ff5e713f7bfd8f6c33e5a6d87f62 /Parser/pegen_errors.c
parent     3ed57e4995d9f8583083483f397ddc3131720953 (diff)
gh-102856: Python tokenizer implementation for PEP 701 (#104323)
This commit replaces the Python implementation of the tokenize module with an implementation that reuses the real C tokenizer via a private extension module. The tokenize module now implements a compatibility layer that transforms tokens from the C tokenizer into Python tokenize tokens for backward compatibility.

As the C tokenizer does not emit some tokens that the Python tokenizer provides (such as comments and non-semantic newlines), a new special mode has been added to the C tokenizer; it is currently used only via the extension module that exposes it to the Python layer. This new mode forces the C tokenizer to emit these extra tokens and to attach the metadata needed to match the old Python implementation.

Co-authored-by: Pablo Galindo <pablogsal@gmail.com>
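To illustrate the idea behind the new mode, here is a minimal, self-contained C sketch; it is not the actual CPython code, and every name in it (tok_sketch, extra_tokens, the token kinds) is an assumption for illustration. The point it demonstrates is that a flag on the tokenizer state decides whether non-semantic tokens such as comments are returned to the caller or silently skipped, which is how one C tokenizer can serve both the parser and the tokenize module.

#include <stdio.h>

enum token_kind { TOK_NAME, TOK_COMMENT, TOK_NEWLINE, TOK_END };

struct tok_sketch {
    const char *p;     /* cursor into the source buffer */
    int extra_tokens;  /* 0: parser mode, 1: tokenize-module mode */
};

/* Return the next token kind, honouring the extra_tokens flag. */
static enum token_kind
next_token(struct tok_sketch *tok)
{
    for (;;) {
        char c = *tok->p;
        if (c == '\0') {
            return TOK_END;
        }
        if (c == '#') {                /* a comment runs to end of line */
            while (*tok->p != '\0' && *tok->p != '\n') {
                tok->p++;
            }
            if (tok->extra_tokens) {
                return TOK_COMMENT;    /* special mode: emit the comment */
            }
            continue;                  /* default mode: swallow it */
        }
        if (c == '\n') {
            tok->p++;
            return TOK_NEWLINE;
        }
        while (*tok->p != '\0' && *tok->p != '\n' && *tok->p != '#') {
            tok->p++;                  /* consume a run of ordinary source */
        }
        return TOK_NAME;
    }
}

int main(void)
{
    struct tok_sketch tok = { "x = 1  # comment\n", 1 };
    enum token_kind t;
    while ((t = next_token(&tok)) != TOK_END) {
        printf("token kind %d\n", t);
    }
    return 0;
}

With extra_tokens set to 0, the same loop would return only TOK_NAME and TOK_NEWLINE for this input, mirroring what the parser-facing mode of the real tokenizer needs.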
Diffstat (limited to 'Parser/pegen_errors.c')
-rw-r--r--   Parser/pegen_errors.c   4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/Parser/pegen_errors.c b/Parser/pegen_errors.c
index 1f227da..af52905 100644
--- a/Parser/pegen_errors.c
+++ b/Parser/pegen_errors.c
@@ -165,7 +165,7 @@ _PyPegen_tokenize_full_source_to_check_for_errors(Parser *p) {
 
     int ret = 0;
     struct token new_token;
-    new_token.metadata = NULL;
+    _PyToken_Init(&new_token);
 
     for (;;) {
         switch (_PyTokenizer_Get(p->tok, &new_token)) {
@@ -193,7 +193,7 @@ _PyPegen_tokenize_full_source_to_check_for_errors(Parser *p) {
     }
 
 exit:
-    Py_XDECREF(new_token.metadata);
+    _PyToken_Free(&new_token);
     // If we're in an f-string, we want the syntax error in the expression part
     // to propagate, so that tokenizer errors (like expecting '}') that happen afterwards
     // do not swallow it.
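The two hunks swap manual management of the token's metadata field for a pair of helpers. Judging only from the lines this diff replaces, plausible definitions would look like the sketch below; these bodies are an assumption, as the real definitions live in other files touched by this commit.

/* Sketch only: inferred from the two replaced lines above,
 * not copied from the commit. */
void
_PyToken_Init(struct token *token)
{
    token->metadata = NULL;        /* was done inline before this change */
}

void
_PyToken_Free(struct token *token)
{
    Py_XDECREF(token->metadata);   /* was done inline at the exit label */
}

Centralising initialisation and teardown in helpers means that any field later added to struct token can be managed in one place rather than at every call site, which matters for PEP 701 since f-string tokens carry extra metadata.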