author    | Marta Gómez Macías <mgmacias@google.com> | 2023-05-21 00:03:02 (GMT)
committer | GitHub <noreply@github.com>              | 2023-05-21 00:03:02 (GMT)
commit    | 6715f91edcf6f379f666e18f57b8a0dcb724bf79 (patch)
tree      | 25724d6eb5b8ff5e713f7bfd8f6c33e5a6d87f62 /Python/clinic
parent    | 3ed57e4995d9f8583083483f397ddc3131720953 (diff)
gh-102856: Python tokenizer implementation for PEP 701 (#104323)
This commit replaces the Python implementation of the tokenize module with an implementation
that reuses the real C tokenizer via a private extension module. The tokenize module now implements
a compatibility layer that transforms tokens from the C tokenizer into Python tokenize tokens for backward
compatibility.
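
For illustration (not part of this commit), the public API is unchanged: a snippet like the following still yields the full backward-compatible token stream, including COMMENT and NL tokens, even though the tokens are now produced by the C tokenizer underneath.

    import io
    import tokenize

    src = "# a comment\nx = 1\n"
    # generate_tokens() still yields COMMENT and NL tokens as before; after
    # this commit they come from the C tokenizer via the compatibility layer.
    for tok in tokenize.generate_tokens(io.StringIO(src).readline):
        print(tokenize.tok_name[tok.type], repr(tok.string))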
As the C tokenizer does not emit some tokens that the Python tokenizer provides (such as comments and non-semantic newlines), a new special mode has been added to the C tokenizer; it is currently used only via the extension module that exposes it to the Python layer. This new mode forces the C tokenizer to emit these extra tokens and to attach the metadata needed to match the old Python implementation.
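
As a sketch of how this mode surfaces at the Python layer: the private extension this diff belongs to exposes an iterator type that accepts the new flag. The module/class spelling below (_tokenize.TokenizerIter) matches this commit's generated code but is a private, unstable interface; treat the exact spelling as an assumption.

    import _tokenize  # private extension module, not a public API

    # extra_tokens is the new required keyword-only flag added in this diff;
    # when true, the C tokenizer also emits the COMMENT/NL-style tokens and
    # metadata that the old pure-Python tokenizer provided. (Sketch only.)
    for tok in _tokenize.TokenizerIter("y = 2  # note\n", extra_tokens=True):
        print(tok)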
Co-authored-by: Pablo Galindo <pablogsal@gmail.com>
Diffstat (limited to 'Python/clinic')
-rw-r--r-- | Python/clinic/Python-tokenize.c.h | 22
1 file changed, 14 insertions, 8 deletions
diff --git a/Python/clinic/Python-tokenize.c.h b/Python/clinic/Python-tokenize.c.h
index 6af9374..7e77938 100644
--- a/Python/clinic/Python-tokenize.c.h
+++ b/Python/clinic/Python-tokenize.c.h
@@ -9,7 +9,8 @@ preserve
 static PyObject *
-tokenizeriter_new_impl(PyTypeObject *type, const char *source);
+tokenizeriter_new_impl(PyTypeObject *type, const char *source,
+                       int extra_tokens);
 
 static PyObject *
 tokenizeriter_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)
@@ -17,14 +18,14 @@ tokenizeriter_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)
     PyObject *return_value = NULL;
     #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)
 
-    #define NUM_KEYWORDS 1
+    #define NUM_KEYWORDS 2
     static struct {
         PyGC_Head _this_is_not_used;
         PyObject_VAR_HEAD
         PyObject *ob_item[NUM_KEYWORDS];
     } _kwtuple = {
         .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS)
-        .ob_item = { &_Py_ID(source), },
+        .ob_item = { &_Py_ID(source), &_Py_ID(extra_tokens), },
     };
     #undef NUM_KEYWORDS
     #define KWTUPLE (&_kwtuple.ob_base.ob_base)
@@ -33,19 +34,20 @@ tokenizeriter_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)
 #  define KWTUPLE NULL
 #endif  // !Py_BUILD_CORE
 
-    static const char * const _keywords[] = {"source", NULL};
+    static const char * const _keywords[] = {"source", "extra_tokens", NULL};
     static _PyArg_Parser _parser = {
         .keywords = _keywords,
         .fname = "tokenizeriter",
         .kwtuple = KWTUPLE,
     };
     #undef KWTUPLE
-    PyObject *argsbuf[1];
+    PyObject *argsbuf[2];
     PyObject * const *fastargs;
     Py_ssize_t nargs = PyTuple_GET_SIZE(args);
     const char *source;
+    int extra_tokens;
 
-    fastargs = _PyArg_UnpackKeywords(_PyTuple_CAST(args)->ob_item, nargs, kwargs, NULL, &_parser, 1, 1, 0, argsbuf);
+    fastargs = _PyArg_UnpackKeywords(_PyTuple_CAST(args)->ob_item, nargs, kwargs, NULL, &_parser, 1, 1, 1, argsbuf);
     if (!fastargs) {
         goto exit;
     }
@@ -62,9 +64,13 @@ tokenizeriter_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)
         PyErr_SetString(PyExc_ValueError, "embedded null character");
         goto exit;
     }
-    return_value = tokenizeriter_new_impl(type, source);
+    extra_tokens = PyObject_IsTrue(fastargs[1]);
+    if (extra_tokens < 0) {
+        goto exit;
+    }
+    return_value = tokenizeriter_new_impl(type, source, extra_tokens);
 
 exit:
     return return_value;
 }
-/*[clinic end generated code: output=8c2c09f651961986 input=a9049054013a1b77]*/
+/*[clinic end generated code: output=940b564c67f6e0e2 input=a9049054013a1b77]*/
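
A note on the generated argument handling above: the new extra_tokens parameter is converted with PyObject_IsTrue(), which returns 1, 0, or -1 on error, so any object usable in a boolean context is accepted, and a failing __bool__ propagates as an exception (the < 0 branch takes the exit path). A minimal Python-side sketch of the same semantics, using a hypothetical Flaky class for illustration:

    # bool(obj) is the Python-level counterpart of PyObject_IsTrue(); the C
    # code above maps its -1 error result to "goto exit", i.e. the exception
    # propagates to the caller.
    class Flaky:
        def __bool__(self):
            raise RuntimeError("broken __bool__")

    print(bool([1]))   # truthy -> extra_tokens = 1
    print(bool(0))     # falsy  -> extra_tokens = 0
    try:
        bool(Flaky())  # error  -> the generated code takes the error path
    except RuntimeError as exc:
        print("error propagates:", exc)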