author     Marta Gómez Macías <mgmacias@google.com>    2023-05-21 00:03:02 (GMT)
committer  GitHub <noreply@github.com>    2023-05-21 00:03:02 (GMT)
commit     6715f91edcf6f379f666e18f57b8a0dcb724bf79 (patch)
tree       25724d6eb5b8ff5e713f7bfd8f6c33e5a6d87f62 /Python
parent     3ed57e4995d9f8583083483f397ddc3131720953 (diff)
gh-102856: Python tokenizer implementation for PEP 701 (#104323)
This commit replaces the Python implementation of the tokenize module with an implementation that reuses the real C tokenizer via a private extension module. The tokenize module now implements a compatibility layer that transforms tokens from the C tokenizer into Python tokenize tokens for backward compatibility.

As the C tokenizer does not emit some tokens that the Python tokenizer provides (such as comments and non-semantic newlines), a new special mode has been added to the C tokenizer that is currently only used via the extension module exposing it to the Python layer. This new mode forces the C tokenizer to emit these extra tokens and to attach the metadata needed to match the old Python implementation.

Co-authored-by: Pablo Galindo <pablogsal@gmail.com>
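For context, the compatibility layer drives the new iterator roughly as in the sketch below. TokenizerIter yields 7-tuples matching the Py_BuildValue call in tokenizeriter_next further down; note that _tokenize is a private extension module, so this illustrates the mechanism rather than a supported public API.

    # Illustration only: _tokenize is a private CPython module and may change.
    from _tokenize import TokenizerIter

    source = "x = 1  # a comment\n"
    for token in TokenizerIter(source, extra_tokens=True):
        string, tok_type, lineno, end_lineno, col, end_col, line = token
        print(tok_type, repr(string), (lineno, col), (end_lineno, end_col))

With extra_tokens=True, the C tokenizer also emits the comment and non-semantic newline tokens that the old pure-Python tokenizer produced.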
Diffstat (limited to 'Python')
-rw-r--r--    Python/Python-tokenize.c             140
-rw-r--r--    Python/clinic/Python-tokenize.c.h     22
2 files changed, 140 insertions, 22 deletions
diff --git a/Python/Python-tokenize.c b/Python/Python-tokenize.c
index 3394a51..ece2386 100644
--- a/Python/Python-tokenize.c
+++ b/Python/Python-tokenize.c
@@ -1,5 +1,8 @@
#include "Python.h"
+#include "errcode.h"
#include "../Parser/tokenizer.h"
+#include "../Parser/pegen.h" // _PyPegen_byte_offset_to_character_offset()
+#include "../Parser/pegen.h" // _PyPegen_byte_offset_to_character_offset()
static struct PyModuleDef _tokenizemodule;
@@ -34,11 +37,14 @@ typedef struct
_tokenizer.tokenizeriter.__new__ as tokenizeriter_new
source: str
+ *
+ extra_tokens: bool
[clinic start generated code]*/
static PyObject *
-tokenizeriter_new_impl(PyTypeObject *type, const char *source)
-/*[clinic end generated code: output=7fd9f46cf9263cbb input=4384b368407375c6]*/
+tokenizeriter_new_impl(PyTypeObject *type, const char *source,
+ int extra_tokens)
+/*[clinic end generated code: output=f6f9d8b4beec8106 input=90dc5b6a5df180c2]*/
{
tokenizeriterobject *self = (tokenizeriterobject *)type->tp_alloc(type, 0);
if (self == NULL) {
@@ -54,20 +60,123 @@ tokenizeriter_new_impl(PyTypeObject *type, const char *source)
return NULL;
}
self->tok->filename = filename;
+ if (extra_tokens) {
+ self->tok->tok_extra_tokens = 1;
+ }
return (PyObject *)self;
}
+static int
+_tokenizer_error(struct tok_state *tok)
+{
+ if (PyErr_Occurred()) {
+ return -1;
+ }
+
+ const char *msg = NULL;
+ PyObject* errtype = PyExc_SyntaxError;
+ switch (tok->done) {
+ case E_TOKEN:
+ msg = "invalid token";
+ break;
+ case E_EOF:
+ if (tok->level) {
+ PyErr_Format(PyExc_SyntaxError,
+ "parenthesis '%c' was never closed",
+ tok->parenstack[tok->level-1]);
+ } else {
+ PyErr_SetString(PyExc_SyntaxError, "unexpected EOF while parsing");
+ }
+ return -1;
+ case E_DEDENT:
+ PyErr_Format(PyExc_IndentationError,
+ "unindent does not match any outer indentation level "
+ "(<tokenize>, line %d)",
+ tok->lineno);
+ return -1;
+ case E_INTR:
+ if (!PyErr_Occurred()) {
+ PyErr_SetNone(PyExc_KeyboardInterrupt);
+ }
+ return -1;
+ case E_NOMEM:
+ PyErr_NoMemory();
+ return -1;
+ case E_TABSPACE:
+ errtype = PyExc_TabError;
+ msg = "inconsistent use of tabs and spaces in indentation";
+ break;
+ case E_TOODEEP:
+ errtype = PyExc_IndentationError;
+ msg = "too many levels of indentation";
+ break;
+ case E_LINECONT: {
+ msg = "unexpected character after line continuation character";
+ break;
+ }
+ default:
+ msg = "unknown tokenization error";
+ }
+
+ PyObject* errstr = NULL;
+ PyObject* error_line = NULL;
+ PyObject* tmp = NULL;
+ PyObject* value = NULL;
+ int result = 0;
+
+ Py_ssize_t size = tok->inp - tok->buf;
+ error_line = PyUnicode_DecodeUTF8(tok->buf, size, "replace");
+ if (!error_line) {
+ result = -1;
+ goto exit;
+ }
+
+ tmp = Py_BuildValue("(OnnOii)", tok->filename, tok->lineno, 0, error_line, 0, 0);
+ if (!tmp) {
+ result = -1;
+ goto exit;
+ }
+
+ errstr = PyUnicode_FromString(msg);
+ if (!errstr) {
+ result = -1;
+ goto exit;
+ }
+
+ value = PyTuple_Pack(2, errstr, tmp);
+ if (!value) {
+ result = -1;
+ goto exit;
+ }
+
+ PyErr_SetObject(errtype, value);
+
+exit:
+ Py_XDECREF(errstr);
+ Py_XDECREF(error_line);
+ Py_XDECREF(tmp);
+ Py_XDECREF(value);
+ return result;
+}
+
static PyObject *
tokenizeriter_next(tokenizeriterobject *it)
{
+ PyObject* result = NULL;
struct token token;
+ _PyToken_Init(&token);
+
int type = _PyTokenizer_Get(it->tok, &token);
- if (type == ERRORTOKEN && PyErr_Occurred()) {
- return NULL;
+ if (type == ERRORTOKEN) {
+ if (!PyErr_Occurred()) {
+ _tokenizer_error(it->tok);
+ assert(PyErr_Occurred());
+ }
+ goto exit;
}
if (type == ERRORTOKEN || type == ENDMARKER) {
PyErr_SetString(PyExc_StopIteration, "EOF");
- return NULL;
+ goto exit;
}
PyObject *str = NULL;
if (token.start == NULL || token.end == NULL) {
@@ -77,28 +186,31 @@ tokenizeriter_next(tokenizeriterobject *it)
str = PyUnicode_FromStringAndSize(token.start, token.end - token.start);
}
if (str == NULL) {
- return NULL;
+ goto exit;
}
Py_ssize_t size = it->tok->inp - it->tok->buf;
PyObject *line = PyUnicode_DecodeUTF8(it->tok->buf, size, "replace");
if (line == NULL) {
Py_DECREF(str);
- return NULL;
+ goto exit;
}
const char *line_start = ISSTRINGLIT(type) ? it->tok->multi_line_start : it->tok->line_start;
- int lineno = ISSTRINGLIT(type) ? it->tok->first_lineno : it->tok->lineno;
- int end_lineno = it->tok->lineno;
- int col_offset = -1;
- int end_col_offset = -1;
+ Py_ssize_t lineno = ISSTRINGLIT(type) ? it->tok->first_lineno : it->tok->lineno;
+ Py_ssize_t end_lineno = it->tok->lineno;
+ Py_ssize_t col_offset = -1;
+ Py_ssize_t end_col_offset = -1;
if (token.start != NULL && token.start >= line_start) {
- col_offset = (int)(token.start - line_start);
+ col_offset = _PyPegen_byte_offset_to_character_offset(line, token.start - line_start);
}
if (token.end != NULL && token.end >= it->tok->line_start) {
- end_col_offset = (int)(token.end - it->tok->line_start);
+ end_col_offset = _PyPegen_byte_offset_to_character_offset(line, token.end - it->tok->line_start);
}
- return Py_BuildValue("(NiiiiiN)", str, type, lineno, end_lineno, col_offset, end_col_offset, line);
+ result = Py_BuildValue("(NinnnnN)", str, type, lineno, end_lineno, col_offset, end_col_offset, line);
+exit:
+ _PyToken_Free(&token);
+ return result;
}
static void
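Note the switch above from raw byte arithmetic ((int)(token.start - line_start)) to _PyPegen_byte_offset_to_character_offset(): column offsets are now reported in characters, matching the old pure-Python tokenizer. For non-ASCII source the two measures diverge, as this plain-Python illustration (not part of the patch) shows:

    # "á" is one character but two UTF-8 bytes, so the offsets differ.
    source = "á = 1\n"
    assert source.encode("utf-8").index(b"=") == 3   # byte offset
    assert source.index("=") == 2                    # character offset, as tokenize reports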
diff --git a/Python/clinic/Python-tokenize.c.h b/Python/clinic/Python-tokenize.c.h
index 6af9374..7e77938 100644
--- a/Python/clinic/Python-tokenize.c.h
+++ b/Python/clinic/Python-tokenize.c.h
@@ -9,7 +9,8 @@ preserve
static PyObject *
-tokenizeriter_new_impl(PyTypeObject *type, const char *source);
+tokenizeriter_new_impl(PyTypeObject *type, const char *source,
+ int extra_tokens);
static PyObject *
tokenizeriter_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)
@@ -17,14 +18,14 @@ tokenizeriter_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)
PyObject *return_value = NULL;
#if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)
- #define NUM_KEYWORDS 1
+ #define NUM_KEYWORDS 2
static struct {
PyGC_Head _this_is_not_used;
PyObject_VAR_HEAD
PyObject *ob_item[NUM_KEYWORDS];
} _kwtuple = {
.ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS)
- .ob_item = { &_Py_ID(source), },
+ .ob_item = { &_Py_ID(source), &_Py_ID(extra_tokens), },
};
#undef NUM_KEYWORDS
#define KWTUPLE (&_kwtuple.ob_base.ob_base)
@@ -33,19 +34,20 @@ tokenizeriter_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)
# define KWTUPLE NULL
#endif // !Py_BUILD_CORE
- static const char * const _keywords[] = {"source", NULL};
+ static const char * const _keywords[] = {"source", "extra_tokens", NULL};
static _PyArg_Parser _parser = {
.keywords = _keywords,
.fname = "tokenizeriter",
.kwtuple = KWTUPLE,
};
#undef KWTUPLE
- PyObject *argsbuf[1];
+ PyObject *argsbuf[2];
PyObject * const *fastargs;
Py_ssize_t nargs = PyTuple_GET_SIZE(args);
const char *source;
+ int extra_tokens;
- fastargs = _PyArg_UnpackKeywords(_PyTuple_CAST(args)->ob_item, nargs, kwargs, NULL, &_parser, 1, 1, 0, argsbuf);
+ fastargs = _PyArg_UnpackKeywords(_PyTuple_CAST(args)->ob_item, nargs, kwargs, NULL, &_parser, 1, 1, 1, argsbuf);
if (!fastargs) {
goto exit;
}
@@ -62,9 +64,13 @@ tokenizeriter_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)
PyErr_SetString(PyExc_ValueError, "embedded null character");
goto exit;
}
- return_value = tokenizeriter_new_impl(type, source);
+ extra_tokens = PyObject_IsTrue(fastargs[1]);
+ if (extra_tokens < 0) {
+ goto exit;
+ }
+ return_value = tokenizeriter_new_impl(type, source, extra_tokens);
exit:
return return_value;
}
-/*[clinic end generated code: output=8c2c09f651961986 input=a9049054013a1b77]*/
+/*[clinic end generated code: output=940b564c67f6e0e2 input=a9049054013a1b77]*/
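The regenerated Argument Clinic code above makes extra_tokens a required keyword-only parameter (the `*` marker in the clinic input) and coerces it to a C int via PyObject_IsTrue(). Behaviorally, assuming the private module is importable, that means roughly:

    # Sketch (private API): extra_tokens must be passed by keyword and is required.
    from _tokenize import TokenizerIter

    it = TokenizerIter("pass\n", extra_tokens=False)   # accepted
    try:
        TokenizerIter("pass\n", False)                 # positional form is rejected
    except TypeError as exc:
        print(exc)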