summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMiss Islington (bot) <31488909+miss-islington@users.noreply.github.com>2024-05-28 20:47:45 (GMT)
committerGitHub <noreply@github.com>2024-05-28 20:47:45 (GMT)
commit0d0be6b3efeace4743329f81c08f9720cc221207 (patch)
treeafeec3b81a48161eef6c675658a9e7aac55aaf36
parentc0e99617985d64e6134964f758ae0a1a20f9f433 (diff)
downloadcpython-0d0be6b3efeace4743329f81c08f9720cc221207.zip
cpython-0d0be6b3efeace4743329f81c08f9720cc221207.tar.gz
cpython-0d0be6b3efeace4743329f81c08f9720cc221207.tar.bz2
[3.13] gh-119118: Fix performance regression in tokenize module (GH-119615) (#119682)
- Cache line object to avoid creating a Unicode object for all of the tokens in the same line.
- Speed up byte offset to column offset conversion by using the smallest buffer possible to measure the difference.

(cherry picked from commit d87b0151062e36e67f9e42e1595fba5bf23a485c)

Co-authored-by: Lysandros Nikolaou <lisandrosnik@gmail.com>
Co-authored-by: Pablo Galindo <pablogsal@gmail.com>
-rw-r--r--Misc/NEWS.d/next/Library/2024-05-28-12-15-03.gh-issue-119118.FMKz1F.rst2
-rw-r--r--Parser/pegen.c25
-rw-r--r--Parser/pegen.h1
-rw-r--r--Python/Python-tokenize.c44
4 files changed, 68 insertions, 4 deletions
diff --git a/Misc/NEWS.d/next/Library/2024-05-28-12-15-03.gh-issue-119118.FMKz1F.rst b/Misc/NEWS.d/next/Library/2024-05-28-12-15-03.gh-issue-119118.FMKz1F.rst
new file mode 100644
index 0000000..3cf6166
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2024-05-28-12-15-03.gh-issue-119118.FMKz1F.rst
@@ -0,0 +1,2 @@
+Fix performance regression in the :mod:`tokenize` module by caching the ``line``
+token attribute and calculating the column offset more efficiently.
diff --git a/Parser/pegen.c b/Parser/pegen.c
index 3d3e645..2955eab 100644
--- a/Parser/pegen.c
+++ b/Parser/pegen.c
@@ -19,6 +19,31 @@ _PyPegen_interactive_exit(Parser *p)
}
Py_ssize_t
+_PyPegen_byte_offset_to_character_offset_line(PyObject *line, Py_ssize_t col_offset, Py_ssize_t end_col_offset)
+{
+ const char *data = PyUnicode_AsUTF8(line);
+
+ Py_ssize_t len = 0;
+ while (col_offset < end_col_offset) {
+ Py_UCS4 ch = data[col_offset];
+ if (ch < 0x80) {
+ col_offset += 1;
+ } else if ((ch & 0xe0) == 0xc0) {
+ col_offset += 2;
+ } else if ((ch & 0xf0) == 0xe0) {
+ col_offset += 3;
+ } else if ((ch & 0xf8) == 0xf0) {
+ col_offset += 4;
+ } else {
+ PyErr_SetString(PyExc_ValueError, "Invalid UTF-8 sequence");
+ return -1;
+ }
+ len++;
+ }
+ return len;
+}
+
+Py_ssize_t
_PyPegen_byte_offset_to_character_offset_raw(const char* str, Py_ssize_t col_offset)
{
Py_ssize_t len = strlen(str);
diff --git a/Parser/pegen.h b/Parser/pegen.h
index 57b45a5..32c64e7 100644
--- a/Parser/pegen.h
+++ b/Parser/pegen.h
@@ -148,6 +148,7 @@ int _PyPegen_fill_token(Parser *p);
expr_ty _PyPegen_name_token(Parser *p);
expr_ty _PyPegen_number_token(Parser *p);
void *_PyPegen_string_token(Parser *p);
+Py_ssize_t _PyPegen_byte_offset_to_character_offset_line(PyObject *line, Py_ssize_t col_offset, Py_ssize_t end_col_offset);
Py_ssize_t _PyPegen_byte_offset_to_character_offset(PyObject *line, Py_ssize_t col_offset);
Py_ssize_t _PyPegen_byte_offset_to_character_offset_raw(const char*, Py_ssize_t col_offset);
diff --git a/Python/Python-tokenize.c b/Python/Python-tokenize.c
index 41e8107..9cc4b45 100644
--- a/Python/Python-tokenize.c
+++ b/Python/Python-tokenize.c
@@ -32,6 +32,11 @@ typedef struct
{
PyObject_HEAD struct tok_state *tok;
int done;
+
+ /* Needed to cache line for performance */
+ PyObject *last_line;
+ Py_ssize_t last_lineno;
+ Py_ssize_t byte_col_offset_diff;
} tokenizeriterobject;
/*[clinic input]
@@ -68,6 +73,11 @@ tokenizeriter_new_impl(PyTypeObject *type, PyObject *readline,
self->tok->tok_extra_tokens = 1;
}
self->done = 0;
+
+ self->last_line = NULL;
+ self->byte_col_offset_diff = 0;
+ self->last_lineno = 0;
+
return (PyObject *)self;
}
@@ -210,7 +220,18 @@ tokenizeriter_next(tokenizeriterobject *it)
if (size >= 1 && it->tok->implicit_newline) {
size -= 1;
}
- line = PyUnicode_DecodeUTF8(line_start, size, "replace");
+
+ if (it->tok->lineno != it->last_lineno) {
+ // Line has changed since last token, so we fetch the new line and cache it
+ // in the iter object.
+ Py_XDECREF(it->last_line);
+ line = PyUnicode_DecodeUTF8(line_start, size, "replace");
+ it->last_line = line;
+ it->byte_col_offset_diff = 0;
+ } else {
+ // Line hasn't changed so we reuse the cached one.
+ line = it->last_line;
+ }
}
if (line == NULL) {
Py_DECREF(str);
@@ -219,13 +240,28 @@ tokenizeriter_next(tokenizeriterobject *it)
Py_ssize_t lineno = ISSTRINGLIT(type) ? it->tok->first_lineno : it->tok->lineno;
Py_ssize_t end_lineno = it->tok->lineno;
+ it->last_lineno = lineno;
+
Py_ssize_t col_offset = -1;
Py_ssize_t end_col_offset = -1;
+ Py_ssize_t byte_offset = -1;
if (token.start != NULL && token.start >= line_start) {
- col_offset = _PyPegen_byte_offset_to_character_offset(line, token.start - line_start);
+ byte_offset = token.start - line_start;
+ col_offset = byte_offset - it->byte_col_offset_diff;
}
if (token.end != NULL && token.end >= it->tok->line_start) {
- end_col_offset = _PyPegen_byte_offset_to_character_offset_raw(it->tok->line_start, token.end - it->tok->line_start);
+ Py_ssize_t end_byte_offset = token.end - it->tok->line_start;
+ if (lineno == end_lineno) {
+ // If the whole token is at the same line, we can just use the token.start
+ // buffer for figuring out the new column offset, since using line is not
+ // performant for very long lines.
+ Py_ssize_t token_col_offset = _PyPegen_byte_offset_to_character_offset_line(line, byte_offset, end_byte_offset);
+ end_col_offset = col_offset + token_col_offset;
+ it->byte_col_offset_diff += token.end - token.start - token_col_offset;
+ } else {
+ end_col_offset = _PyPegen_byte_offset_to_character_offset_raw(it->tok->line_start, end_byte_offset);
+ it->byte_col_offset_diff += end_byte_offset - end_col_offset;
+ }
}
if (it->tok->tok_extra_tokens) {
@@ -262,7 +298,7 @@ tokenizeriter_next(tokenizeriterobject *it)
}
}
- result = Py_BuildValue("(iN(nn)(nn)N)", type, str, lineno, col_offset, end_lineno, end_col_offset, line);
+ result = Py_BuildValue("(iN(nn)(nn)O)", type, str, lineno, col_offset, end_lineno, end_col_offset, line);
exit:
_PyToken_Free(&token);
if (type == ENDMARKER) {