summary refs log tree commit diff stats
path: root/Python/Python-tokenize.c
diff options
context:
space:
mode:
authorMiss Islington (bot) <31488909+miss-islington@users.noreply.github.com>2024-06-12 19:10:35 (GMT)
committerGitHub <noreply@github.com>2024-06-12 19:10:35 (GMT)
commitf75abf8bcf1664e72ac7f031bde4cbd1349fce42 (patch)
tree5ff81ab77fdcad0f29b30ca5bc2c075031345e77 /Python/Python-tokenize.c
parent319233f137a4ced96ef7def6f3e1645c4b28bb88 (diff)
downloadcpython-f75abf8bcf1664e72ac7f031bde4cbd1349fce42.zip
cpython-f75abf8bcf1664e72ac7f031bde4cbd1349fce42.tar.gz
cpython-f75abf8bcf1664e72ac7f031bde4cbd1349fce42.tar.bz2
[3.12] gh-120343: Fix column offsets of multiline tokens in tokenize (GH-120391) (#120428)
(cherry picked from commit 4b5d3e0e721a952f4ac9d17bee331e6dfe543dcd) Co-authored-by: Lysandros Nikolaou <lisandrosnik@gmail.com>
Diffstat (limited to 'Python/Python-tokenize.c')
-rw-r--r--Python/Python-tokenize.c14
1 file changed, 10 insertions, 4 deletions
diff --git a/Python/Python-tokenize.c b/Python/Python-tokenize.c
index ebcd9ce..baad836 100644
--- a/Python/Python-tokenize.c
+++ b/Python/Python-tokenize.c
@@ -214,6 +214,7 @@ tokenizeriter_next(tokenizeriterobject *it)
const char *line_start = ISSTRINGLIT(type) ? it->tok->multi_line_start : it->tok->line_start;
PyObject* line = NULL;
+ int line_changed = 1;
if (it->tok->tok_extra_tokens && is_trailing_token) {
line = PyUnicode_FromString("");
} else {
@@ -228,12 +229,11 @@ tokenizeriter_next(tokenizeriterobject *it)
Py_XDECREF(it->last_line);
line = PyUnicode_DecodeUTF8(line_start, size, "replace");
it->last_line = line;
- if (it->tok->lineno != it->last_end_lineno) {
- it->byte_col_offset_diff = 0;
- }
+ it->byte_col_offset_diff = 0;
} else {
// Line hasn't changed so we reuse the cached one.
line = it->last_line;
+ line_changed = 0;
}
}
if (line == NULL) {
@@ -251,7 +251,13 @@ tokenizeriter_next(tokenizeriterobject *it)
Py_ssize_t byte_offset = -1;
if (token.start != NULL && token.start >= line_start) {
byte_offset = token.start - line_start;
- col_offset = byte_offset - it->byte_col_offset_diff;
+ if (line_changed) {
+ col_offset = _PyPegen_byte_offset_to_character_offset_line(line, 0, byte_offset);
+ it->byte_col_offset_diff = byte_offset - col_offset;
+ }
+ else {
+ col_offset = byte_offset - it->byte_col_offset_diff;
+ }
}
if (token.end != NULL && token.end >= it->tok->line_start) {
Py_ssize_t end_byte_offset = token.end - it->tok->line_start;