author     Michael J. Sullivan <sully@msully.net>    2019-05-22 14:54:20 (GMT)
committer  Ivan Levkivskyi <levkivskyi@gmail.com>    2019-05-22 14:54:20 (GMT)
commit     933e1509ec6efa8e6ab8c8c7ce02059ce2b6d9b9 (patch)
tree       97980dec3873370773b481e2bc1f08f9f1624b9d /Parser
parent     4c7a46eb3c009c85ddf2eb315d94d804745187d4 (diff)
bpo-36878: Track extra text added to 'type: ignore' in the AST (GH-13479)
GH-13238 made the parser accept extra text after a # type: ignore comment.
This finishes the job by plumbing that extra text through the parser and
making it available in the AST.
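
With the new string field on TypeIgnore, the text that follows "ignore" becomes
visible from Python once a module is parsed with type comments enabled. A
minimal sketch of what that looks like through the ast module (assuming a
3.8+ interpreter that includes this change; the sample source line is only
illustrative):

    import ast

    source = "x = []  # type: ignore[var-annotated]  see discussion\n"

    # TYPE_IGNORE tokens are only produced when type_comments=True is passed.
    tree = ast.parse(source, type_comments=True)

    # Module.type_ignores holds one TypeIgnore(lineno, tag) node per ignore
    # comment; tag carries whatever text followed the word "ignore".
    for ignore in tree.type_ignores:
        print(ignore.lineno, repr(ignore.tag))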
Diffstat (limited to 'Parser')
-rw-r--r--  Parser/Python.asdl  |  3
-rw-r--r--  Parser/parsetok.c   | 43
-rw-r--r--  Parser/tokenizer.c  |  8

3 files changed, 35 insertions, 19 deletions
diff --git a/Parser/Python.asdl b/Parser/Python.asdl
index 626fa4f..882f5d1 100644
--- a/Parser/Python.asdl
+++ b/Parser/Python.asdl
@@ -125,6 +125,5 @@ module Python
 
     withitem = (expr context_expr, expr? optional_vars)
 
-    type_ignore = TypeIgnore(int lineno)
+    type_ignore = TypeIgnore(int lineno, string tag)
 }
-
diff --git a/Parser/parsetok.c b/Parser/parsetok.c
index 31be0eb..55fd7f7 100644
--- a/Parser/parsetok.c
+++ b/Parser/parsetok.c
@@ -16,13 +16,16 @@ static node *parsetok(struct tok_state *, grammar *, int, perrdetail *, int *);
 static int initerr(perrdetail *err_ret, PyObject * filename);
 
 typedef struct {
-    int *items;
+    struct {
+        int lineno;
+        char *comment;
+    } *items;
     size_t size;
     size_t num_items;
-} growable_int_array;
+} growable_comment_array;
 
 static int
-growable_int_array_init(growable_int_array *arr, size_t initial_size) {
+growable_comment_array_init(growable_comment_array *arr, size_t initial_size) {
     assert(initial_size > 0);
     arr->items = malloc(initial_size * sizeof(*arr->items));
     arr->size = initial_size;
@@ -32,7 +35,7 @@ growable_int_array_init(growable_int_array *arr, size_t initial_size) {
 }
 
 static int
-growable_int_array_add(growable_int_array *arr, int item) {
+growable_comment_array_add(growable_comment_array *arr, int lineno, char *comment) {
     if (arr->num_items >= arr->size) {
         arr->size *= 2;
         arr->items = realloc(arr->items, arr->size * sizeof(*arr->items));
@@ -41,13 +44,17 @@ growable_int_array_add(growable_int_array *arr, int item) {
         }
     }
 
-    arr->items[arr->num_items] = item;
+    arr->items[arr->num_items].lineno = lineno;
+    arr->items[arr->num_items].comment = comment;
     arr->num_items++;
     return 1;
 }
 
 static void
-growable_int_array_deallocate(growable_int_array *arr) {
+growable_comment_array_deallocate(growable_comment_array *arr) {
+    for (unsigned i = 0; i < arr->num_items; i++) {
+        PyObject_FREE(arr->items[i].comment);
+    }
     free(arr->items);
 }
 
@@ -220,9 +227,9 @@ parsetok(struct tok_state *tok, grammar *g, int start, perrdetail *err_ret,
     node *n;
     int started = 0;
     int col_offset, end_col_offset;
-    growable_int_array type_ignores;
+    growable_comment_array type_ignores;
 
-    if (!growable_int_array_init(&type_ignores, 10)) {
+    if (!growable_comment_array_init(&type_ignores, 10)) {
         err_ret->error = E_NOMEM;
         PyTokenizer_Free(tok);
         return NULL;
@@ -320,8 +327,7 @@ parsetok(struct tok_state *tok, grammar *g, int start, perrdetail *err_ret,
         }
 
         if (type == TYPE_IGNORE) {
-            PyObject_FREE(str);
-            if (!growable_int_array_add(&type_ignores, tok->lineno)) {
+            if (!growable_comment_array_add(&type_ignores, tok->lineno, str)) {
                 err_ret->error = E_NOMEM;
                 break;
             }
@@ -355,9 +361,16 @@ parsetok(struct tok_state *tok, grammar *g, int start, perrdetail *err_ret,
             REQ(ch, ENDMARKER);
 
             for (i = 0; i < type_ignores.num_items; i++) {
-                PyNode_AddChild(ch, TYPE_IGNORE, NULL,
-                                type_ignores.items[i], 0,
-                                type_ignores.items[i], 0);
+                int res = PyNode_AddChild(ch, TYPE_IGNORE, type_ignores.items[i].comment,
+                                          type_ignores.items[i].lineno, 0,
+                                          type_ignores.items[i].lineno, 0);
+                if (res != 0) {
+                    err_ret->error = res;
+                    PyNode_Free(n);
+                    n = NULL;
+                    break;
+                }
+                type_ignores.items[i].comment = NULL;
             }
         }
 
@@ -365,7 +378,7 @@ parsetok(struct tok_state *tok, grammar *g, int start, perrdetail *err_ret,
        is a single statement by looking at what is left in the
       buffer after parsing.  Trailing whitespace and comments
       are OK.  */
-    if (start == single_input) {
+    if (err_ret->error == E_DONE && start == single_input) {
        char *cur = tok->cur;
        char c = *tok->cur;
 
@@ -392,7 +405,7 @@ parsetok(struct tok_state *tok, grammar *g, int start, perrdetail *err_ret,
     else
         n = NULL;
 
-    growable_int_array_deallocate(&type_ignores);
+    growable_comment_array_deallocate(&type_ignores);
 
 #ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
     *flags = ps->p_flags;
diff --git a/Parser/tokenizer.c b/Parser/tokenizer.c
index e52d498..9b269af 100644
--- a/Parser/tokenizer.c
+++ b/Parser/tokenizer.c
@@ -1269,6 +1269,7 @@ tok_get(struct tok_state *tok, char **p_start, char **p_end)
         /* This is a type comment if we matched all of type_comment_prefix. */
         if (!*prefix) {
             int is_type_ignore = 1;
+            const char *ignore_end = p + 6;
             tok_backup(tok, c);  /* don't eat the newline or EOF */
 
             type_start = p;
@@ -1276,10 +1277,13 @@ tok_get(struct tok_state *tok, char **p_start, char **p_end)
             /* A TYPE_IGNORE is "type: ignore" followed by the end of the token
              * or anything non-alphanumeric. */
             is_type_ignore = (
-                tok->cur >= p + 6 && memcmp(p, "ignore", 6) == 0
-                && !(tok->cur > p + 6 && isalnum(p[6])));
+                tok->cur >= ignore_end && memcmp(p, "ignore", 6) == 0
+                && !(tok->cur > ignore_end && isalnum(p[6])));
 
             if (is_type_ignore) {
+                *p_start = (char *) ignore_end;
+                *p_end = tok->cur;
+
                 /* If this type ignore is the only thing on the line, consume the newline also. */
                 if (blankline) {
                     tok_nextc(tok);
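
Because the tokenizer now points the TYPE_IGNORE token at the text immediately
after "ignore" (from ignore_end up to tok->cur), a bare ignore and a suffixed
one can be told apart downstream. A rough illustration, again via the ast
module; the exact whitespace preserved in the tag is an assumption here, not
something this diff pins down:

    import ast

    samples = [
        "a = 1  # type: ignore\n",
        "b = 2  # type: ignore[assignment]\n",
        "c = 3  # type: ignore  plus free-form text\n",
    ]

    for src in samples:
        mod = ast.parse(src, type_comments=True)
        # Expect an empty tag for the bare form and the raw suffix otherwise.
        print([(ti.lineno, ti.tag) for ti in mod.type_ignores])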