author     Victor Stinner <vstinner@python.org>    2021-10-13 15:22:14 (GMT)
committer  GitHub <noreply@github.com>             2021-10-13 15:22:14 (GMT)
commit     713bb19356bce9b8f2b95461834fe1dae505f889
tree       c06a7178132f94dd09b3a3df660f5093adf63517 /Parser/pegen.c
parent     3901c081143ef29624f9c1cb49cc70a70321d139
bpo-45434: Mark the PyTokenizer C API as private (GH-28924)
Rename the PyTokenizer functions to mark them as private:
* PyTokenizer_FindEncodingFilename() => _PyTokenizer_FindEncodingFilename()
* PyTokenizer_FromString() => _PyTokenizer_FromString()
* PyTokenizer_FromFile() => _PyTokenizer_FromFile()
* PyTokenizer_FromUTF8() => _PyTokenizer_FromUTF8()
* PyTokenizer_Free() => _PyTokenizer_Free()
* PyTokenizer_Get() => _PyTokenizer_Get()
Remove the unused PyTokenizer_FindEncoding() function.
import.c: remove unused #include "errcode.h".
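For illustration (not part of this commit), the sketch below shows how an internal caller drives the renamed tokenizer API, mirroring the call sites in the Parser/pegen.c diff below: create a tokenizer with _PyTokenizer_FromString(), pull tokens with _PyTokenizer_Get(), and release it with _PyTokenizer_Free(). The includes and the tokenize_source() helper are assumptions for the example; these functions are CPython-internal and only build inside the CPython source tree.

```c
/* Minimal sketch, assuming compilation inside the CPython source tree with
 * access to the internal header Parser/tokenizer.h. tokenize_source() is a
 * hypothetical helper, not a real CPython function. */
#include <Python.h>
#include "tokenizer.h"   /* struct tok_state, _PyTokenizer_* (internal API) */

static int
tokenize_source(const char *source)
{
    /* Second argument is the exec_input flag, as at the pegen.c call sites. */
    struct tok_state *tok = _PyTokenizer_FromString(source, 1);
    if (tok == NULL) {
        return -1;
    }

    const char *start;
    const char *end;
    int type;
    /* Pull tokens until the tokenizer reports an end marker or an error. */
    do {
        type = _PyTokenizer_Get(tok, &start, &end);
    } while (type != ENDMARKER && type != ERRORTOKEN);

    _PyTokenizer_Free(tok);
    return (type == ERRORTOKEN) ? -1 : 0;
}
```

The leading underscore signals that these symbols are private: out-of-tree code should use the public tokenize module or the PyRun_*/Py_CompileString APIs instead, since the private names may change without notice.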
Diffstat (limited to 'Parser/pegen.c')
-rw-r--r--  Parser/pegen.c  16
1 file changed, 8 insertions, 8 deletions
diff --git a/Parser/pegen.c b/Parser/pegen.c
index a989635..e4d2692 100644
--- a/Parser/pegen.c
+++ b/Parser/pegen.c
@@ -729,7 +729,7 @@ _PyPegen_fill_token(Parser *p)
 {
     const char *start;
     const char *end;
-    int type = PyTokenizer_Get(p->tok, &start, &end);
+    int type = _PyTokenizer_Get(p->tok, &start, &end);

     // Record and skip '# type: ignore' comments
     while (type == TYPE_IGNORE) {
@@ -746,7 +746,7 @@ _PyPegen_fill_token(Parser *p)
             PyErr_NoMemory();
             return -1;
         }
-        type = PyTokenizer_Get(p->tok, &start, &end);
+        type = _PyTokenizer_Get(p->tok, &start, &end);
     }

     // If we have reached the end and we are in single input mode we need to insert a newline and reset the parsing
@@ -1306,7 +1306,7 @@ _PyPegen_check_tokenizer_errors(Parser *p)
     for (;;) {
         const char *start;
         const char *end;
-        switch (PyTokenizer_Get(p->tok, &start, &end)) {
+        switch (_PyTokenizer_Get(p->tok, &start, &end)) {
             case ERRORTOKEN:
                 if (p->tok->level != 0) {
                     int error_lineno = p->tok->parenlinenostack[p->tok->level-1];
@@ -1411,7 +1411,7 @@ _PyPegen_run_parser_from_file_pointer(FILE *fp, int start_rule, PyObject *filena
                                       const char *enc, const char *ps1, const char *ps2,
                                       PyCompilerFlags *flags, int *errcode, PyArena *arena)
 {
-    struct tok_state *tok = PyTokenizer_FromFile(fp, enc, ps1, ps2);
+    struct tok_state *tok = _PyTokenizer_FromFile(fp, enc, ps1, ps2);
     if (tok == NULL) {
         if (PyErr_Occurred()) {
             raise_tokenizer_init_error(filename_ob);
@@ -1441,7 +1441,7 @@ _PyPegen_run_parser_from_file_pointer(FILE *fp, int start_rule, PyObject *filena
     _PyPegen_Parser_Free(p);

 error:
-    PyTokenizer_Free(tok);
+    _PyTokenizer_Free(tok);
     return result;
 }

@@ -1453,9 +1453,9 @@ _PyPegen_run_parser_from_string(const char *str, int start_rule, PyObject *filen

     struct tok_state *tok;
     if (flags == NULL || flags->cf_flags & PyCF_IGNORE_COOKIE) {
-        tok = PyTokenizer_FromUTF8(str, exec_input);
+        tok = _PyTokenizer_FromUTF8(str, exec_input);
     } else {
-        tok = PyTokenizer_FromString(str, exec_input);
+        tok = _PyTokenizer_FromString(str, exec_input);
     }
     if (tok == NULL) {
         if (PyErr_Occurred()) {
@@ -1483,7 +1483,7 @@ _PyPegen_run_parser_from_string(const char *str, int start_rule, PyObject *filen
     _PyPegen_Parser_Free(p);

 error:
-    PyTokenizer_Free(tok);
+    _PyTokenizer_Free(tok);
     return result;
 }