diff options
author    | Victor Stinner <vstinner@python.org> | 2022-05-11 21:22:50 (GMT)
committer | GitHub <noreply@github.com>          | 2022-05-11 21:22:50 (GMT)
commit    | da5727a120e426ffaf68bf3a8016491205bd2f80 (patch)
tree      | a1d551bbc55b420de3f7c68b69a05fdf080e83f2 /Parser
parent    | b69297ea23c0ab9866ae8bd26a347a9b5df567a6 (diff)
download  | cpython-da5727a120e426ffaf68bf3a8016491205bd2f80.zip cpython-da5727a120e426ffaf68bf3a8016491205bd2f80.tar.gz cpython-da5727a120e426ffaf68bf3a8016491205bd2f80.tar.bz2
gh-92651: Remove the Include/token.h header file (#92652)
Remove the token.h header file. There was never any public tokenizer
C API. The token.h header file was only designed to be used by Python
internals.
Move Include/token.h to Include/internal/pycore_token.h. Including
this header file now requires that the Py_BUILD_CORE macro is
defined. It no longer checks for the Py_LIMITED_API macro.
Rename functions:
* PyToken_OneChar() => _PyToken_OneChar()
* PyToken_TwoChars() => _PyToken_TwoChars()
* PyToken_ThreeChars() => _PyToken_ThreeChars()
Diffstat (limited to 'Parser')
-rw-r--r-- | Parser/pegen.h     | 2
-rw-r--r-- | Parser/token.c     | 8
-rw-r--r-- | Parser/tokenizer.c | 6
-rw-r--r-- | Parser/tokenizer.h | 2
4 files changed, 9 insertions, 9 deletions
diff --git a/Parser/pegen.h b/Parser/pegen.h
index fe0c327..d6a6e4e 100644
--- a/Parser/pegen.h
+++ b/Parser/pegen.h
@@ -3,8 +3,8 @@
 #define PY_SSIZE_T_CLEAN
 #include <Python.h>
-#include <token.h>
 #include <pycore_ast.h>
+#include <pycore_token.h>

 #if 0
 #define PyPARSE_YIELD_IS_KEYWORD 0x0001
diff --git a/Parser/token.c b/Parser/token.c
index 74bca0e..fa03fbc 100644
--- a/Parser/token.c
+++ b/Parser/token.c
@@ -1,7 +1,7 @@
 /* Auto-generated by Tools/scripts/generate_token.py */

 #include "Python.h"
-#include "token.h"
+#include "pycore_token.h"

 /* Token names */
@@ -76,7 +76,7 @@ const char * const _PyParser_TokenNames[] = {
 /* Return the token corresponding to a single character */

 int
-PyToken_OneChar(int c1)
+_PyToken_OneChar(int c1)
 {
     switch (c1) {
     case '%': return PERCENT;
@@ -107,7 +107,7 @@ PyToken_OneChar(int c1)
 }

 int
-PyToken_TwoChars(int c1, int c2)
+_PyToken_TwoChars(int c1, int c2)
 {
     switch (c1) {
     case '!':
@@ -191,7 +191,7 @@ PyToken_TwoChars(int c1, int c2)
 }

 int
-PyToken_ThreeChars(int c1, int c2, int c3)
+_PyToken_ThreeChars(int c1, int c2, int c3)
 {
     switch (c1) {
     case '*':
diff --git a/Parser/tokenizer.c b/Parser/tokenizer.c
index c450aa8..7c79718 100644
--- a/Parser/tokenizer.c
+++ b/Parser/tokenizer.c
@@ -1992,10 +1992,10 @@ tok_get(struct tok_state *tok, const char **p_start, const char **p_end)
         /* Check for two-character token */
         {
             int c2 = tok_nextc(tok);
-            int token = PyToken_TwoChars(c, c2);
+            int token = _PyToken_TwoChars(c, c2);
             if (token != OP) {
                 int c3 = tok_nextc(tok);
-                int token3 = PyToken_ThreeChars(c, c2, c3);
+                int token3 = _PyToken_ThreeChars(c, c2, c3);
                 if (token3 != OP) {
                     token = token3;
                 }
@@ -2059,7 +2059,7 @@ tok_get(struct tok_state *tok, const char **p_start, const char **p_end)
     /* Punctuation character */
     *p_start = tok->start;
     *p_end = tok->cur;
-    return PyToken_OneChar(c);
+    return _PyToken_OneChar(c);
 }

 int
diff --git a/Parser/tokenizer.h b/Parser/tokenizer.h
index 0cb6651..dba71bd 100644
--- a/Parser/tokenizer.h
+++ b/Parser/tokenizer.h
@@ -8,7 +8,7 @@ extern "C" {

 /* Tokenizer interface */

-#include "token.h" /* For token types */
+#include "pycore_token.h" /* For token types */

 #define MAXINDENT 100 /* Max indentation level */
 #define MAXLEVEL 200  /* Max parentheses level */