author     Lysandros Nikolaou <lisandrosnik@gmail.com>  2020-06-11 16:09:21 (GMT)
committer  GitHub <noreply@github.com>  2020-06-11 16:09:21 (GMT)
commit     bcd7deed9118e365c1225de2a2e1a81bf988c6ab (patch)
tree       de71fff4f10f86dc1fa46d5ca590fbd06dfb1a75
parent     10e6506aa8261aacc89b49e629ae1c927fa5151c (diff)
bpo-40939: Remove PEG parser easter egg (__new_parser__) (#20802)
It no longer serves a purpose (there's only one parser) and having "new" in any name will eventually look odd. Also, it impinges on a potential sub-namespace, `__new_...__`.
-rw-r--r--  Grammar/python.gram    1
-rw-r--r--  Lib/keyword.py         1
-rwxr-xr-x  Lib/pydoc.py           1
-rw-r--r--  Parser/pegen/parse.c  54
4 files changed, 10 insertions, 47 deletions
diff --git a/Grammar/python.gram b/Grammar/python.gram
index 2c350ef..745c14e 100644
--- a/Grammar/python.gram
+++ b/Grammar/python.gram
@@ -477,7 +477,6 @@ atom[expr_ty]:
| 'True' { _Py_Constant(Py_True, NULL, EXTRA) }
| 'False' { _Py_Constant(Py_False, NULL, EXTRA) }
| 'None' { _Py_Constant(Py_None, NULL, EXTRA) }
- | '__new_parser__' { RAISE_SYNTAX_ERROR("You found it!") }
| &STRING strings
| NUMBER
| &'(' (tuple | group | genexp)
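
For context, a hedged sketch of what the removed grammar alternative did: compiling the bare name __new_parser__ tripped the RAISE_SYNTAX_ERROR("You found it!") action shown above. The snippet is illustrative; on a build with this commit applied, the compile() call simply succeeds and nothing is printed.

    # Illustrative only: the SyntaxError below is what a build *before* this
    # commit raised; with the alternative removed, compile() simply succeeds.
    try:
        compile("__new_parser__", "<easter-egg>", "eval")
    except SyntaxError as exc:
        print(exc.msg)  # "You found it!" on pre-removal builds
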
diff --git a/Lib/keyword.py b/Lib/keyword.py
index afc3db3..b6a9982 100644
--- a/Lib/keyword.py
+++ b/Lib/keyword.py
@@ -19,7 +19,6 @@ kwlist = [
'False',
'None',
'True',
- '__new_parser__',
'and',
'as',
'assert',
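
The Lib/keyword.py list feeds keyword.kwlist and keyword.iskeyword(), so dropping the entry keeps the module in sync with the grammar. A minimal check one might run (a sketch, assuming a build with this commit applied):

    import keyword

    # With the easter egg gone, __new_parser__ is an ordinary identifier again.
    print(keyword.iskeyword("__new_parser__"))   # False
    print("__new_parser__" in keyword.kwlist)    # False
    print(keyword.iskeyword("and"))              # True, real keywords are unaffected
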
diff --git a/Lib/pydoc.py b/Lib/pydoc.py
index a5368bf..628f9fc 100755
--- a/Lib/pydoc.py
+++ b/Lib/pydoc.py
@@ -1817,7 +1817,6 @@ class Helper:
'False': '',
'None': '',
'True': '',
- '__new_parser__': '',
'and': 'BOOLEAN',
'as': 'with',
'assert': ('assert', ''),
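
The pydoc change mirrors the keyword.py one: Helper.keywords maps each reserved word to its help topic, so an entry for a name the parser no longer reserves would leave interactive help() out of step with the grammar. A quick hedged check (again assuming a build with this commit applied):

    import keyword
    import pydoc

    # Every keyword pydoc offers help for should still be a real keyword.
    print("__new_parser__" in pydoc.Helper.keywords)          # False
    print(set(pydoc.Helper.keywords) <= set(keyword.kwlist))  # expected: True
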
diff --git a/Parser/pegen/parse.c b/Parser/pegen/parse.c
index 4f13bf7..d28e6c8 100644
--- a/Parser/pegen/parse.c
+++ b/Parser/pegen/parse.c
@@ -7,7 +7,7 @@ extern int Py_DebugFlag;
#else
#define D(x)
#endif
-static const int n_keyword_lists = 15;
+static const int n_keyword_lists = 9;
static KeywordToken *reserved_keywords[] = {
NULL,
NULL,
@@ -15,8 +15,8 @@ static KeywordToken *reserved_keywords[] = {
{"if", 510},
{"in", 518},
{"is", 526},
- {"as", 531},
- {"or", 532},
+ {"as", 530},
+ {"or", 531},
{NULL, -1},
},
(KeywordToken[]) {
@@ -25,7 +25,7 @@ static KeywordToken *reserved_keywords[] = {
{"for", 517},
{"def", 522},
{"not", 525},
- {"and", 533},
+ {"and", 532},
{NULL, -1},
},
(KeywordToken[]) {
@@ -65,15 +65,6 @@ static KeywordToken *reserved_keywords[] = {
{"nonlocal", 509},
{NULL, -1},
},
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- (KeywordToken[]) {
- {"__new_parser__", 530},
- {NULL, -1},
- },
};
#define file_type 1000
#define interactive_type 1001
@@ -10567,7 +10558,6 @@ slice_rule(Parser *p)
// | 'True'
// | 'False'
// | 'None'
-// | '__new_parser__'
// | &STRING strings
// | NUMBER
// | &'(' (tuple | group | genexp)
@@ -10711,30 +10701,6 @@ atom_rule(Parser *p)
D(fprintf(stderr, "%*c%s atom[%d-%d]: %s failed!\n", p->level, ' ',
p->error_indicator ? "ERROR!" : "-", _mark, p->mark, "'None'"));
}
- { // '__new_parser__'
- if (p->error_indicator) {
- D(p->level--);
- return NULL;
- }
- D(fprintf(stderr, "%*c> atom[%d-%d]: %s\n", p->level, ' ', _mark, p->mark, "'__new_parser__'"));
- Token * _keyword;
- if (
- (_keyword = _PyPegen_expect_token(p, 530)) // token='__new_parser__'
- )
- {
- D(fprintf(stderr, "%*c+ atom[%d-%d]: %s succeeded!\n", p->level, ' ', _mark, p->mark, "'__new_parser__'"));
- _res = RAISE_SYNTAX_ERROR ( "You found it!" );
- if (_res == NULL && PyErr_Occurred()) {
- p->error_indicator = 1;
- D(p->level--);
- return NULL;
- }
- goto done;
- }
- p->mark = _mark;
- D(fprintf(stderr, "%*c%s atom[%d-%d]: %s failed!\n", p->level, ' ',
- p->error_indicator ? "ERROR!" : "-", _mark, p->mark, "'__new_parser__'"));
- }
{ // &STRING strings
if (p->error_indicator) {
D(p->level--);
@@ -17313,7 +17279,7 @@ _tmp_34_rule(Parser *p)
Token * _keyword;
expr_ty z;
if (
- (_keyword = _PyPegen_expect_token(p, 531)) // token='as'
+ (_keyword = _PyPegen_expect_token(p, 530)) // token='as'
&&
(z = _PyPegen_name_token(p)) // NAME
)
@@ -17471,7 +17437,7 @@ _tmp_37_rule(Parser *p)
Token * _keyword;
expr_ty z;
if (
- (_keyword = _PyPegen_expect_token(p, 531)) // token='as'
+ (_keyword = _PyPegen_expect_token(p, 530)) // token='as'
&&
(z = _PyPegen_name_token(p)) // NAME
)
@@ -17971,7 +17937,7 @@ _tmp_46_rule(Parser *p)
Token * _keyword;
expr_ty t;
if (
- (_keyword = _PyPegen_expect_token(p, 531)) // token='as'
+ (_keyword = _PyPegen_expect_token(p, 530)) // token='as'
&&
(t = target_rule(p)) // target
)
@@ -18086,7 +18052,7 @@ _tmp_48_rule(Parser *p)
Token * _keyword;
expr_ty z;
if (
- (_keyword = _PyPegen_expect_token(p, 531)) // token='as'
+ (_keyword = _PyPegen_expect_token(p, 530)) // token='as'
&&
(z = _PyPegen_name_token(p)) // NAME
)
@@ -23892,7 +23858,7 @@ _tmp_144_rule(Parser *p)
Token * _keyword;
expr_ty c;
if (
- (_keyword = _PyPegen_expect_token(p, 532)) // token='or'
+ (_keyword = _PyPegen_expect_token(p, 531)) // token='or'
&&
(c = conjunction_rule(p)) // conjunction
)
@@ -23936,7 +23902,7 @@ _tmp_145_rule(Parser *p)
Token * _keyword;
expr_ty c;
if (
- (_keyword = _PyPegen_expect_token(p, 533)) // token='and'
+ (_keyword = _PyPegen_expect_token(p, 532)) // token='and'
&&
(c = inversion_rule(p)) // inversion
)
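
The parse.c hunks are mechanical fallout from rerunning the parser generator. The reserved_keywords table appears to be indexed by keyword length (hence the NULL slots for lengths with no keywords), so dropping the 14-character __new_parser__ lets n_keyword_lists shrink from 15 to 9 (nothing longer than 8 characters, e.g. nonlocal, remains), and the keyword token numbers assigned after it each slide down by one (as 531 -> 530, or 532 -> 531, and 533 -> 532), which is why every affected _PyPegen_expect_token call site is touched. Below is a rough Python sketch of that length-indexed lookup, using only the entries visible in the hunks above; the names are illustrative, not the generator's actual code.

    # Partial table, with the token numbers as they stand after this commit.
    RESERVED_BY_LENGTH = {
        2: {"if": 510, "in": 518, "is": 526, "as": 530, "or": 531},
        3: {"for": 517, "def": 522, "not": 525, "and": 532},
        8: {"nonlocal": 509},
    }

    def keyword_token(name):
        """Return the token number for a reserved word, or None for plain names."""
        return RESERVED_BY_LENGTH.get(len(name), {}).get(name)

    print(keyword_token("as"))    # 530 (was 531 before the removal)
    print(keyword_token("spam"))  # None, not a keyword
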