summaryrefslogtreecommitdiffstats
path: root/Lib/test/test_peg_generator
diff options
context:
space:
mode:
authorPablo Galindo <Pablogsal@gmail.com>2020-09-03 14:29:55 (GMT)
committerGitHub <noreply@github.com>2020-09-03 14:29:55 (GMT)
commite55a0e971b4bf9e6473bf0ca6bebdff76c075ef6 (patch)
tree5dca77a9c05b0075139bdbd4762a643f782e114e /Lib/test/test_peg_generator
parent315a61f7a9418d904e0eea14b1f054fac3a90e9f (diff)
downloadcpython-e55a0e971b4bf9e6473bf0ca6bebdff76c075ef6.zip
cpython-e55a0e971b4bf9e6473bf0ca6bebdff76c075ef6.tar.gz
cpython-e55a0e971b4bf9e6473bf0ca6bebdff76c075ef6.tar.bz2
Fix 'gather' rules in the python parser generator (GH-22021)
Currently, empty sequences in gather rules cause the conditional for gather rules to fail, because empty sequences evaluate as falsy. We need to explicitly check for "None" (the failure condition) to avoid false negatives.
Diffstat (limited to 'Lib/test/test_peg_generator')
-rw-r--r--Lib/test/test_peg_generator/test_pegen.py16
1 file changed, 15 insertions, 1 deletion
diff --git a/Lib/test/test_peg_generator/test_pegen.py b/Lib/test/test_peg_generator/test_pegen.py
index 5b4e964..bcfee3f 100644
--- a/Lib/test/test_peg_generator/test_pegen.py
+++ b/Lib/test/test_peg_generator/test_pegen.py
@@ -74,7 +74,7 @@ class TestPegen(unittest.TestCase):
"Rule('term', 'int', Rhs([Alt([NamedItem(None, NameLeaf('NUMBER'))])]))"
)
- def test_repeat_with_separator_rules(self) -> None:
+ def test_gather(self) -> None:
grammar = """
start: ','.thing+ NEWLINE
thing: NUMBER
@@ -85,6 +85,20 @@ class TestPegen(unittest.TestCase):
"Rule('start', None, Rhs([Alt([NamedItem(None, Gather(StringLeaf(\"','\"), NameLeaf('thing'"
))
self.assertEqual(str(rules["thing"]), "thing: NUMBER")
+ parser_class = make_parser(grammar)
+ node = parse_string("42\n", parser_class)
+ assert node == [
+ [[TokenInfo(NUMBER, string="42", start=(1, 0), end=(1, 2), line="42\n")]],
+ TokenInfo(NEWLINE, string="\n", start=(1, 2), end=(1, 3), line="42\n"),
+ ]
+ node = parse_string("1, 2\n", parser_class)
+ assert node == [
+ [
+ [TokenInfo(NUMBER, string="1", start=(1, 0), end=(1, 1), line="1, 2\n")],
+ [TokenInfo(NUMBER, string="2", start=(1, 3), end=(1, 4), line="1, 2\n")],
+ ],
+ TokenInfo(NEWLINE, string="\n", start=(1, 4), end=(1, 5), line="1, 2\n"),
+ ]
def test_expr_grammar(self) -> None:
grammar = """