path: root/Parser/pgen/metaparser.py
"""Parser for the Python metagrammar"""

import io
import tokenize  # from stdlib

from .automata import NFA, NFAState


class GrammarParser:
    """Parser for Python grammar files."""

    _translation_table = {
        tokenize.NAME: "NAME",
        tokenize.STRING: "STRING",
        tokenize.NEWLINE: "NEWLINE",
        tokenize.NL: "NL",
        tokenize.OP: "OP",
        tokenize.ENDMARKER: "ENDMARKER",
        tokenize.COMMENT: "COMMENT",
    }

    def __init__(self, grammar):
        self.grammar = grammar
        grammar_adaptor = io.StringIO(grammar)
        self.generator = tokenize.generate_tokens(grammar_adaptor.readline)
        self._gettoken()  # Initialize lookahead
        self._current_rule_name = None

    def parse(self):
        """Turn the grammar into a collection of NFAs"""
        # grammar: (NEWLINE | rule)* ENDMARKER
        while self.type != tokenize.ENDMARKER:
            while self.type == tokenize.NEWLINE:
                self._gettoken()
            # rule: NAME ':' rhs NEWLINE
            self._current_rule_name = self._expect(tokenize.NAME)
            self._expect(tokenize.OP, ":")
            a, z = self._parse_rhs()
            self._expect(tokenize.NEWLINE)

            yield NFA(a, z)
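            # Illustrative example (not part of the original comments): a
            # metagrammar rule such as
            #
            #     pass_stmt: 'pass'
            #
            # yields one NFA whose start and end states are both tagged with the
            # rule name "pass_stmt".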

    def _parse_rhs(self):
        # rhs: items ('|' items)*
        a, z = self._parse_items()
        if self.value != "|":
            return a, z
        else:
            aa = NFAState(self._current_rule_name)
            zz = NFAState(self._current_rule_name)
            while True:
                # Connect the new start state to the start of this alternative, and
                # the end of this alternative to the new end state, so that any one
                # of the alternatives can be taken.
                aa.add_arc(a)
                z.add_arc(zz)
                if self.value != "|":
                    break

                self._gettoken()
                a, z = self._parse_items()
            return aa, zz
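            # Illustrative sketch (not part of the original comments): for an rhs
            # such as "A | B" the loop above produces
            #
            #         +--eps--> start(A) ... end(A) --eps--+
            #     aa -+                                    +-> zz
            #         +--eps--> start(B) ... end(B) --eps--+
            #
            # where "eps" marks the unlabelled (epsilon) arcs added by add_arc().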

    def _parse_items(self):
        # items: item+
        a, b = self._parse_item()
        while self.type in (tokenize.NAME, tokenize.STRING) or self.value in ("(", "["):
            c, d = self._parse_item()
            # Add an epsilon transition from the end of the previous item to the
            # beginning of the new one, chaining all the items together. The end
            # is therefore reachable only after every item has been visited.
            b.add_arc(c)
            b = d
        return a, b
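        # Illustrative sketch (not part of the original comments): for items
        # "A B C" the loop chains the per-item automata with epsilon arcs:
        #
        #     A.start ... A.end --eps--> B.start ... B.end --eps--> C.start ... C.end
        #
        # and returns the outermost (start, end) pair.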

    def _parse_item(self):
        # item: '[' rhs ']' | atom ['+' | '*']
        if self.value == "[":
            self._gettoken()
            a, z = self._parse_rhs()
            self._expect(tokenize.OP, "]")
            # Make a transition from the beginning to the end so it is possible to
            # advance for free to the next state of this item without consuming
            # anything from the rhs.
            a.add_arc(z)
            return a, z
        else:
            a, z = self._parse_atom()
            value = self.value
            if value not in ("+", "*"):
                return a, z
            self._gettoken()
            z.add_arc(a)
            if value == "+":
                # The cycle back to the beginning lets this item repeat, but the
                # end state stays distinct, so at least one traversal is required.
                return a, z
            else:
                # For '*' the start state doubles as the end state, so the item
                # can be skipped entirely or repeated any number of times.
                return a, a
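        # Illustrative summary (not part of the original comments) of the three
        # suffix forms handled above, with "eps" for unlabelled arcs:
        #
        #     atom+   : a ... z  plus  z --eps--> a   -> returns (a, z); at least
        #               one traversal is needed to reach the end
        #     atom*   : same arcs, but returns (a, a), so zero traversals are
        #               also accepted
        #     [rhs]   : extra  a --eps--> z  arc, so the whole item can be skipped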

    def _parse_atom(self):
        # atom: '(' rhs ')' | NAME | STRING
        if self.value == "(":
            self._gettoken()
            a, z = self._parse_rhs()
            self._expect(tokenize.OP, ")")
            return a, z
        elif self.type in (tokenize.NAME, tokenize.STRING):
            a = NFAState(self._current_rule_name)
            z = NFAState(self._current_rule_name)
            # We can move to the next state only by consuming this token's value.
            a.add_arc(z, self.value)
            self._gettoken()
            return a, z
        else:
            self._raise_error(
                "expected (...) or NAME or STRING, got {} ({})",
                self._translation_table.get(self.type, self.type),
                self.value,
            )

    def _expect(self, type_, value=None):
        if self.type != type_:
            self._raise_error(
                "expected {}, got {} ({})",
                self._translation_table.get(type_, type_),
                self._translation_table.get(self.type, self.type),
                self.value,
            )
        if value is not None and self.value != value:
            self._raise_error("expected {}, got {}", value, self.value)
        value = self.value
        self._gettoken()
        return value

    def _gettoken(self):
        tup = next(self.generator)
        while tup[0] in (tokenize.COMMENT, tokenize.NL):
            tup = next(self.generator)
        self.type, self.value, self.begin, self.end, self.line = tup

    def _raise_error(self, msg, *args):
        if args:
            try:
                msg = msg.format(*args)
            except Exception:
                msg = " ".join([msg] + list(map(str, args)))
        line = self.grammar.splitlines()[self.begin[0] - 1]
        raise SyntaxError(msg, ("<grammar>", self.begin[0], self.begin[1], line))
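

# Illustrative usage sketch, not part of the original module.  Because of the
# relative import at the top this only runs when the module is executed inside
# its package (for example via "python -m"); the grammar text below is made up
# purely for the example.
if __name__ == "__main__":
    toy_grammar = "start: NAME (',' NAME)* NEWLINE\n"
    # parse() is a generator yielding one automata.NFA per rule in the grammar.
    nfas = list(GrammarParser(toy_grammar).parse())
    print(f"parsed {len(nfas)} rule(s) from the toy grammar")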