Diffstat (limited to 'Lib/lib2to3/pgen2')
-rw-r--r--  Lib/lib2to3/pgen2/conv.py     | 66
-rw-r--r--  Lib/lib2to3/pgen2/driver.py   |  2
-rw-r--r--  Lib/lib2to3/pgen2/grammar.py  | 12
-rw-r--r--  Lib/lib2to3/pgen2/literals.py |  2
-rw-r--r--  Lib/lib2to3/pgen2/pgen.py     | 40
-rwxr-xr-x  Lib/lib2to3/pgen2/token.py    |  2
-rw-r--r--  Lib/lib2to3/pgen2/tokenize.py | 16
7 files changed, 71 insertions, 69 deletions
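The hunks below are the kinds of mechanical Python 2 to Python 3 rewrites that 2to3 performs: the print statement becomes the print() function, "except IOError, err" becomes "except IOError as err", f.next() becomes next(f), dict.iteritems() becomes dict.items(), and keys()/map() results are wrapped in list() where the surrounding code still expects a real list. A minimal before/after sketch of these idioms (illustrative only, not part of the commit; the sample values are made up):

    arcs = {"NAME": 1, "NUMBER": 2}
    for label, target in arcs.items():       # was: arcs.iteritems()
        print("%s -> %d" % (label, target))  # was: print "%s -> %d" % (label, target)
    names = list(arcs.keys())                # was: arcs.keys(), already a list in Python 2
    names.sort()
    it = iter(names)
    first = next(it)                         # was: it.next()
    try:
        f = open("no-such-file")
    except IOError as err:                   # was: except IOError, err:
        print("Can't open file: %s" % err)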
diff --git a/Lib/lib2to3/pgen2/conv.py b/Lib/lib2to3/pgen2/conv.py
index 5d788a1..1d648d6 100644
--- a/Lib/lib2to3/pgen2/conv.py
+++ b/Lib/lib2to3/pgen2/conv.py
@@ -60,8 +60,8 @@ class Converter(grammar.Grammar):
         """
         try:
             f = open(filename)
-        except IOError, err:
-            print "Can't open %s: %s" % (filename, err)
+        except IOError as err:
+            print("Can't open %s: %s" % (filename, err))
             return False
         self.symbol2number = {}
         self.number2symbol = {}
@@ -70,8 +70,8 @@ class Converter(grammar.Grammar):
             lineno += 1
             mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
             if not mo and line.strip():
-                print "%s(%s): can't parse %s" % (filename, lineno,
-                                                  line.strip())
+                print("%s(%s): can't parse %s" % (filename, lineno,
+                                                  line.strip()))
             else:
                 symbol, number = mo.groups()
                 number = int(number)
@@ -111,20 +111,20 @@ class Converter(grammar.Grammar):
         """
         try:
             f = open(filename)
-        except IOError, err:
-            print "Can't open %s: %s" % (filename, err)
+        except IOError as err:
+            print("Can't open %s: %s" % (filename, err))
             return False
         # The code below essentially uses f's iterator-ness!
         lineno = 0
 
         # Expect the two #include lines
-        lineno, line = lineno+1, f.next()
+        lineno, line = lineno+1, next(f)
         assert line == '#include "pgenheaders.h"\n', (lineno, line)
-        lineno, line = lineno+1, f.next()
+        lineno, line = lineno+1, next(f)
         assert line == '#include "grammar.h"\n', (lineno, line)
 
         # Parse the state definitions
-        lineno, line = lineno+1, f.next()
+        lineno, line = lineno+1, next(f)
         allarcs = {}
         states = []
         while line.startswith("static arc "):
@@ -132,35 +132,35 @@ class Converter(grammar.Grammar):
                 mo = re.match(r"static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$",
                               line)
                 assert mo, (lineno, line)
-                n, m, k = map(int, mo.groups())
+                n, m, k = list(map(int, mo.groups()))
                 arcs = []
                 for _ in range(k):
-                    lineno, line = lineno+1, f.next()
+                    lineno, line = lineno+1, next(f)
                     mo = re.match(r"\s+{(\d+), (\d+)},$", line)
                     assert mo, (lineno, line)
-                    i, j = map(int, mo.groups())
+                    i, j = list(map(int, mo.groups()))
                     arcs.append((i, j))
-                lineno, line = lineno+1, f.next()
+                lineno, line = lineno+1, next(f)
                 assert line == "};\n", (lineno, line)
                 allarcs[(n, m)] = arcs
-                lineno, line = lineno+1, f.next()
+                lineno, line = lineno+1, next(f)
             mo = re.match(r"static state states_(\d+)\[(\d+)\] = {$", line)
             assert mo, (lineno, line)
-            s, t = map(int, mo.groups())
+            s, t = list(map(int, mo.groups()))
             assert s == len(states), (lineno, line)
             state = []
             for _ in range(t):
-                lineno, line = lineno+1, f.next()
+                lineno, line = lineno+1, next(f)
                 mo = re.match(r"\s+{(\d+), arcs_(\d+)_(\d+)},$", line)
                 assert mo, (lineno, line)
-                k, n, m = map(int, mo.groups())
+                k, n, m = list(map(int, mo.groups()))
                 arcs = allarcs[n, m]
                 assert k == len(arcs), (lineno, line)
                 state.append(arcs)
             states.append(state)
-            lineno, line = lineno+1, f.next()
+            lineno, line = lineno+1, next(f)
         assert line == "};\n", (lineno, line)
-        lineno, line = lineno+1, f.next()
+        lineno, line = lineno+1, next(f)
         self.states = states
 
         # Parse the dfas
@@ -169,18 +169,18 @@ class Converter(grammar.Grammar):
         assert mo, (lineno, line)
         ndfas = int(mo.group(1))
         for i in range(ndfas):
-            lineno, line = lineno+1, f.next()
+            lineno, line = lineno+1, next(f)
             mo = re.match(r'\s+{(\d+), "(\w+)", (\d+), (\d+), states_(\d+),$',
                           line)
             assert mo, (lineno, line)
             symbol = mo.group(2)
-            number, x, y, z = map(int, mo.group(1, 3, 4, 5))
+            number, x, y, z = list(map(int, mo.group(1, 3, 4, 5)))
             assert self.symbol2number[symbol] == number, (lineno, line)
             assert self.number2symbol[number] == symbol, (lineno, line)
             assert x == 0, (lineno, line)
             state = states[z]
             assert y == len(state), (lineno, line)
-            lineno, line = lineno+1, f.next()
+            lineno, line = lineno+1, next(f)
             mo = re.match(r'\s+("(?:\\\d\d\d)*")},$', line)
             assert mo, (lineno, line)
             first = {}
@@ -191,18 +191,18 @@ class Converter(grammar.Grammar):
                     if byte & (1<<j):
                         first[i*8 + j] = 1
             dfas[number] = (state, first)
-        lineno, line = lineno+1, f.next()
+        lineno, line = lineno+1, next(f)
         assert line == "};\n", (lineno, line)
         self.dfas = dfas
 
         # Parse the labels
         labels = []
-        lineno, line = lineno+1, f.next()
+        lineno, line = lineno+1, next(f)
         mo = re.match(r"static label labels\[(\d+)\] = {$", line)
         assert mo, (lineno, line)
         nlabels = int(mo.group(1))
         for i in range(nlabels):
-            lineno, line = lineno+1, f.next()
+            lineno, line = lineno+1, next(f)
             mo = re.match(r'\s+{(\d+), (0|"\w+")},$', line)
             assert mo, (lineno, line)
             x, y = mo.groups()
@@ -212,35 +212,35 @@ class Converter(grammar.Grammar):
             else:
                 y = eval(y)
             labels.append((x, y))
-        lineno, line = lineno+1, f.next()
+        lineno, line = lineno+1, next(f)
         assert line == "};\n", (lineno, line)
         self.labels = labels
 
         # Parse the grammar struct
-        lineno, line = lineno+1, f.next()
+        lineno, line = lineno+1, next(f)
         assert line == "grammar _PyParser_Grammar = {\n", (lineno, line)
-        lineno, line = lineno+1, f.next()
+        lineno, line = lineno+1, next(f)
         mo = re.match(r"\s+(\d+),$", line)
         assert mo, (lineno, line)
         ndfas = int(mo.group(1))
         assert ndfas == len(self.dfas)
-        lineno, line = lineno+1, f.next()
+        lineno, line = lineno+1, next(f)
         assert line == "\tdfas,\n", (lineno, line)
-        lineno, line = lineno+1, f.next()
+        lineno, line = lineno+1, next(f)
         mo = re.match(r"\s+{(\d+), labels},$", line)
         assert mo, (lineno, line)
         nlabels = int(mo.group(1))
         assert nlabels == len(self.labels), (lineno, line)
-        lineno, line = lineno+1, f.next()
+        lineno, line = lineno+1, next(f)
         mo = re.match(r"\s+(\d+)$", line)
         assert mo, (lineno, line)
         start = int(mo.group(1))
         assert start in self.number2symbol, (lineno, line)
         self.start = start
-        lineno, line = lineno+1, f.next()
+        lineno, line = lineno+1, next(f)
         assert line == "};\n", (lineno, line)
         try:
-            lineno, line = lineno+1, f.next()
+            lineno, line = lineno+1, next(f)
         except StopIteration:
             pass
         else:
diff --git a/Lib/lib2to3/pgen2/driver.py b/Lib/lib2to3/pgen2/driver.py
index 77e2a40..4b5dcc0 100644
--- a/Lib/lib2to3/pgen2/driver.py
+++ b/Lib/lib2to3/pgen2/driver.py
@@ -99,7 +99,7 @@ class Driver(object):
 
     def parse_string(self, text, debug=False):
         """Parse a string and return the syntax tree."""
-        tokens = tokenize.generate_tokens(generate_lines(text).next)
+        tokens = tokenize.generate_tokens(generate_lines(text).__next__)
         return self.parse_tokens(tokens, debug)
 
 
diff --git a/Lib/lib2to3/pgen2/grammar.py b/Lib/lib2to3/pgen2/grammar.py
index 7818c24..b7f867f 100644
--- a/Lib/lib2to3/pgen2/grammar.py
+++ b/Lib/lib2to3/pgen2/grammar.py
@@ -100,17 +100,17 @@ class Grammar(object):
     def report(self):
         """Dump the grammar tables to standard output, for debugging."""
         from pprint import pprint
-        print "s2n"
+        print("s2n")
         pprint(self.symbol2number)
-        print "n2s"
+        print("n2s")
         pprint(self.number2symbol)
-        print "states"
+        print("states")
         pprint(self.states)
-        print "dfas"
+        print("dfas")
         pprint(self.dfas)
-        print "labels"
+        print("labels")
         pprint(self.labels)
-        print "start", self.start
+        print("start", self.start)
 
 
 # Map from operator to number (since tokenize doesn't do this)
diff --git a/Lib/lib2to3/pgen2/literals.py b/Lib/lib2to3/pgen2/literals.py
index 0b3948a..4f50d31 100644
--- a/Lib/lib2to3/pgen2/literals.py
+++ b/Lib/lib2to3/pgen2/literals.py
@@ -53,7 +53,7 @@ def test():
         s = repr(c)
         e = evalString(s)
         if e != c:
-            print i, c, s, e
+            print(i, c, s, e)
 
 
 if __name__ == "__main__":
diff --git a/Lib/lib2to3/pgen2/pgen.py b/Lib/lib2to3/pgen2/pgen.py
index 220a71b..59d0361 100644
--- a/Lib/lib2to3/pgen2/pgen.py
+++ b/Lib/lib2to3/pgen2/pgen.py
@@ -26,7 +26,7 @@ class ParserGenerator(object):
 
     def make_grammar(self):
         c = PgenGrammar()
-        names = self.dfas.keys()
+        names = list(self.dfas.keys())
         names.sort()
         names.remove(self.startsymbol)
         names.insert(0, self.startsymbol)
@@ -39,7 +39,7 @@ class ParserGenerator(object):
             states = []
             for state in dfa:
                 arcs = []
-                for label, next in state.arcs.iteritems():
+                for label, next in state.arcs.items():
                     arcs.append((self.make_label(c, label), dfa.index(next)))
                 if state.isfinal:
                     arcs.append((0, dfa.index(state)))
@@ -105,7 +105,7 @@ class ParserGenerator(object):
                     return ilabel
 
     def addfirstsets(self):
-        names = self.dfas.keys()
+        names = list(self.dfas.keys())
         names.sort()
         for name in names:
             if name not in self.first:
@@ -118,7 +118,7 @@ class ParserGenerator(object):
         state = dfa[0]
         totalset = {}
         overlapcheck = {}
-        for label, next in state.arcs.iteritems():
+        for label, next in state.arcs.items():
             if label in self.dfas:
                 if label in self.first:
                     fset = self.first[label]
@@ -133,7 +133,7 @@ class ParserGenerator(object):
                 totalset[label] = 1
                 overlapcheck[label] = {label: 1}
         inverse = {}
-        for label, itsfirst in overlapcheck.iteritems():
+        for label, itsfirst in overlapcheck.items():
             for symbol in itsfirst:
                 if symbol in inverse:
                     raise ValueError("rule %s is ambiguous; %s is in the"
@@ -192,7 +192,7 @@ class ParserGenerator(object):
                 for label, next in nfastate.arcs:
                     if label is not None:
                         addclosure(next, arcs.setdefault(label, {}))
-            for label, nfaset in arcs.iteritems():
+            for label, nfaset in arcs.items():
                 for st in states:
                     if st.nfaset == nfaset:
                         break
@@ -203,10 +203,10 @@ class ParserGenerator(object):
         return states # List of DFAState instances; first one is start
 
     def dump_nfa(self, name, start, finish):
-        print "Dump of NFA for", name
+        print("Dump of NFA for", name)
         todo = [start]
         for i, state in enumerate(todo):
-            print "  State", i, state is finish and "(final)" or ""
+            print("  State", i, state is finish and "(final)" or "")
             for label, next in state.arcs:
                 if next in todo:
                     j = todo.index(next)
@@ -214,16 +214,16 @@ class ParserGenerator(object):
                     j = len(todo)
                     todo.append(next)
                 if label is None:
-                    print "    -> %d" % j
+                    print("    -> %d" % j)
                 else:
-                    print "    %s -> %d" % (label, j)
+                    print("    %s -> %d" % (label, j))
 
     def dump_dfa(self, name, dfa):
-        print "Dump of DFA for", name
+        print("Dump of DFA for", name)
         for i, state in enumerate(dfa):
-            print "  State", i, state.isfinal and "(final)" or ""
-            for label, next in state.arcs.iteritems():
-                print "    %s -> %d" % (label, dfa.index(next))
+            print("  State", i, state.isfinal and "(final)" or "")
+            for label, next in state.arcs.items():
+                print("    %s -> %d" % (label, dfa.index(next)))
 
     def simplify_dfa(self, dfa):
         # This is not theoretically optimal, but works well enough.
@@ -319,9 +319,9 @@ class ParserGenerator(object):
         return value
 
     def gettoken(self):
-        tup = self.generator.next()
+        tup = next(self.generator)
         while tup[0] in (tokenize.COMMENT, tokenize.NL):
-            tup = self.generator.next()
+            tup = next(self.generator)
         self.type, self.value, self.begin, self.end, self.line = tup
         #print token.tok_name[self.type], repr(self.value)
 
@@ -330,7 +330,7 @@ class ParserGenerator(object):
             try:
                 msg = msg % args
             except:
-                msg = " ".join([msg] + map(str, args))
+                msg = " ".join([msg] + list(map(str, args)))
         raise SyntaxError(msg, (self.filename, self.end[0],
                                 self.end[1], self.line))
 
@@ -348,7 +348,7 @@ class DFAState(object):
 
     def __init__(self, nfaset, final):
         assert isinstance(nfaset, dict)
-        assert isinstance(iter(nfaset).next(), NFAState)
+        assert isinstance(next(iter(nfaset)), NFAState)
         assert isinstance(final, NFAState)
         self.nfaset = nfaset
         self.isfinal = final in nfaset
@@ -361,7 +361,7 @@ class DFAState(object):
         self.arcs[label] = next
 
     def unifystate(self, old, new):
-        for label, next in self.arcs.iteritems():
+        for label, next in self.arcs.items():
             if next is old:
                 self.arcs[label] = new
 
@@ -374,7 +374,7 @@ class DFAState(object):
         # would invoke this method recursively, with cycles...
         if len(self.arcs) != len(other.arcs):
             return False
-        for label, next in self.arcs.iteritems():
+        for label, next in self.arcs.items():
             if next is not other.arcs.get(label):
                 return False
         return True
diff --git a/Lib/lib2to3/pgen2/token.py b/Lib/lib2to3/pgen2/token.py
index 61468b3..1c81065 100755
--- a/Lib/lib2to3/pgen2/token.py
+++ b/Lib/lib2to3/pgen2/token.py
@@ -67,7 +67,7 @@ NT_OFFSET = 256
 #--end constants--
 
 tok_name = {}
-for _name, _value in globals().items():
+for _name, _value in list(globals().items()):
     if type(_value) is type(0):
         tok_name[_value] = _name
 
diff --git a/Lib/lib2to3/pgen2/tokenize.py b/Lib/lib2to3/pgen2/tokenize.py
index c31d549..33cfc33 100644
--- a/Lib/lib2to3/pgen2/tokenize.py
+++ b/Lib/lib2to3/pgen2/tokenize.py
@@ -94,8 +94,8 @@ ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
 PseudoExtras = group(r'\\\r?\n', Comment, Triple)
 PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
 
-tokenprog, pseudoprog, single3prog, double3prog = map(
-    re.compile, (Token, PseudoToken, Single3, Double3))
+tokenprog, pseudoprog, single3prog, double3prog = list(map(
+    re.compile, (Token, PseudoToken, Single3, Double3)))
 endprogs = {"'": re.compile(Single), '"': re.compile(Double),
             "'''": single3prog, '"""': double3prog,
             "r'''": single3prog, 'r"""': double3prog,
@@ -143,9 +143,11 @@ class TokenError(Exception): pass
 
 class StopTokenizing(Exception): pass
 
-def printtoken(type, token, (srow, scol), (erow, ecol), line): # for testing
-    print "%d,%d-%d,%d:\t%s\t%s" % \
-        (srow, scol, erow, ecol, tok_name[type], repr(token))
+def printtoken(type, token, xxx_todo_changeme, xxx_todo_changeme1, line): # for testing
+    (srow, scol) = xxx_todo_changeme
+    (erow, ecol) = xxx_todo_changeme1
+    print("%d,%d-%d,%d:\t%s\t%s" % \
+        (srow, scol, erow, ecol, tok_name[type], repr(token)))
 
 def tokenize(readline, tokeneater=printtoken):
     """
@@ -279,7 +281,7 @@ def generate_tokens(readline):
 
         if contstr:                            # continued string
             if not line:
-                raise TokenError, ("EOF in multi-line string", strstart)
+                raise TokenError("EOF in multi-line string", strstart)
             endmatch = endprog.match(line)
             if endmatch:
                 pos = end = endmatch.end(0)
@@ -335,7 +337,7 @@ def generate_tokens(readline):
 
         else:                                  # continued statement
             if not line:
-                raise TokenError, ("EOF in multi-line statement", (lnum, 0))
+                raise TokenError("EOF in multi-line statement", (lnum, 0))
             continued = 0
 
         while pos < max: