Diffstat (limited to 'Lib/lib2to3')
-rw-r--r--  Lib/lib2to3/Grammar.txt | 46
-rw-r--r--  Lib/lib2to3/btm_matcher.py | 7
-rw-r--r--  Lib/lib2to3/btm_utils.py | 6
-rw-r--r--  Lib/lib2to3/fixer_base.py | 9
-rw-r--r--  Lib/lib2to3/fixer_util.py | 109
-rw-r--r--  Lib/lib2to3/fixes/fix_apply.py | 10
-rw-r--r--  Lib/lib2to3/fixes/fix_basestring.py | 2
-rw-r--r--  Lib/lib2to3/fixes/fix_buffer.py | 2
-rw-r--r--  Lib/lib2to3/fixes/fix_dict.py | 15
-rw-r--r--  Lib/lib2to3/fixes/fix_except.py | 14
-rw-r--r--  Lib/lib2to3/fixes/fix_exec.py | 3
-rw-r--r--  Lib/lib2to3/fixes/fix_execfile.py | 12
-rw-r--r--  Lib/lib2to3/fixes/fix_exitfunc.py | 8
-rw-r--r--  Lib/lib2to3/fixes/fix_filter.py | 30
-rw-r--r--  Lib/lib2to3/fixes/fix_funcattrs.py | 2
-rw-r--r--  Lib/lib2to3/fixes/fix_getcwdu.py | 2
-rw-r--r--  Lib/lib2to3/fixes/fix_has_key.py | 7
-rw-r--r--  Lib/lib2to3/fixes/fix_idioms.py | 26
-rw-r--r--  Lib/lib2to3/fixes/fix_import.py | 8
-rw-r--r--  Lib/lib2to3/fixes/fix_imports.py | 2
-rw-r--r--  Lib/lib2to3/fixes/fix_input.py | 4
-rw-r--r--  Lib/lib2to3/fixes/fix_intern.py | 27
-rw-r--r--  Lib/lib2to3/fixes/fix_isinstance.py | 2
-rw-r--r--  Lib/lib2to3/fixes/fix_itertools.py | 2
-rw-r--r--  Lib/lib2to3/fixes/fix_itertools_imports.py | 8
-rw-r--r--  Lib/lib2to3/fixes/fix_long.py | 2
-rw-r--r--  Lib/lib2to3/fixes/fix_map.py | 43
-rw-r--r--  Lib/lib2to3/fixes/fix_metaclass.py | 30
-rw-r--r--  Lib/lib2to3/fixes/fix_methodattrs.py | 2
-rw-r--r--  Lib/lib2to3/fixes/fix_ne.py | 4
-rw-r--r--  Lib/lib2to3/fixes/fix_next.py | 14
-rw-r--r--  Lib/lib2to3/fixes/fix_nonzero.py | 4
-rw-r--r--  Lib/lib2to3/fixes/fix_numliterals.py | 8
-rw-r--r--  Lib/lib2to3/fixes/fix_operator.py | 43
-rw-r--r--  Lib/lib2to3/fixes/fix_paren.py | 2
-rw-r--r--  Lib/lib2to3/fixes/fix_print.py | 24
-rw-r--r--  Lib/lib2to3/fixes/fix_raise.py | 16
-rw-r--r--  Lib/lib2to3/fixes/fix_raw_input.py | 2
-rw-r--r--  Lib/lib2to3/fixes/fix_reduce.py | 2
-rw-r--r--  Lib/lib2to3/fixes/fix_reload.py | 36
-rw-r--r--  Lib/lib2to3/fixes/fix_renames.py | 6
-rw-r--r--  Lib/lib2to3/fixes/fix_repr.py | 2
-rw-r--r--  Lib/lib2to3/fixes/fix_set_literal.py | 4
-rw-r--r--  Lib/lib2to3/fixes/fix_standarderror.py | 2
-rw-r--r--  Lib/lib2to3/fixes/fix_sys_exc.py | 6
-rw-r--r--  Lib/lib2to3/fixes/fix_throw.py | 8
-rw-r--r--  Lib/lib2to3/fixes/fix_tuple_params.py | 18
-rw-r--r--  Lib/lib2to3/fixes/fix_types.py | 3
-rw-r--r--  Lib/lib2to3/fixes/fix_unicode.py | 12
-rw-r--r--  Lib/lib2to3/fixes/fix_urllib.py | 3
-rw-r--r--  Lib/lib2to3/fixes/fix_ws_comma.py | 10
-rw-r--r--  Lib/lib2to3/fixes/fix_xrange.py | 10
-rw-r--r--  Lib/lib2to3/fixes/fix_xreadlines.py | 2
-rw-r--r--  Lib/lib2to3/fixes/fix_zip.py | 21
-rw-r--r--  Lib/lib2to3/main.py | 29
-rw-r--r--  Lib/lib2to3/patcomp.py | 10
-rw-r--r--  Lib/lib2to3/pgen2/conv.py | 66
-rw-r--r--  Lib/lib2to3/pgen2/driver.py | 14
-rw-r--r--  Lib/lib2to3/pgen2/grammar.py | 47
-rw-r--r--  Lib/lib2to3/pgen2/literals.py | 6
-rw-r--r--  Lib/lib2to3/pgen2/parse.py | 3
-rw-r--r--  Lib/lib2to3/pgen2/pgen.py | 42
-rwxr-xr-x  Lib/lib2to3/pgen2/token.py | 10
-rw-r--r--  Lib/lib2to3/pgen2/tokenize.py | 197
-rw-r--r--  Lib/lib2to3/pygram.py | 5
-rw-r--r--  Lib/lib2to3/pytree.py | 76
-rw-r--r--  Lib/lib2to3/refactor.py | 97
-rw-r--r--  Lib/lib2to3/tests/__init__.py | 22
-rw-r--r--  Lib/lib2to3/tests/__main__.py | 4
-rw-r--r--  Lib/lib2to3/tests/data/crlf.py | 6
-rw-r--r--  Lib/lib2to3/tests/data/py3_test_grammar.py | 26
-rwxr-xr-x  Lib/lib2to3/tests/pytree_idempotency.py | 28
-rw-r--r--  Lib/lib2to3/tests/support.py | 10
-rw-r--r--  Lib/lib2to3/tests/test_all_fixers.py | 6
-rw-r--r--  Lib/lib2to3/tests/test_fixers.py | 174
-rw-r--r--  Lib/lib2to3/tests/test_main.py | 36
-rw-r--r--  Lib/lib2to3/tests/test_parser.py | 349
-rw-r--r--  Lib/lib2to3/tests/test_pytree.py | 22
-rw-r--r--  Lib/lib2to3/tests/test_refactor.py | 59
-rw-r--r--  Lib/lib2to3/tests/test_util.py | 13
80 files changed, 815 insertions, 1244 deletions
diff --git a/Lib/lib2to3/Grammar.txt b/Lib/lib2to3/Grammar.txt
index 68b7386..9be7c9f 100644
--- a/Lib/lib2to3/Grammar.txt
+++ b/Lib/lib2to3/Grammar.txt
@@ -1,7 +1,26 @@
# Grammar for 2to3. This grammar supports Python 2.x and 3.x.
-# NOTE WELL: You should also follow all the steps listed at
-# https://devguide.python.org/grammar/
+# Note: Changing the grammar specified in this file will most likely
+# require corresponding changes in the parser module
+# (../Modules/parsermodule.c). If you can't make the changes to
+# that module yourself, please co-ordinate the required changes
+# with someone who can; ask around on python-dev for help. Fred
+# Drake <fdrake@acm.org> will probably be listening there.
+
+# NOTE WELL: You should also follow all the steps listed in PEP 306,
+# "How to Change Python's Grammar"
+
+# Commands for Kees Blom's railroad program
+#diagram:token NAME
+#diagram:token NUMBER
+#diagram:token STRING
+#diagram:token NEWLINE
+#diagram:token ENDMARKER
+#diagram:token INDENT
+#diagram:output\input python.bla
+#diagram:token DEDENT
+#diagram:output\textwidth 20.04cm\oddsidemargin 0.0cm\evensidemargin 0.0cm
+#diagram:rules
# Start symbols for the grammar:
# file_input is a module or sequence of commands read from an input file;
@@ -14,18 +33,17 @@ eval_input: testlist NEWLINE* ENDMARKER
decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
decorators: decorator+
-decorated: decorators (classdef | funcdef | async_funcdef)
-async_funcdef: ASYNC funcdef
+decorated: decorators (classdef | funcdef)
funcdef: 'def' NAME parameters ['->' test] ':' suite
parameters: '(' [typedargslist] ')'
typedargslist: ((tfpdef ['=' test] ',')*
- ('*' [tname] (',' tname ['=' test])* [',' ['**' tname [',']]] | '**' tname [','])
+ ('*' [tname] (',' tname ['=' test])* [',' '**' tname] | '**' tname)
| tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
tname: NAME [':' test]
tfpdef: tname | '(' tfplist ')'
tfplist: tfpdef (',' tfpdef)* [',']
varargslist: ((vfpdef ['=' test] ',')*
- ('*' [vname] (',' vname ['=' test])* [',' ['**' vname [',']]] | '**' vname [','])
+ ('*' [vname] (',' vname ['=' test])* [',' '**' vname] | '**' vname)
| vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
vname: NAME
vfpdef: vname | '(' vfplist ')'
@@ -35,13 +53,12 @@ stmt: simple_stmt | compound_stmt
simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
small_stmt: (expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt |
import_stmt | global_stmt | exec_stmt | assert_stmt)
-expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
+expr_stmt: testlist_star_expr (augassign (yield_expr|testlist) |
('=' (yield_expr|testlist_star_expr))*)
-annassign: ':' test ['=' test]
testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
'<<=' | '>>=' | '**=' | '//=')
-# For normal and annotated assignments, additional restrictions enforced by the interpreter
+# For normal assignments, additional restrictions enforced by the interpreter
print_stmt: 'print' ( [ test (',' test)* [','] ] |
'>>' test [ (',' test)+ [','] ] )
del_stmt: 'del' exprlist
@@ -65,8 +82,7 @@ global_stmt: ('global' | 'nonlocal') NAME (',' NAME)*
exec_stmt: 'exec' expr ['in' test [',' test]]
assert_stmt: 'assert' test [',' test]
-compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt
-async_stmt: ASYNC (funcdef | with_stmt | for_stmt)
+compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated
if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
while_stmt: 'while' test ':' suite ['else' ':' suite]
for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
@@ -105,7 +121,7 @@ shift_expr: arith_expr (('<<'|'>>') arith_expr)*
arith_expr: term (('+'|'-') term)*
term: factor (('*'|'@'|'/'|'%'|'//') factor)*
factor: ('+'|'-'|'~') factor | power
-power: [AWAIT] atom trailer* ['**' factor]
+power: atom trailer* ['**' factor]
atom: ('(' [yield_expr|testlist_gexp] ')' |
'[' [listmaker] ']' |
'{' [dictsetmaker] '}' |
@@ -138,11 +154,11 @@ arglist: argument (',' argument)* [',']
# that precede iterable unpackings are blocked; etc.
argument: ( test [comp_for] |
test '=' test |
- '**' test |
- '*' test )
+ '**' expr |
+ star_expr )
comp_iter: comp_for | comp_if
-comp_for: [ASYNC] 'for' exprlist 'in' testlist_safe [comp_iter]
+comp_for: 'for' exprlist 'in' testlist_safe [comp_iter]
comp_if: 'if' old_test [comp_iter]
testlist1: test (',' test)*
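
For context, the grammar revert above means lib2to3's parser once again rejects Python 3-only syntax such as annotated assignment and async functions. A minimal sketch (not part of the patch) using the package's real driver API:

from lib2to3 import pygram, pytree
from lib2to3.pgen2 import driver
from lib2to3.pgen2.parse import ParseError

d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
d.parse_string(u"x = 5\n")               # plain assignment still parses
try:
    d.parse_string(u"x: int = 5\n")      # annassign was removed above
except ParseError:
    print "annotated assignment no longer parses"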
diff --git a/Lib/lib2to3/btm_matcher.py b/Lib/lib2to3/btm_matcher.py
index 3b78868..736ba2b 100644
--- a/Lib/lib2to3/btm_matcher.py
+++ b/Lib/lib2to3/btm_matcher.py
@@ -104,7 +104,7 @@ class BottomMatcher(object):
current_ast_node.was_checked = True
for child in current_ast_node.children:
# multiple statements, recheck
- if isinstance(child, pytree.Leaf) and child.value == ";":
+ if isinstance(child, pytree.Leaf) and child.value == u";":
current_ast_node.was_checked = False
break
if current_ast_node.type == 1:
@@ -117,7 +117,10 @@ class BottomMatcher(object):
#token matches
current_ac_node = current_ac_node.transition_table[node_token]
for fixer in current_ac_node.fixers:
+ if fixer not in results:
+ results[fixer] = []
results[fixer].append(current_ast_node)
+
else:
#matching failed, reset automaton
current_ac_node = self.root
@@ -131,6 +134,8 @@ class BottomMatcher(object):
#token matches
current_ac_node = current_ac_node.transition_table[node_token]
for fixer in current_ac_node.fixers:
+ if fixer not in results:
+ results[fixer] = []
results[fixer].append(current_ast_node)
current_ast_node = current_ast_node.parent
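
Both hunks above guard against a missing key before appending, which keeps results a plain dict. A standalone sketch of the idiom (the defaultdict comparison is illustrative, not what this module uses):

# Initialize-on-first-use with a plain dict, as in the hunks above.
results = {}
for fixer in ("fix_a", "fix_b", "fix_a"):   # hypothetical fixer names
    if fixer not in results:
        results[fixer] = []
    results[fixer].append("matched node")

# Equivalent behavior with collections.defaultdict, shown for comparison.
import collections
auto = collections.defaultdict(list)
auto["fix_a"].append("matched node")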
diff --git a/Lib/lib2to3/btm_utils.py b/Lib/lib2to3/btm_utils.py
index ff76ba3..501f834 100644
--- a/Lib/lib2to3/btm_utils.py
+++ b/Lib/lib2to3/btm_utils.py
@@ -96,7 +96,8 @@ class MinNode(object):
def leaves(self):
"Generator that returns the leaves of the tree"
for child in self.children:
- yield from child.leaves()
+ for x in child.leaves():
+ yield x
if not self.children:
yield self
@@ -276,6 +277,7 @@ def rec_test(sequence, test_func):
sub-iterables"""
for x in sequence:
if isinstance(x, (list, tuple)):
- yield from rec_test(x, test_func)
+ for y in rec_test(x, test_func):
+ yield y
else:
yield test_func(x)
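
Both hunks above replace yield from, which does not exist in Python 2, with explicit re-yield loops. A minimal standalone sketch of the same pattern:

def flatten(sequence):
    # Python 2 spelling of "yield from flatten(item)": loop and re-yield.
    for item in sequence:
        if isinstance(item, (list, tuple)):
            for sub in flatten(item):
                yield sub
        else:
            yield item

print list(flatten([1, [2, [3, 4]], 5]))   # -> [1, 2, 3, 4, 5]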
diff --git a/Lib/lib2to3/fixer_base.py b/Lib/lib2to3/fixer_base.py
index df581a4..2f50ad3 100644
--- a/Lib/lib2to3/fixer_base.py
+++ b/Lib/lib2to3/fixer_base.py
@@ -26,6 +26,7 @@ class BaseFix(object):
pattern_tree = None # Tree representation of the pattern
options = None # Options object passed to initializer
filename = None # The filename (set by set_filename)
+ logger = None # A logger (set by set_filename)
numbers = itertools.count(1) # For new_name()
used_names = set() # A set of all used NAMEs
order = "post" # Does the fixer prefer pre- or post-order traversal
@@ -68,7 +69,7 @@ class BaseFix(object):
with_tree=True)
def set_filename(self, filename):
- """Set the filename.
+ """Set the filename, and a logger derived from it.
The main refactoring tool should call this.
"""
@@ -102,14 +103,14 @@ class BaseFix(object):
"""
raise NotImplementedError()
- def new_name(self, template="xxx_todo_changeme"):
+ def new_name(self, template=u"xxx_todo_changeme"):
"""Return a string suitable for use as an identifier
The new name is guaranteed not to conflict with other identifiers.
"""
name = template
while name in self.used_names:
- name = template + str(next(self.numbers))
+ name = template + unicode(self.numbers.next())
self.used_names.add(name)
return name
@@ -128,7 +129,7 @@ class BaseFix(object):
"""
lineno = node.get_lineno()
for_output = node.clone()
- for_output.prefix = ""
+ for_output.prefix = u""
msg = "Line %d: could not convert: %s"
self.log_message(msg % (lineno, for_output))
if reason:
diff --git a/Lib/lib2to3/fixer_util.py b/Lib/lib2to3/fixer_util.py
index c2a3a47..78fdf26 100644
--- a/Lib/lib2to3/fixer_util.py
+++ b/Lib/lib2to3/fixer_util.py
@@ -1,6 +1,8 @@
"""Utility functions, node construction macros, etc."""
# Author: Collin Winter
+from itertools import islice
+
# Local imports
from .pgen2 import token
from .pytree import Leaf, Node
@@ -14,24 +16,24 @@ from . import patcomp
def KeywordArg(keyword, value):
return Node(syms.argument,
- [keyword, Leaf(token.EQUAL, "="), value])
+ [keyword, Leaf(token.EQUAL, u"="), value])
def LParen():
- return Leaf(token.LPAR, "(")
+ return Leaf(token.LPAR, u"(")
def RParen():
- return Leaf(token.RPAR, ")")
+ return Leaf(token.RPAR, u")")
def Assign(target, source):
"""Build an assignment statement"""
if not isinstance(target, list):
target = [target]
if not isinstance(source, list):
- source.prefix = " "
+ source.prefix = u" "
source = [source]
return Node(syms.atom,
- target + [Leaf(token.EQUAL, "=", prefix=" ")] + source)
+ target + [Leaf(token.EQUAL, u"=", prefix=u" ")] + source)
def Name(name, prefix=None):
"""Return a NAME leaf"""
@@ -43,11 +45,11 @@ def Attr(obj, attr):
def Comma():
"""A comma leaf"""
- return Leaf(token.COMMA, ",")
+ return Leaf(token.COMMA, u",")
def Dot():
"""A period (.) leaf"""
- return Leaf(token.DOT, ".")
+ return Leaf(token.DOT, u".")
def ArgList(args, lparen=LParen(), rparen=RParen()):
"""A parenthesised argument list, used by Call()"""
@@ -65,20 +67,20 @@ def Call(func_name, args=None, prefix=None):
def Newline():
"""A newline literal"""
- return Leaf(token.NEWLINE, "\n")
+ return Leaf(token.NEWLINE, u"\n")
def BlankLine():
"""A blank line"""
- return Leaf(token.NEWLINE, "")
+ return Leaf(token.NEWLINE, u"")
def Number(n, prefix=None):
return Leaf(token.NUMBER, n, prefix=prefix)
def Subscript(index_node):
"""A numeric or string subscript"""
- return Node(syms.trailer, [Leaf(token.LBRACE, "["),
+ return Node(syms.trailer, [Leaf(token.LBRACE, u"["),
index_node,
- Leaf(token.RBRACE, "]")])
+ Leaf(token.RBRACE, u"]")])
def String(string, prefix=None):
"""A string leaf"""
@@ -89,24 +91,24 @@ def ListComp(xp, fp, it, test=None):
If test is None, the "if test" part is omitted.
"""
- xp.prefix = ""
- fp.prefix = " "
- it.prefix = " "
- for_leaf = Leaf(token.NAME, "for")
- for_leaf.prefix = " "
- in_leaf = Leaf(token.NAME, "in")
- in_leaf.prefix = " "
+ xp.prefix = u""
+ fp.prefix = u" "
+ it.prefix = u" "
+ for_leaf = Leaf(token.NAME, u"for")
+ for_leaf.prefix = u" "
+ in_leaf = Leaf(token.NAME, u"in")
+ in_leaf.prefix = u" "
inner_args = [for_leaf, fp, in_leaf, it]
if test:
- test.prefix = " "
- if_leaf = Leaf(token.NAME, "if")
- if_leaf.prefix = " "
+ test.prefix = u" "
+ if_leaf = Leaf(token.NAME, u"if")
+ if_leaf.prefix = u" "
inner_args.append(Node(syms.comp_if, [if_leaf, test]))
inner = Node(syms.listmaker, [xp, Node(syms.comp_for, inner_args)])
return Node(syms.atom,
- [Leaf(token.LBRACE, "["),
+ [Leaf(token.LBRACE, u"["),
inner,
- Leaf(token.RBRACE, "]")])
+ Leaf(token.RBRACE, u"]")])
def FromImport(package_name, name_leafs):
""" Return an import statement in the form:
@@ -120,36 +122,13 @@ def FromImport(package_name, name_leafs):
# Pull the leaves out of their old tree
leaf.remove()
- children = [Leaf(token.NAME, "from"),
- Leaf(token.NAME, package_name, prefix=" "),
- Leaf(token.NAME, "import", prefix=" "),
+ children = [Leaf(token.NAME, u"from"),
+ Leaf(token.NAME, package_name, prefix=u" "),
+ Leaf(token.NAME, u"import", prefix=u" "),
Node(syms.import_as_names, name_leafs)]
imp = Node(syms.import_from, children)
return imp
-def ImportAndCall(node, results, names):
- """Returns an import statement and calls a method
- of the module:
-
- import module
- module.name()"""
- obj = results["obj"].clone()
- if obj.type == syms.arglist:
- newarglist = obj.clone()
- else:
- newarglist = Node(syms.arglist, [obj.clone()])
- after = results["after"]
- if after:
- after = [n.clone() for n in after]
- new = Node(syms.power,
- Attr(Name(names[0]), Name(names[1])) +
- [Node(syms.trailer,
- [results["lpar"].clone(),
- newarglist,
- results["rpar"].clone()])] + after)
- new.prefix = node.prefix
- return new
-
###########################################################
### Determine whether a node represents a given literal
@@ -164,8 +143,8 @@ def is_tuple(node):
and isinstance(node.children[0], Leaf)
and isinstance(node.children[1], Node)
and isinstance(node.children[2], Leaf)
- and node.children[0].value == "("
- and node.children[2].value == ")")
+ and node.children[0].value == u"("
+ and node.children[2].value == u")")
def is_list(node):
"""Does the node represent a list literal?"""
@@ -173,8 +152,8 @@ def is_list(node):
and len(node.children) > 1
and isinstance(node.children[0], Leaf)
and isinstance(node.children[-1], Leaf)
- and node.children[0].value == "["
- and node.children[-1].value == "]")
+ and node.children[0].value == u"["
+ and node.children[-1].value == u"]")
###########################################################
@@ -185,8 +164,8 @@ def parenthesize(node):
return Node(syms.atom, [LParen(), node, RParen()])
-consuming_calls = {"sorted", "list", "set", "any", "all", "tuple", "sum",
- "min", "max", "enumerate"}
+consuming_calls = set(["sorted", "list", "set", "any", "all", "tuple", "sum",
+ "min", "max", "enumerate"])
def attr_chain(obj, attr):
"""Follow an attribute chain.
@@ -276,7 +255,7 @@ def find_indentation(node):
if indent.type == token.INDENT:
return indent.value
node = node.parent
- return ""
+ return u""
###########################################################
### The following functions are to find bindings in a suite
@@ -347,17 +326,17 @@ def touch_import(package, name, node):
if package is None:
import_ = Node(syms.import_name, [
- Leaf(token.NAME, "import"),
- Leaf(token.NAME, name, prefix=" ")
+ Leaf(token.NAME, u"import"),
+ Leaf(token.NAME, name, prefix=u" ")
])
else:
- import_ = FromImport(package, [Leaf(token.NAME, name, prefix=" ")])
+ import_ = FromImport(package, [Leaf(token.NAME, name, prefix=u" ")])
children = [import_, Newline()]
root.insert_child(insert_pos, Node(syms.simple_stmt, children))
-_def_syms = {syms.classdef, syms.funcdef}
+_def_syms = set([syms.classdef, syms.funcdef])
def find_binding(name, node, package=None):
""" Returns the node which binds variable name, otherwise None.
If optional argument package is supplied, only imports will
@@ -400,7 +379,7 @@ def find_binding(name, node, package=None):
return ret
return None
-_block_syms = {syms.funcdef, syms.classdef, syms.trailer}
+_block_syms = set([syms.funcdef, syms.classdef, syms.trailer])
def _find(name, node):
nodes = [node]
while nodes:
@@ -412,7 +391,7 @@ def _find(name, node):
return None
def _is_import_binding(node, name, package=None):
- """ Will return node if node will import name, or node
+ """ Will reuturn node if node will import name, or node
will import * from package. None is returned otherwise.
See test cases for examples. """
@@ -432,12 +411,12 @@ def _is_import_binding(node, name, package=None):
elif imp.type == token.NAME and imp.value == name:
return node
elif node.type == syms.import_from:
- # str(...) is used to make life easier here, because
+ # unicode(...) is used to make life easier here, because
# from a.b import parses to ['import', ['a', '.', 'b'], ...]
- if package and str(node.children[1]).strip() != package:
+ if package and unicode(node.children[1]).strip() != package:
return None
n = node.children[3]
- if package and _find("as", n):
+ if package and _find(u"as", n):
# See test_from_import_as for explanation
return None
elif n.type == syms.import_as_names and _find(name, n):
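
As a quick illustration of the construction macros being converted above, the helpers compose into ordinary syntax trees whose str() is source text. A sketch using the module's real Name, Call, and Comma helpers:

from lib2to3.fixer_util import Call, Comma, Name

# Build the subtree for "isinstance(x, T)"; prefix is the leading whitespace.
node = Call(Name(u"isinstance"), [Name(u"x"), Comma(), Name(u"T", prefix=u" ")])
print str(node)   # -> isinstance(x, T)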
diff --git a/Lib/lib2to3/fixes/fix_apply.py b/Lib/lib2to3/fixes/fix_apply.py
index 6408582..1a465c2 100644
--- a/Lib/lib2to3/fixes/fix_apply.py
+++ b/Lib/lib2to3/fixes/fix_apply.py
@@ -37,8 +37,10 @@ class FixApply(fixer_base.BaseFix):
# I feel like we should be able to express this logic in the
# PATTERN above but I don't know how to do it so...
if args:
+ if args.type == self.syms.star_expr:
+ return # Make no change.
if (args.type == self.syms.argument and
- args.children[0].value in {'**', '*'}):
+ args.children[0].value == '**'):
return # Make no change.
if kwds and (kwds.type == self.syms.argument and
kwds.children[0].value == '**'):
@@ -56,12 +58,12 @@ class FixApply(fixer_base.BaseFix):
if kwds is not None:
kwds = kwds.clone()
kwds.prefix = ""
- l_newargs = [pytree.Leaf(token.STAR, "*"), args]
+ l_newargs = [pytree.Leaf(token.STAR, u"*"), args]
if kwds is not None:
l_newargs.extend([Comma(),
- pytree.Leaf(token.DOUBLESTAR, "**"),
+ pytree.Leaf(token.DOUBLESTAR, u"**"),
kwds])
- l_newargs[-2].prefix = " " # that's the ** token
+ l_newargs[-2].prefix = u" " # that's the ** token
# XXX Sometimes we could be cleverer, e.g. apply(f, (x, y) + t)
# can be translated into f(x, y, *t) instead of f(*(x, y) + t)
#new = pytree.Node(syms.power, (func, ArgList(l_newargs)))
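
A single fixer such as this one can be exercised directly through lib2to3's refactoring API; a minimal sketch (RefactoringTool.refactor_string is the real entry point):

from lib2to3.refactor import RefactoringTool

rt = RefactoringTool(["lib2to3.fixes.fix_apply"])
print rt.refactor_string(u"apply(f, args, kwds)\n", "<example>")
# -> f(*args, **kwds)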
diff --git a/Lib/lib2to3/fixes/fix_basestring.py b/Lib/lib2to3/fixes/fix_basestring.py
index 5fe69a0..a3c9a43 100644
--- a/Lib/lib2to3/fixes/fix_basestring.py
+++ b/Lib/lib2to3/fixes/fix_basestring.py
@@ -11,4 +11,4 @@ class FixBasestring(fixer_base.BaseFix):
PATTERN = "'basestring'"
def transform(self, node, results):
- return Name("str", prefix=node.prefix)
+ return Name(u"str", prefix=node.prefix)
diff --git a/Lib/lib2to3/fixes/fix_buffer.py b/Lib/lib2to3/fixes/fix_buffer.py
index f9a1958..c6b0928 100644
--- a/Lib/lib2to3/fixes/fix_buffer.py
+++ b/Lib/lib2to3/fixes/fix_buffer.py
@@ -19,4 +19,4 @@ class FixBuffer(fixer_base.BaseFix):
def transform(self, node, results):
name = results["name"]
- name.replace(Name("memoryview", prefix=name.prefix))
+ name.replace(Name(u"memoryview", prefix=name.prefix))
diff --git a/Lib/lib2to3/fixes/fix_dict.py b/Lib/lib2to3/fixes/fix_dict.py
index d3655c9..f681e4d 100644
--- a/Lib/lib2to3/fixes/fix_dict.py
+++ b/Lib/lib2to3/fixes/fix_dict.py
@@ -30,12 +30,13 @@ as an argument to a function that introspects the argument).
# Local imports
from .. import pytree
from .. import patcomp
+from ..pgen2 import token
from .. import fixer_base
-from ..fixer_util import Name, Call, Dot
+from ..fixer_util import Name, Call, LParen, RParen, ArgList, Dot
from .. import fixer_util
-iter_exempt = fixer_util.consuming_calls | {"iter"}
+iter_exempt = fixer_util.consuming_calls | set(["iter"])
class FixDict(fixer_base.BaseFix):
@@ -57,11 +58,11 @@ class FixDict(fixer_base.BaseFix):
tail = results["tail"]
syms = self.syms
method_name = method.value
- isiter = method_name.startswith("iter")
- isview = method_name.startswith("view")
+ isiter = method_name.startswith(u"iter")
+ isview = method_name.startswith(u"view")
if isiter or isview:
method_name = method_name[4:]
- assert method_name in ("keys", "items", "values"), repr(method)
+ assert method_name in (u"keys", u"items", u"values"), repr(method)
head = [n.clone() for n in head]
tail = [n.clone() for n in tail]
special = not tail and self.in_special_context(node, isiter)
@@ -72,8 +73,8 @@ class FixDict(fixer_base.BaseFix):
results["parens"].clone()]
new = pytree.Node(syms.power, args)
if not (special or isview):
- new.prefix = ""
- new = Call(Name("iter" if isiter else "list"), [new])
+ new.prefix = u""
+ new = Call(Name(u"iter" if isiter else u"list"), [new])
if tail:
new = pytree.Node(syms.power, [new] + tail)
new.prefix = node.prefix
diff --git a/Lib/lib2to3/fixes/fix_except.py b/Lib/lib2to3/fixes/fix_except.py
index 49bd3d5..e324718 100644
--- a/Lib/lib2to3/fixes/fix_except.py
+++ b/Lib/lib2to3/fixes/fix_except.py
@@ -30,7 +30,7 @@ from ..fixer_util import Assign, Attr, Name, is_tuple, is_list, syms
def find_excepts(nodes):
for i, n in enumerate(nodes):
if n.type == syms.except_clause:
- if n.children[0].value == 'except':
+ if n.children[0].value == u'except':
yield (n, nodes[i+2])
class FixExcept(fixer_base.BaseFix):
@@ -53,13 +53,13 @@ class FixExcept(fixer_base.BaseFix):
for except_clause, e_suite in find_excepts(try_cleanup):
if len(except_clause.children) == 4:
(E, comma, N) = except_clause.children[1:4]
- comma.replace(Name("as", prefix=" "))
+ comma.replace(Name(u"as", prefix=u" "))
if N.type != token.NAME:
# Generate a new N for the except clause
- new_N = Name(self.new_name(), prefix=" ")
+ new_N = Name(self.new_name(), prefix=u" ")
target = N.clone()
- target.prefix = ""
+ target.prefix = u""
N.replace(new_N)
new_N = new_N.clone()
@@ -75,7 +75,7 @@ class FixExcept(fixer_base.BaseFix):
# The assignment is different if old_N is a tuple or list
# In that case, the assignment is old_N = new_N.args
if is_tuple(N) or is_list(N):
- assign = Assign(target, Attr(new_N, Name('args')))
+ assign = Assign(target, Attr(new_N, Name(u'args')))
else:
assign = Assign(target, new_N)
@@ -83,10 +83,10 @@ class FixExcept(fixer_base.BaseFix):
for child in reversed(suite_stmts[:i]):
e_suite.insert_child(0, child)
e_suite.insert_child(i, assign)
- elif N.prefix == "":
+ elif N.prefix == u"":
# No space after a comma is legal; no space after "as",
# not so much.
- N.prefix = " "
+ N.prefix = u" "
#TODO(cwinter) fix this when children becomes a smart list
children = [c.clone() for c in node.children[:3]] + try_cleanup + tail
diff --git a/Lib/lib2to3/fixes/fix_exec.py b/Lib/lib2to3/fixes/fix_exec.py
index ab921ee..50e1854 100644
--- a/Lib/lib2to3/fixes/fix_exec.py
+++ b/Lib/lib2to3/fixes/fix_exec.py
@@ -10,6 +10,7 @@ exec code in ns1, ns2 -> exec(code, ns1, ns2)
"""
# Local imports
+from .. import pytree
from .. import fixer_base
from ..fixer_util import Comma, Name, Call
@@ -36,4 +37,4 @@ class FixExec(fixer_base.BaseFix):
if c is not None:
args.extend([Comma(), c.clone()])
- return Call(Name("exec"), args, prefix=node.prefix)
+ return Call(Name(u"exec"), args, prefix=node.prefix)
diff --git a/Lib/lib2to3/fixes/fix_execfile.py b/Lib/lib2to3/fixes/fix_execfile.py
index b6c786f..786268b 100644
--- a/Lib/lib2to3/fixes/fix_execfile.py
+++ b/Lib/lib2to3/fixes/fix_execfile.py
@@ -33,21 +33,21 @@ class FixExecfile(fixer_base.BaseFix):
# Construct open().read().
open_args = ArgList([filename.clone(), Comma(), String('"rb"', ' ')],
rparen=execfile_paren)
- open_call = Node(syms.power, [Name("open"), open_args])
- read = [Node(syms.trailer, [Dot(), Name('read')]),
+ open_call = Node(syms.power, [Name(u"open"), open_args])
+ read = [Node(syms.trailer, [Dot(), Name(u'read')]),
Node(syms.trailer, [LParen(), RParen()])]
open_expr = [open_call] + read
# Wrap the open call in a compile call. This is so the filename will be
# preserved in the execed code.
filename_arg = filename.clone()
- filename_arg.prefix = " "
- exec_str = String("'exec'", " ")
+ filename_arg.prefix = u" "
+ exec_str = String(u"'exec'", u" ")
compile_args = open_expr + [Comma(), filename_arg, Comma(), exec_str]
- compile_call = Call(Name("compile"), compile_args, "")
+ compile_call = Call(Name(u"compile"), compile_args, u"")
# Finally, replace the execfile call with an exec call.
args = [compile_call]
if globals is not None:
args.extend([Comma(), globals.clone()])
if locals is not None:
args.extend([Comma(), locals.clone()])
- return Call(Name("exec"), args, prefix=node.prefix)
+ return Call(Name(u"exec"), args, prefix=node.prefix)
diff --git a/Lib/lib2to3/fixes/fix_exitfunc.py b/Lib/lib2to3/fixes/fix_exitfunc.py
index 2e47887..3f3fbbf 100644
--- a/Lib/lib2to3/fixes/fix_exitfunc.py
+++ b/Lib/lib2to3/fixes/fix_exitfunc.py
@@ -42,9 +42,9 @@ class FixExitfunc(fixer_base.BaseFix):
return
func = results["func"].clone()
- func.prefix = ""
+ func.prefix = u""
register = pytree.Node(syms.power,
- Attr(Name("atexit"), Name("register"))
+ Attr(Name(u"atexit"), Name(u"register"))
)
call = Call(register, [func], node.prefix)
node.replace(call)
@@ -59,13 +59,13 @@ class FixExitfunc(fixer_base.BaseFix):
names = self.sys_import.children[1]
if names.type == syms.dotted_as_names:
names.append_child(Comma())
- names.append_child(Name("atexit", " "))
+ names.append_child(Name(u"atexit", u" "))
else:
containing_stmt = self.sys_import.parent
position = containing_stmt.children.index(self.sys_import)
stmt_container = containing_stmt.parent
new_import = pytree.Node(syms.import_name,
- [Name("import"), Name("atexit", " ")]
+ [Name(u"import"), Name(u"atexit", u" ")]
)
new = pytree.Node(syms.simple_stmt, [new_import])
containing_stmt.insert_child(position + 1, Newline())
diff --git a/Lib/lib2to3/fixes/fix_filter.py b/Lib/lib2to3/fixes/fix_filter.py
index a7a5a15..18ee2ff 100644
--- a/Lib/lib2to3/fixes/fix_filter.py
+++ b/Lib/lib2to3/fixes/fix_filter.py
@@ -14,11 +14,9 @@ Python 2.6 figure it out.
"""
# Local imports
+from ..pgen2 import token
from .. import fixer_base
-from ..pytree import Node
-from ..pygram import python_symbols as syms
-from ..fixer_util import Name, ArgList, ListComp, in_special_context
-
+from ..fixer_util import Name, Call, ListComp, in_special_context
class FixFilter(fixer_base.ConditionalFix):
BM_compatible = True
@@ -37,19 +35,16 @@ class FixFilter(fixer_base.ConditionalFix):
>
')'
>
- [extra_trailers=trailer*]
>
|
power<
'filter'
trailer< '(' arglist< none='None' ',' seq=any > ')' >
- [extra_trailers=trailer*]
>
|
power<
'filter'
args=trailer< '(' [any] ')' >
- [extra_trailers=trailer*]
>
"""
@@ -59,32 +54,23 @@ class FixFilter(fixer_base.ConditionalFix):
if self.should_skip(node):
return
- trailers = []
- if 'extra_trailers' in results:
- for t in results['extra_trailers']:
- trailers.append(t.clone())
-
if "filter_lambda" in results:
new = ListComp(results.get("fp").clone(),
results.get("fp").clone(),
results.get("it").clone(),
results.get("xp").clone())
- new = Node(syms.power, [new] + trailers, prefix="")
elif "none" in results:
- new = ListComp(Name("_f"),
- Name("_f"),
+ new = ListComp(Name(u"_f"),
+ Name(u"_f"),
results["seq"].clone(),
- Name("_f"))
- new = Node(syms.power, [new] + trailers, prefix="")
+ Name(u"_f"))
else:
if in_special_context(node):
return None
-
- args = results['args'].clone()
- new = Node(syms.power, [Name("filter"), args], prefix="")
- new = Node(syms.power, [Name("list"), ArgList([new])] + trailers)
- new.prefix = ""
+ new = node.clone()
+ new.prefix = u""
+ new = Call(Name(u"list"), [new])
new.prefix = node.prefix
return new
diff --git a/Lib/lib2to3/fixes/fix_funcattrs.py b/Lib/lib2to3/fixes/fix_funcattrs.py
index 67f3e18..9e45c02 100644
--- a/Lib/lib2to3/fixes/fix_funcattrs.py
+++ b/Lib/lib2to3/fixes/fix_funcattrs.py
@@ -17,5 +17,5 @@ class FixFuncattrs(fixer_base.BaseFix):
def transform(self, node, results):
attr = results["attr"][0]
- attr.replace(Name(("__%s__" % attr.value[5:]),
+ attr.replace(Name((u"__%s__" % attr.value[5:]),
prefix=attr.prefix))
diff --git a/Lib/lib2to3/fixes/fix_getcwdu.py b/Lib/lib2to3/fixes/fix_getcwdu.py
index 087eaed..82233c8 100644
--- a/Lib/lib2to3/fixes/fix_getcwdu.py
+++ b/Lib/lib2to3/fixes/fix_getcwdu.py
@@ -16,4 +16,4 @@ class FixGetcwdu(fixer_base.BaseFix):
def transform(self, node, results):
name = results["name"]
- name.replace(Name("getcwd", prefix=name.prefix))
+ name.replace(Name(u"getcwd", prefix=name.prefix))
diff --git a/Lib/lib2to3/fixes/fix_has_key.py b/Lib/lib2to3/fixes/fix_has_key.py
index 439708c..bead4cb 100644
--- a/Lib/lib2to3/fixes/fix_has_key.py
+++ b/Lib/lib2to3/fixes/fix_has_key.py
@@ -31,6 +31,7 @@ CAVEATS:
# Local imports
from .. import pytree
+from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, parenthesize
@@ -91,10 +92,10 @@ class FixHasKey(fixer_base.BaseFix):
before = before[0]
else:
before = pytree.Node(syms.power, before)
- before.prefix = " "
- n_op = Name("in", prefix=" ")
+ before.prefix = u" "
+ n_op = Name(u"in", prefix=u" ")
if negation:
- n_not = Name("not", prefix=" ")
+ n_not = Name(u"not", prefix=u" ")
n_op = pytree.Node(syms.comp_op, (n_not, n_op))
new = pytree.Node(syms.comparison, (arg, n_op, before))
if after:
diff --git a/Lib/lib2to3/fixes/fix_idioms.py b/Lib/lib2to3/fixes/fix_idioms.py
index 6905913..37b6eef 100644
--- a/Lib/lib2to3/fixes/fix_idioms.py
+++ b/Lib/lib2to3/fixes/fix_idioms.py
@@ -100,18 +100,18 @@ class FixIdioms(fixer_base.BaseFix):
def transform_isinstance(self, node, results):
x = results["x"].clone() # The thing inside of type()
T = results["T"].clone() # The type being compared against
- x.prefix = ""
- T.prefix = " "
- test = Call(Name("isinstance"), [x, Comma(), T])
+ x.prefix = u""
+ T.prefix = u" "
+ test = Call(Name(u"isinstance"), [x, Comma(), T])
if "n" in results:
- test.prefix = " "
- test = Node(syms.not_test, [Name("not"), test])
+ test.prefix = u" "
+ test = Node(syms.not_test, [Name(u"not"), test])
test.prefix = node.prefix
return test
def transform_while(self, node, results):
one = results["while"]
- one.replace(Name("True", prefix=one.prefix))
+ one.replace(Name(u"True", prefix=one.prefix))
def transform_sort(self, node, results):
sort_stmt = results["sort"]
@@ -120,11 +120,11 @@ class FixIdioms(fixer_base.BaseFix):
simple_expr = results.get("expr")
if list_call:
- list_call.replace(Name("sorted", prefix=list_call.prefix))
+ list_call.replace(Name(u"sorted", prefix=list_call.prefix))
elif simple_expr:
new = simple_expr.clone()
- new.prefix = ""
- simple_expr.replace(Call(Name("sorted"), [new],
+ new.prefix = u""
+ simple_expr.replace(Call(Name(u"sorted"), [new],
prefix=simple_expr.prefix))
else:
raise RuntimeError("should not have reached here")
@@ -133,13 +133,13 @@ class FixIdioms(fixer_base.BaseFix):
btwn = sort_stmt.prefix
# Keep any prefix lines between the sort_stmt and the list_call and
# shove them right after the sorted() call.
- if "\n" in btwn:
+ if u"\n" in btwn:
if next_stmt:
# The new prefix should be everything from the sort_stmt's
# prefix up to the last newline, then the old prefix after a new
# line.
- prefix_lines = (btwn.rpartition("\n")[0], next_stmt[0].prefix)
- next_stmt[0].prefix = "\n".join(prefix_lines)
+ prefix_lines = (btwn.rpartition(u"\n")[0], next_stmt[0].prefix)
+ next_stmt[0].prefix = u"\n".join(prefix_lines)
else:
assert list_call.parent
assert list_call.next_sibling is None
@@ -149,4 +149,4 @@ class FixIdioms(fixer_base.BaseFix):
assert list_call.next_sibling is end_line
# The new prefix should be everything up to the first new line
# of sort_stmt's prefix.
- end_line.prefix = btwn.rpartition("\n")[0]
+ end_line.prefix = btwn.rpartition(u"\n")[0]
diff --git a/Lib/lib2to3/fixes/fix_import.py b/Lib/lib2to3/fixes/fix_import.py
index 734ca29..88e9d10 100644
--- a/Lib/lib2to3/fixes/fix_import.py
+++ b/Lib/lib2to3/fixes/fix_import.py
@@ -61,7 +61,7 @@ class FixImport(fixer_base.BaseFix):
while not hasattr(imp, 'value'):
imp = imp.children[0]
if self.probably_a_local_import(imp.value):
- imp.value = "." + imp.value
+ imp.value = u"." + imp.value
imp.changed()
else:
have_local = False
@@ -78,15 +78,15 @@ class FixImport(fixer_base.BaseFix):
self.warning(node, "absolute and local imports together")
return
- new = FromImport(".", [imp])
+ new = FromImport(u".", [imp])
new.prefix = node.prefix
return new
def probably_a_local_import(self, imp_name):
- if imp_name.startswith("."):
+ if imp_name.startswith(u"."):
# Relative imports are certainly not local imports.
return False
- imp_name = imp_name.split(".", 1)[0]
+ imp_name = imp_name.split(u".", 1)[0]
base_path = dirname(self.filename)
base_path = join(base_path, imp_name)
# If there is no __init__.py next to the file its not in a package
diff --git a/Lib/lib2to3/fixes/fix_imports.py b/Lib/lib2to3/fixes/fix_imports.py
index aaf4f2f..93c9e67 100644
--- a/Lib/lib2to3/fixes/fix_imports.py
+++ b/Lib/lib2to3/fixes/fix_imports.py
@@ -123,7 +123,7 @@ class FixImports(fixer_base.BaseFix):
import_mod = results.get("module_name")
if import_mod:
mod_name = import_mod.value
- new_name = self.mapping[mod_name]
+ new_name = unicode(self.mapping[mod_name])
import_mod.replace(Name(new_name, prefix=import_mod.prefix))
if "name_import" in results:
# If it's not a "from x import x, y" or "import x as y" import,
diff --git a/Lib/lib2to3/fixes/fix_input.py b/Lib/lib2to3/fixes/fix_input.py
index 9cf9a48..728636b 100644
--- a/Lib/lib2to3/fixes/fix_input.py
+++ b/Lib/lib2to3/fixes/fix_input.py
@@ -22,5 +22,5 @@ class FixInput(fixer_base.BaseFix):
return
new = node.clone()
- new.prefix = ""
- return Call(Name("eval"), [new], prefix=node.prefix)
+ new.prefix = u""
+ return Call(Name(u"eval"), [new], prefix=node.prefix)
diff --git a/Lib/lib2to3/fixes/fix_intern.py b/Lib/lib2to3/fixes/fix_intern.py
index d752843..285c126 100644
--- a/Lib/lib2to3/fixes/fix_intern.py
+++ b/Lib/lib2to3/fixes/fix_intern.py
@@ -6,8 +6,9 @@
intern(s) -> sys.intern(s)"""
# Local imports
+from .. import pytree
from .. import fixer_base
-from ..fixer_util import ImportAndCall, touch_import
+from ..fixer_util import Name, Attr, touch_import
class FixIntern(fixer_base.BaseFix):
@@ -30,10 +31,26 @@ class FixIntern(fixer_base.BaseFix):
# PATTERN above but I don't know how to do it so...
obj = results['obj']
if obj:
+ if obj.type == self.syms.star_expr:
+ return # Make no change.
if (obj.type == self.syms.argument and
- obj.children[0].value in {'**', '*'}):
+ obj.children[0].value == '**'):
return # Make no change.
- names = ('sys', 'intern')
- new = ImportAndCall(node, results, names)
- touch_import(None, 'sys', node)
+ syms = self.syms
+ obj = results["obj"].clone()
+ if obj.type == syms.arglist:
+ newarglist = obj.clone()
+ else:
+ newarglist = pytree.Node(syms.arglist, [obj.clone()])
+ after = results["after"]
+ if after:
+ after = [n.clone() for n in after]
+ new = pytree.Node(syms.power,
+ Attr(Name(u"sys"), Name(u"intern")) +
+ [pytree.Node(syms.trailer,
+ [results["lpar"].clone(),
+ newarglist,
+ results["rpar"].clone()])] + after)
+ new.prefix = node.prefix
+ touch_import(None, u'sys', node)
return new
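
The inlined transform above also relies on touch_import, which prepends the needed import at module level unless one is already present. A small sketch with a hypothetical input:

from lib2to3 import pygram, pytree
from lib2to3.pgen2 import driver
from lib2to3.fixer_util import touch_import

d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
tree = d.parse_string(u"x = intern(s)\n")
touch_import(None, u"sys", tree)   # adds "import sys" only if missing
print str(tree)                    # -> "import sys\nx = intern(s)\n"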
diff --git a/Lib/lib2to3/fixes/fix_isinstance.py b/Lib/lib2to3/fixes/fix_isinstance.py
index bebb1de..4b04c8f 100644
--- a/Lib/lib2to3/fixes/fix_isinstance.py
+++ b/Lib/lib2to3/fixes/fix_isinstance.py
@@ -35,7 +35,7 @@ class FixIsinstance(fixer_base.BaseFix):
for idx, arg in iterator:
if arg.type == token.NAME and arg.value in names_inserted:
if idx < len(args) - 1 and args[idx + 1].type == token.COMMA:
- next(iterator)
+ iterator.next()
continue
else:
new_args.append(arg)
diff --git a/Lib/lib2to3/fixes/fix_itertools.py b/Lib/lib2to3/fixes/fix_itertools.py
index 8e78d6c..067641b 100644
--- a/Lib/lib2to3/fixes/fix_itertools.py
+++ b/Lib/lib2to3/fixes/fix_itertools.py
@@ -29,7 +29,7 @@ class FixItertools(fixer_base.BaseFix):
prefix = None
func = results['func'][0]
if ('it' in results and
- func.value not in ('ifilterfalse', 'izip_longest')):
+ func.value not in (u'ifilterfalse', u'izip_longest')):
dot, it = (results['dot'], results['it'])
# Remove the 'itertools'
prefix = it.prefix
diff --git a/Lib/lib2to3/fixes/fix_itertools_imports.py b/Lib/lib2to3/fixes/fix_itertools_imports.py
index 0ddbc7b..28610cf 100644
--- a/Lib/lib2to3/fixes/fix_itertools_imports.py
+++ b/Lib/lib2to3/fixes/fix_itertools_imports.py
@@ -28,13 +28,13 @@ class FixItertoolsImports(fixer_base.BaseFix):
assert child.type == syms.import_as_name
name_node = child.children[0]
member_name = name_node.value
- if member_name in ('imap', 'izip', 'ifilter'):
+ if member_name in (u'imap', u'izip', u'ifilter'):
child.value = None
child.remove()
- elif member_name in ('ifilterfalse', 'izip_longest'):
+ elif member_name in (u'ifilterfalse', u'izip_longest'):
node.changed()
- name_node.value = ('filterfalse' if member_name[1] == 'f'
- else 'zip_longest')
+ name_node.value = (u'filterfalse' if member_name[1] == u'f'
+ else u'zip_longest')
# Make sure the import statement is still sane
children = imports.children[:] or [imports]
diff --git a/Lib/lib2to3/fixes/fix_long.py b/Lib/lib2to3/fixes/fix_long.py
index f227c9f..5dddde0 100644
--- a/Lib/lib2to3/fixes/fix_long.py
+++ b/Lib/lib2to3/fixes/fix_long.py
@@ -15,5 +15,5 @@ class FixLong(fixer_base.BaseFix):
def transform(self, node, results):
if is_probably_builtin(node):
- node.value = "int"
+ node.value = u"int"
node.changed()
diff --git a/Lib/lib2to3/fixes/fix_map.py b/Lib/lib2to3/fixes/fix_map.py
index 78cf81c..7a7d0db 100644
--- a/Lib/lib2to3/fixes/fix_map.py
+++ b/Lib/lib2to3/fixes/fix_map.py
@@ -22,10 +22,8 @@ soon as the shortest argument is exhausted.
# Local imports
from ..pgen2 import token
from .. import fixer_base
-from ..fixer_util import Name, ArgList, Call, ListComp, in_special_context
+from ..fixer_util import Name, Call, ListComp, in_special_context
from ..pygram import python_symbols as syms
-from ..pytree import Node
-
class FixMap(fixer_base.ConditionalFix):
BM_compatible = True
@@ -34,7 +32,6 @@ class FixMap(fixer_base.ConditionalFix):
map_none=power<
'map'
trailer< '(' arglist< 'None' ',' arg=any [','] > ')' >
- [extra_trailers=trailer*]
>
|
map_lambda=power<
@@ -50,12 +47,10 @@ class FixMap(fixer_base.ConditionalFix):
>
')'
>
- [extra_trailers=trailer*]
>
|
power<
- 'map' args=trailer< '(' [any] ')' >
- [extra_trailers=trailer*]
+ 'map' trailer< '(' [arglist=any] ')' >
>
"""
@@ -65,46 +60,32 @@ class FixMap(fixer_base.ConditionalFix):
if self.should_skip(node):
return
- trailers = []
- if 'extra_trailers' in results:
- for t in results['extra_trailers']:
- trailers.append(t.clone())
-
if node.parent.type == syms.simple_stmt:
self.warning(node, "You should use a for loop here")
new = node.clone()
- new.prefix = ""
- new = Call(Name("list"), [new])
+ new.prefix = u""
+ new = Call(Name(u"list"), [new])
elif "map_lambda" in results:
new = ListComp(results["xp"].clone(),
results["fp"].clone(),
results["it"].clone())
- new = Node(syms.power, [new] + trailers, prefix="")
-
else:
if "map_none" in results:
new = results["arg"].clone()
- new.prefix = ""
else:
- if "args" in results:
- args = results["args"]
- if args.type == syms.trailer and \
- args.children[1].type == syms.arglist and \
- args.children[1].children[0].type == token.NAME and \
- args.children[1].children[0].value == "None":
+ if "arglist" in results:
+ args = results["arglist"]
+ if args.type == syms.arglist and \
+ args.children[0].type == token.NAME and \
+ args.children[0].value == "None":
self.warning(node, "cannot convert map(None, ...) "
"with multiple arguments because map() "
"now truncates to the shortest sequence")
return
-
- new = Node(syms.power, [Name("map"), args.clone()])
- new.prefix = ""
-
if in_special_context(node):
return None
-
- new = Node(syms.power, [Name("list"), ArgList([new])] + trailers)
- new.prefix = ""
-
+ new = node.clone()
+ new.prefix = u""
+ new = Call(Name(u"list"), [new])
new.prefix = node.prefix
return new
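
To make the restored behavior concrete: the lambda form rewrites to a list comprehension, other calls are wrapped in list(), and map(None, ...) with multiple arguments only warns, since map() now truncates to the shortest sequence. A hedged sketch via the refactoring API:

from lib2to3.refactor import RefactoringTool

rt = RefactoringTool(["lib2to3.fixes.fix_map"])
print rt.refactor_string(u"x = map(lambda n: n + 1, seq)\n", "<example>")
# -> x = [n + 1 for n in seq]
print rt.refactor_string(u"y = map(f, seq)\n", "<example>")
# -> y = list(map(f, seq))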
diff --git a/Lib/lib2to3/fixes/fix_metaclass.py b/Lib/lib2to3/fixes/fix_metaclass.py
index d1cd10d..45f9937 100644
--- a/Lib/lib2to3/fixes/fix_metaclass.py
+++ b/Lib/lib2to3/fixes/fix_metaclass.py
@@ -1,6 +1,6 @@
"""Fixer for __metaclass__ = X -> (metaclass=X) methods.
- The various forms of classef (inherits nothing, inherits once, inherits
+ The various forms of classef (inherits nothing, inherits once, inherits
many) don't parse the same in the CST so we look at ALL classes for
a __metaclass__ and if we find one normalize the inherits to all be
an arglist.
@@ -20,7 +20,7 @@
# Local imports
from .. import fixer_base
from ..pygram import token
-from ..fixer_util import syms, Node, Leaf
+from ..fixer_util import Name, syms, Node, Leaf
def has_metaclass(parent):
@@ -113,7 +113,7 @@ def find_metas(cls_node):
# Check if the expr_node is a simple assignment.
left_node = expr_node.children[0]
if isinstance(left_node, Leaf) and \
- left_node.value == '__metaclass__':
+ left_node.value == u'__metaclass__':
# We found an assignment to __metaclass__.
fixup_simple_stmt(node, i, simple_node)
remove_trailing_newline(simple_node)
@@ -136,7 +136,7 @@ def fixup_indent(suite):
node = kids.pop()
if isinstance(node, Leaf) and node.type != token.DEDENT:
if node.prefix:
- node.prefix = ''
+ node.prefix = u''
return
else:
kids.extend(node.children[::-1])
@@ -183,9 +183,9 @@ class FixMetaclass(fixer_base.BaseFix):
# Node(classdef, ['class', 'name', ':', suite])
# 0 1 2 3
arglist = Node(syms.arglist, [])
- node.insert_child(2, Leaf(token.RPAR, ')'))
+ node.insert_child(2, Leaf(token.RPAR, u')'))
node.insert_child(2, arglist)
- node.insert_child(2, Leaf(token.LPAR, '('))
+ node.insert_child(2, Leaf(token.LPAR, u'('))
else:
raise ValueError("Unexpected class definition")
@@ -195,16 +195,16 @@ class FixMetaclass(fixer_base.BaseFix):
orig_meta_prefix = meta_txt.prefix
if arglist.children:
- arglist.append_child(Leaf(token.COMMA, ','))
- meta_txt.prefix = ' '
+ arglist.append_child(Leaf(token.COMMA, u','))
+ meta_txt.prefix = u' '
else:
- meta_txt.prefix = ''
+ meta_txt.prefix = u''
# compact the expression "metaclass = Meta" -> "metaclass=Meta"
expr_stmt = last_metaclass.children[0]
assert expr_stmt.type == syms.expr_stmt
- expr_stmt.children[1].prefix = ''
- expr_stmt.children[2].prefix = ''
+ expr_stmt.children[1].prefix = u''
+ expr_stmt.children[2].prefix = u''
arglist.append_child(last_metaclass)
@@ -214,15 +214,15 @@ class FixMetaclass(fixer_base.BaseFix):
if not suite.children:
# one-liner that was just __metaclass_
suite.remove()
- pass_leaf = Leaf(text_type, 'pass')
+ pass_leaf = Leaf(text_type, u'pass')
pass_leaf.prefix = orig_meta_prefix
node.append_child(pass_leaf)
- node.append_child(Leaf(token.NEWLINE, '\n'))
+ node.append_child(Leaf(token.NEWLINE, u'\n'))
elif len(suite.children) > 1 and \
(suite.children[-2].type == token.INDENT and
suite.children[-1].type == token.DEDENT):
# there was only one line in the class body and it was __metaclass__
- pass_leaf = Leaf(text_type, 'pass')
+ pass_leaf = Leaf(text_type, u'pass')
suite.insert_child(-1, pass_leaf)
- suite.insert_child(-1, Leaf(token.NEWLINE, '\n'))
+ suite.insert_child(-1, Leaf(token.NEWLINE, u'\n'))
diff --git a/Lib/lib2to3/fixes/fix_methodattrs.py b/Lib/lib2to3/fixes/fix_methodattrs.py
index 7f9004f..f3c1ecf 100644
--- a/Lib/lib2to3/fixes/fix_methodattrs.py
+++ b/Lib/lib2to3/fixes/fix_methodattrs.py
@@ -20,5 +20,5 @@ class FixMethodattrs(fixer_base.BaseFix):
def transform(self, node, results):
attr = results["attr"][0]
- new = MAP[attr.value]
+ new = unicode(MAP[attr.value])
attr.replace(Name(new, prefix=attr.prefix))
diff --git a/Lib/lib2to3/fixes/fix_ne.py b/Lib/lib2to3/fixes/fix_ne.py
index e3ee10f..7025980 100644
--- a/Lib/lib2to3/fixes/fix_ne.py
+++ b/Lib/lib2to3/fixes/fix_ne.py
@@ -16,8 +16,8 @@ class FixNe(fixer_base.BaseFix):
def match(self, node):
# Override
- return node.value == "<>"
+ return node.value == u"<>"
def transform(self, node, results):
- new = pytree.Leaf(token.NOTEQUAL, "!=", prefix=node.prefix)
+ new = pytree.Leaf(token.NOTEQUAL, u"!=", prefix=node.prefix)
return new
diff --git a/Lib/lib2to3/fixes/fix_next.py b/Lib/lib2to3/fixes/fix_next.py
index 9f6305e..f021a9b 100644
--- a/Lib/lib2to3/fixes/fix_next.py
+++ b/Lib/lib2to3/fixes/fix_next.py
@@ -36,7 +36,7 @@ class FixNext(fixer_base.BaseFix):
def start_tree(self, tree, filename):
super(FixNext, self).start_tree(tree, filename)
- n = find_binding('next', tree)
+ n = find_binding(u'next', tree)
if n:
self.warning(n, bind_warning)
self.shadowed_next = True
@@ -52,13 +52,13 @@ class FixNext(fixer_base.BaseFix):
if base:
if self.shadowed_next:
- attr.replace(Name("__next__", prefix=attr.prefix))
+ attr.replace(Name(u"__next__", prefix=attr.prefix))
else:
base = [n.clone() for n in base]
- base[0].prefix = ""
- node.replace(Call(Name("next", prefix=node.prefix), base))
+ base[0].prefix = u""
+ node.replace(Call(Name(u"next", prefix=node.prefix), base))
elif name:
- n = Name("__next__", prefix=name.prefix)
+ n = Name(u"__next__", prefix=name.prefix)
name.replace(n)
elif attr:
# We don't do this transformation if we're assigning to "x.next".
@@ -66,10 +66,10 @@ class FixNext(fixer_base.BaseFix):
# so it's being done here.
if is_assign_target(node):
head = results["head"]
- if "".join([str(n) for n in head]).strip() == '__builtin__':
+ if "".join([str(n) for n in head]).strip() == u'__builtin__':
self.warning(node, bind_warning)
return
- attr.replace(Name("__next__"))
+ attr.replace(Name(u"__next__"))
elif "global" in results:
self.warning(node, bind_warning)
self.shadowed_next = True
diff --git a/Lib/lib2to3/fixes/fix_nonzero.py b/Lib/lib2to3/fixes/fix_nonzero.py
index c229596..ba83478 100644
--- a/Lib/lib2to3/fixes/fix_nonzero.py
+++ b/Lib/lib2to3/fixes/fix_nonzero.py
@@ -3,7 +3,7 @@
# Local imports
from .. import fixer_base
-from ..fixer_util import Name
+from ..fixer_util import Name, syms
class FixNonzero(fixer_base.BaseFix):
BM_compatible = True
@@ -17,5 +17,5 @@ class FixNonzero(fixer_base.BaseFix):
def transform(self, node, results):
name = results["name"]
- new = Name("__bool__", prefix=name.prefix)
+ new = Name(u"__bool__", prefix=name.prefix)
name.replace(new)
diff --git a/Lib/lib2to3/fixes/fix_numliterals.py b/Lib/lib2to3/fixes/fix_numliterals.py
index 79207d4..b0c23f8 100644
--- a/Lib/lib2to3/fixes/fix_numliterals.py
+++ b/Lib/lib2to3/fixes/fix_numliterals.py
@@ -16,13 +16,13 @@ class FixNumliterals(fixer_base.BaseFix):
def match(self, node):
# Override
- return (node.value.startswith("0") or node.value[-1] in "Ll")
+ return (node.value.startswith(u"0") or node.value[-1] in u"Ll")
def transform(self, node, results):
val = node.value
- if val[-1] in 'Ll':
+ if val[-1] in u'Ll':
val = val[:-1]
- elif val.startswith('0') and val.isdigit() and len(set(val)) > 1:
- val = "0o" + val[1:]
+ elif val.startswith(u'0') and val.isdigit() and len(set(val)) > 1:
+ val = u"0o" + val[1:]
return Number(val, prefix=node.prefix)
diff --git a/Lib/lib2to3/fixes/fix_operator.py b/Lib/lib2to3/fixes/fix_operator.py
index d303cd2..7bf2c0d 100644
--- a/Lib/lib2to3/fixes/fix_operator.py
+++ b/Lib/lib2to3/fixes/fix_operator.py
@@ -1,16 +1,14 @@
"""Fixer for operator functions.
-operator.isCallable(obj) -> callable(obj)
+operator.isCallable(obj) -> hasattr(obj, '__call__')
operator.sequenceIncludes(obj) -> operator.contains(obj)
-operator.isSequenceType(obj) -> isinstance(obj, collections.abc.Sequence)
-operator.isMappingType(obj) -> isinstance(obj, collections.abc.Mapping)
+operator.isSequenceType(obj) -> isinstance(obj, collections.Sequence)
+operator.isMappingType(obj) -> isinstance(obj, collections.Mapping)
operator.isNumberType(obj) -> isinstance(obj, numbers.Number)
operator.repeat(obj, n) -> operator.mul(obj, n)
operator.irepeat(obj, n) -> operator.imul(obj, n)
"""
-import collections.abc
-
# Local imports
from lib2to3 import fixer_base
from lib2to3.fixer_util import Call, Name, String, touch_import
@@ -47,32 +45,33 @@ class FixOperator(fixer_base.BaseFix):
@invocation("operator.contains(%s)")
def _sequenceIncludes(self, node, results):
- return self._handle_rename(node, results, "contains")
+ return self._handle_rename(node, results, u"contains")
- @invocation("callable(%s)")
+ @invocation("hasattr(%s, '__call__')")
def _isCallable(self, node, results):
obj = results["obj"]
- return Call(Name("callable"), [obj.clone()], prefix=node.prefix)
+ args = [obj.clone(), String(u", "), String(u"'__call__'")]
+ return Call(Name(u"hasattr"), args, prefix=node.prefix)
@invocation("operator.mul(%s)")
def _repeat(self, node, results):
- return self._handle_rename(node, results, "mul")
+ return self._handle_rename(node, results, u"mul")
@invocation("operator.imul(%s)")
def _irepeat(self, node, results):
- return self._handle_rename(node, results, "imul")
+ return self._handle_rename(node, results, u"imul")
- @invocation("isinstance(%s, collections.abc.Sequence)")
+ @invocation("isinstance(%s, collections.Sequence)")
def _isSequenceType(self, node, results):
- return self._handle_type2abc(node, results, "collections.abc", "Sequence")
+ return self._handle_type2abc(node, results, u"collections", u"Sequence")
- @invocation("isinstance(%s, collections.abc.Mapping)")
+ @invocation("isinstance(%s, collections.Mapping)")
def _isMappingType(self, node, results):
- return self._handle_type2abc(node, results, "collections.abc", "Mapping")
+ return self._handle_type2abc(node, results, u"collections", u"Mapping")
@invocation("isinstance(%s, numbers.Number)")
def _isNumberType(self, node, results):
- return self._handle_type2abc(node, results, "numbers", "Number")
+ return self._handle_type2abc(node, results, u"numbers", u"Number")
def _handle_rename(self, node, results, name):
method = results["method"][0]
@@ -82,16 +81,16 @@ class FixOperator(fixer_base.BaseFix):
def _handle_type2abc(self, node, results, module, abc):
touch_import(None, module, node)
obj = results["obj"]
- args = [obj.clone(), String(", " + ".".join([module, abc]))]
- return Call(Name("isinstance"), args, prefix=node.prefix)
+ args = [obj.clone(), String(u", " + u".".join([module, abc]))]
+ return Call(Name(u"isinstance"), args, prefix=node.prefix)
def _check_method(self, node, results):
- method = getattr(self, "_" + results["method"][0].value)
- if isinstance(method, collections.abc.Callable):
+ method = getattr(self, "_" + results["method"][0].value.encode("ascii"))
+ if callable(method):
if "module" in results:
return method
else:
- sub = (str(results["obj"]),)
- invocation_str = method.invocation % sub
- self.warning(node, "You should use '%s' here." % invocation_str)
+ sub = (unicode(results["obj"]),)
+ invocation_str = unicode(method.invocation) % sub
+ self.warning(node, u"You should use '%s' here." % invocation_str)
return None
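
The revert restores the Python 2 spellings: hasattr(obj, '__call__') in place of callable(obj), and the pre-3.3 collections names for the ABCs. A small sketch of the callable identity this relies on (for new-style objects the two agree):

class C(object):
    def __call__(self):
        return 42

# hasattr(obj, "__call__") is what the fixer now emits for
# operator.isCallable(obj); it matches callable() on these inputs.
for obj in (C(), len, 3):
    assert hasattr(obj, "__call__") == callable(obj)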
diff --git a/Lib/lib2to3/fixes/fix_paren.py b/Lib/lib2to3/fixes/fix_paren.py
index b205aa7..8650cd9 100644
--- a/Lib/lib2to3/fixes/fix_paren.py
+++ b/Lib/lib2to3/fixes/fix_paren.py
@@ -39,6 +39,6 @@ class FixParen(fixer_base.BaseFix):
lparen = LParen()
lparen.prefix = target.prefix
- target.prefix = "" # Make it hug the parentheses
+ target.prefix = u"" # Make it hug the parentheses
target.insert_child(0, lparen)
target.append_child(RParen())
diff --git a/Lib/lib2to3/fixes/fix_print.py b/Lib/lib2to3/fixes/fix_print.py
index 8780322..98786b3 100644
--- a/Lib/lib2to3/fixes/fix_print.py
+++ b/Lib/lib2to3/fixes/fix_print.py
@@ -18,7 +18,7 @@ from .. import patcomp
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
-from ..fixer_util import Name, Call, Comma, String
+from ..fixer_util import Name, Call, Comma, String, is_tuple
parend_expr = patcomp.compile_pattern(
@@ -41,10 +41,10 @@ class FixPrint(fixer_base.BaseFix):
if bare_print:
# Special-case print all by itself
- bare_print.replace(Call(Name("print"), [],
+ bare_print.replace(Call(Name(u"print"), [],
prefix=bare_print.prefix))
return
- assert node.children[0] == Name("print")
+ assert node.children[0] == Name(u"print")
args = node.children[1:]
if len(args) == 1 and parend_expr.match(args[0]):
# We don't want to keep sticking parens around an
@@ -55,33 +55,33 @@ class FixPrint(fixer_base.BaseFix):
if args and args[-1] == Comma():
args = args[:-1]
end = " "
- if args and args[0] == pytree.Leaf(token.RIGHTSHIFT, ">>"):
+ if args and args[0] == pytree.Leaf(token.RIGHTSHIFT, u">>"):
assert len(args) >= 2
file = args[1].clone()
args = args[3:] # Strip a possible comma after the file expression
# Now synthesize a print(args, sep=..., end=..., file=...) node.
l_args = [arg.clone() for arg in args]
if l_args:
- l_args[0].prefix = ""
+ l_args[0].prefix = u""
if sep is not None or end is not None or file is not None:
if sep is not None:
- self.add_kwarg(l_args, "sep", String(repr(sep)))
+ self.add_kwarg(l_args, u"sep", String(repr(sep)))
if end is not None:
- self.add_kwarg(l_args, "end", String(repr(end)))
+ self.add_kwarg(l_args, u"end", String(repr(end)))
if file is not None:
- self.add_kwarg(l_args, "file", file)
- n_stmt = Call(Name("print"), l_args)
+ self.add_kwarg(l_args, u"file", file)
+ n_stmt = Call(Name(u"print"), l_args)
n_stmt.prefix = node.prefix
return n_stmt
def add_kwarg(self, l_nodes, s_kwd, n_expr):
# XXX All this prefix-setting may lose comments (though rarely)
- n_expr.prefix = ""
+ n_expr.prefix = u""
n_argument = pytree.Node(self.syms.argument,
(Name(s_kwd),
- pytree.Leaf(token.EQUAL, "="),
+ pytree.Leaf(token.EQUAL, u"="),
n_expr))
if l_nodes:
l_nodes.append(Comma())
- n_argument.prefix = " "
+ n_argument.prefix = u" "
l_nodes.append(n_argument)
diff --git a/Lib/lib2to3/fixes/fix_raise.py b/Lib/lib2to3/fixes/fix_raise.py
index 05aa21e..b958ba0 100644
--- a/Lib/lib2to3/fixes/fix_raise.py
+++ b/Lib/lib2to3/fixes/fix_raise.py
@@ -55,11 +55,11 @@ class FixRaise(fixer_base.BaseFix):
# exc.children[1:-1] is the unparenthesized tuple
# exc.children[1].children[0] is the first element of the tuple
exc = exc.children[1].children[0].clone()
- exc.prefix = " "
+ exc.prefix = u" "
if "val" not in results:
# One-argument raise
- new = pytree.Node(syms.raise_stmt, [Name("raise"), exc])
+ new = pytree.Node(syms.raise_stmt, [Name(u"raise"), exc])
new.prefix = node.prefix
return new
@@ -67,24 +67,24 @@ class FixRaise(fixer_base.BaseFix):
if is_tuple(val):
args = [c.clone() for c in val.children[1:-1]]
else:
- val.prefix = ""
+ val.prefix = u""
args = [val]
if "tb" in results:
tb = results["tb"].clone()
- tb.prefix = ""
+ tb.prefix = u""
e = exc
# If there's a traceback and None is passed as the value, then don't
# add a call, since the user probably just wants to add a
# traceback. See issue #9661.
- if val.type != token.NAME or val.value != "None":
+ if val.type != token.NAME or val.value != u"None":
e = Call(exc, args)
- with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])]
- new = pytree.Node(syms.simple_stmt, [Name("raise")] + with_tb)
+ with_tb = Attr(e, Name(u'with_traceback')) + [ArgList([tb])]
+ new = pytree.Node(syms.simple_stmt, [Name(u"raise")] + with_tb)
new.prefix = node.prefix
return new
else:
return pytree.Node(syms.raise_stmt,
- [Name("raise"), Call(exc, args)],
+ [Name(u"raise"), Call(exc, args)],
prefix=node.prefix)
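A hedged sketch of the three-argument raise rewrite these hunks preserve, assuming the stock fixer behavior shown above:

    from lib2to3.refactor import RefactoringTool

    rt = RefactoringTool(["lib2to3.fixes.fix_raise"])
    # "raise E, V, T" becomes a call plus with_traceback(), per the hunk above.
    print(rt.refactor_string("raise ValueError, msg, tb\n", "<example>"))
    # roughly: raise ValueError(msg).with_traceback(tb)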
diff --git a/Lib/lib2to3/fixes/fix_raw_input.py b/Lib/lib2to3/fixes/fix_raw_input.py
index a51bb69..3a73b81 100644
--- a/Lib/lib2to3/fixes/fix_raw_input.py
+++ b/Lib/lib2to3/fixes/fix_raw_input.py
@@ -14,4 +14,4 @@ class FixRawInput(fixer_base.BaseFix):
def transform(self, node, results):
name = results["name"]
- name.replace(Name("input", prefix=name.prefix))
+ name.replace(Name(u"input", prefix=name.prefix))
diff --git a/Lib/lib2to3/fixes/fix_reduce.py b/Lib/lib2to3/fixes/fix_reduce.py
index 00e5aa1..6bd785c 100644
--- a/Lib/lib2to3/fixes/fix_reduce.py
+++ b/Lib/lib2to3/fixes/fix_reduce.py
@@ -32,4 +32,4 @@ class FixReduce(fixer_base.BaseFix):
"""
def transform(self, node, results):
- touch_import('functools', 'reduce', node)
+ touch_import(u'functools', u'reduce', node)
diff --git a/Lib/lib2to3/fixes/fix_reload.py b/Lib/lib2to3/fixes/fix_reload.py
deleted file mode 100644
index b308411..0000000
--- a/Lib/lib2to3/fixes/fix_reload.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""Fixer for reload().
-
-reload(s) -> importlib.reload(s)"""
-
-# Local imports
-from .. import fixer_base
-from ..fixer_util import ImportAndCall, touch_import
-
-
-class FixReload(fixer_base.BaseFix):
- BM_compatible = True
- order = "pre"
-
- PATTERN = """
- power< 'reload'
- trailer< lpar='('
- ( not(arglist | argument<any '=' any>) obj=any
- | obj=arglist<(not argument<any '=' any>) any ','> )
- rpar=')' >
- after=any*
- >
- """
-
- def transform(self, node, results):
- if results:
- # I feel like we should be able to express this logic in the
- # PATTERN above but I don't know how to do it so...
- obj = results['obj']
- if obj:
- if (obj.type == self.syms.argument and
- obj.children[0].value in {'**', '*'}):
- return # Make no change.
- names = ('importlib', 'reload')
- new = ImportAndCall(node, results, names)
- touch_import(None, 'importlib', node)
- return new
diff --git a/Lib/lib2to3/fixes/fix_renames.py b/Lib/lib2to3/fixes/fix_renames.py
index c0e3705..4bcce8c 100644
--- a/Lib/lib2to3/fixes/fix_renames.py
+++ b/Lib/lib2to3/fixes/fix_renames.py
@@ -20,8 +20,8 @@ def alternates(members):
def build_pattern():
#bare = set()
- for module, replace in list(MAPPING.items()):
- for old_attr, new_attr in list(replace.items()):
+ for module, replace in MAPPING.items():
+ for old_attr, new_attr in replace.items():
LOOKUP[(module, old_attr)] = new_attr
#bare.add(module)
#bare.add(old_attr)
@@ -66,5 +66,5 @@ class FixRenames(fixer_base.BaseFix):
#import_mod = results.get("module")
if mod_name and attr_name:
- new_attr = LOOKUP[(mod_name.value, attr_name.value)]
+ new_attr = unicode(LOOKUP[(mod_name.value, attr_name.value)])
attr_name.replace(Name(new_attr, prefix=attr_name.prefix))
diff --git a/Lib/lib2to3/fixes/fix_repr.py b/Lib/lib2to3/fixes/fix_repr.py
index 1150bb8..f343656 100644
--- a/Lib/lib2to3/fixes/fix_repr.py
+++ b/Lib/lib2to3/fixes/fix_repr.py
@@ -20,4 +20,4 @@ class FixRepr(fixer_base.BaseFix):
if expr.type == self.syms.testlist1:
expr = parenthesize(expr)
- return Call(Name("repr"), [expr], prefix=node.prefix)
+ return Call(Name(u"repr"), [expr], prefix=node.prefix)
diff --git a/Lib/lib2to3/fixes/fix_set_literal.py b/Lib/lib2to3/fixes/fix_set_literal.py
index 762550c..d3d38ec 100644
--- a/Lib/lib2to3/fixes/fix_set_literal.py
+++ b/Lib/lib2to3/fixes/fix_set_literal.py
@@ -35,9 +35,9 @@ class FixSetLiteral(fixer_base.BaseFix):
items = results["items"]
# Build the contents of the literal
- literal = [pytree.Leaf(token.LBRACE, "{")]
+ literal = [pytree.Leaf(token.LBRACE, u"{")]
literal.extend(n.clone() for n in items.children)
- literal.append(pytree.Leaf(token.RBRACE, "}"))
+ literal.append(pytree.Leaf(token.RBRACE, u"}"))
# Set the prefix of the right brace to that of the ')' or ']'
literal[-1].prefix = items.next_sibling.prefix
maker = pytree.Node(syms.dictsetmaker, literal)
diff --git a/Lib/lib2to3/fixes/fix_standarderror.py b/Lib/lib2to3/fixes/fix_standarderror.py
index dc74216..6cad511 100644
--- a/Lib/lib2to3/fixes/fix_standarderror.py
+++ b/Lib/lib2to3/fixes/fix_standarderror.py
@@ -15,4 +15,4 @@ class FixStandarderror(fixer_base.BaseFix):
"""
def transform(self, node, results):
- return Name("Exception", prefix=node.prefix)
+ return Name(u"Exception", prefix=node.prefix)
diff --git a/Lib/lib2to3/fixes/fix_sys_exc.py b/Lib/lib2to3/fixes/fix_sys_exc.py
index f603969..2ecca2b 100644
--- a/Lib/lib2to3/fixes/fix_sys_exc.py
+++ b/Lib/lib2to3/fixes/fix_sys_exc.py
@@ -13,7 +13,7 @@ from ..fixer_util import Attr, Call, Name, Number, Subscript, Node, syms
class FixSysExc(fixer_base.BaseFix):
# This order matches the ordering of sys.exc_info().
- exc_info = ["exc_type", "exc_value", "exc_traceback"]
+ exc_info = [u"exc_type", u"exc_value", u"exc_traceback"]
BM_compatible = True
PATTERN = """
power< 'sys' trailer< dot='.' attribute=(%s) > >
@@ -23,8 +23,8 @@ class FixSysExc(fixer_base.BaseFix):
sys_attr = results["attribute"][0]
index = Number(self.exc_info.index(sys_attr.value))
- call = Call(Name("exc_info"), prefix=sys_attr.prefix)
- attr = Attr(Name("sys"), call)
+ call = Call(Name(u"exc_info"), prefix=sys_attr.prefix)
+ attr = Attr(Name(u"sys"), call)
attr[1].children[0].prefix = results["dot"].prefix
attr.append(Subscript(index))
return Node(syms.power, attr, prefix=node.prefix)
diff --git a/Lib/lib2to3/fixes/fix_throw.py b/Lib/lib2to3/fixes/fix_throw.py
index aac2916..1468d89 100644
--- a/Lib/lib2to3/fixes/fix_throw.py
+++ b/Lib/lib2to3/fixes/fix_throw.py
@@ -32,7 +32,7 @@ class FixThrow(fixer_base.BaseFix):
return
# Leave "g.throw(E)" alone
- val = results.get("val")
+ val = results.get(u"val")
if val is None:
return
@@ -40,17 +40,17 @@ class FixThrow(fixer_base.BaseFix):
if is_tuple(val):
args = [c.clone() for c in val.children[1:-1]]
else:
- val.prefix = ""
+ val.prefix = u""
args = [val]
throw_args = results["args"]
if "tb" in results:
tb = results["tb"].clone()
- tb.prefix = ""
+ tb.prefix = u""
e = Call(exc, args)
- with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])]
+ with_tb = Attr(e, Name(u'with_traceback')) + [ArgList([tb])]
throw_args.replace(pytree.Node(syms.power, with_tb))
else:
throw_args.replace(Call(exc, args))
diff --git a/Lib/lib2to3/fixes/fix_tuple_params.py b/Lib/lib2to3/fixes/fix_tuple_params.py
index cad755f..6361717 100644
--- a/Lib/lib2to3/fixes/fix_tuple_params.py
+++ b/Lib/lib2to3/fixes/fix_tuple_params.py
@@ -58,8 +58,8 @@ class FixTupleParams(fixer_base.BaseFix):
end = Newline()
else:
start = 0
- indent = "; "
- end = pytree.Leaf(token.INDENT, "")
+ indent = u"; "
+ end = pytree.Leaf(token.INDENT, u"")
# We need access to self for new_name(), and making this a method
# doesn't feel right. Closing over self and new_lines makes the
@@ -67,10 +67,10 @@ class FixTupleParams(fixer_base.BaseFix):
def handle_tuple(tuple_arg, add_prefix=False):
n = Name(self.new_name())
arg = tuple_arg.clone()
- arg.prefix = ""
+ arg.prefix = u""
stmt = Assign(arg, n.clone())
if add_prefix:
- n.prefix = " "
+ n.prefix = u" "
tuple_arg.replace(n)
new_lines.append(pytree.Node(syms.simple_stmt,
[stmt, end.clone()]))
@@ -95,7 +95,7 @@ class FixTupleParams(fixer_base.BaseFix):
# TODO(cwinter) suite-cleanup
after = start
if start == 0:
- new_lines[0].prefix = " "
+ new_lines[0].prefix = u" "
elif is_docstring(suite[0].children[start]):
new_lines[0].prefix = indent
after = start + 1
@@ -115,7 +115,7 @@ class FixTupleParams(fixer_base.BaseFix):
# Replace lambda ((((x)))): x with lambda x: x
if inner.type == token.NAME:
inner = inner.clone()
- inner.prefix = " "
+ inner.prefix = u" "
args.replace(inner)
return
@@ -123,7 +123,7 @@ class FixTupleParams(fixer_base.BaseFix):
to_index = map_to_index(params)
tup_name = self.new_name(tuple_name(params))
- new_param = Name(tup_name, prefix=" ")
+ new_param = Name(tup_name, prefix=u" ")
args.replace(new_param.clone())
for n in body.post_order():
if n.type == token.NAME and n.value in to_index:
@@ -158,7 +158,7 @@ def map_to_index(param_list, prefix=[], d=None):
if d is None:
d = {}
for i, obj in enumerate(param_list):
- trailer = [Subscript(Number(str(i)))]
+ trailer = [Subscript(Number(unicode(i)))]
if isinstance(obj, list):
map_to_index(obj, trailer, d=d)
else:
@@ -172,4 +172,4 @@ def tuple_name(param_list):
l.append(tuple_name(obj))
else:
l.append(obj)
- return "_".join(l)
+ return u"_".join(l)
diff --git a/Lib/lib2to3/fixes/fix_types.py b/Lib/lib2to3/fixes/fix_types.py
index 67bf51f..baaeabd 100644
--- a/Lib/lib2to3/fixes/fix_types.py
+++ b/Lib/lib2to3/fixes/fix_types.py
@@ -20,6 +20,7 @@ There should be another fixer that handles at least the following constants:
"""
# Local imports
+from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name
@@ -55,7 +56,7 @@ class FixTypes(fixer_base.BaseFix):
PATTERN = '|'.join(_pats)
def transform(self, node, results):
- new_value = _TYPE_MAPPING.get(results["name"].value)
+ new_value = unicode(_TYPE_MAPPING.get(results["name"].value))
if new_value:
return Name(new_value, prefix=node.prefix)
return None
diff --git a/Lib/lib2to3/fixes/fix_unicode.py b/Lib/lib2to3/fixes/fix_unicode.py
index c7982c2..2d776f6 100644
--- a/Lib/lib2to3/fixes/fix_unicode.py
+++ b/Lib/lib2to3/fixes/fix_unicode.py
@@ -11,7 +11,7 @@ r"""Fixer for unicode.
from ..pgen2 import token
from .. import fixer_base
-_mapping = {"unichr" : "chr", "unicode" : "str"}
+_mapping = {u"unichr" : u"chr", u"unicode" : u"str"}
class FixUnicode(fixer_base.BaseFix):
BM_compatible = True
@@ -28,12 +28,12 @@ class FixUnicode(fixer_base.BaseFix):
return new
elif node.type == token.STRING:
val = node.value
- if not self.unicode_literals and val[0] in '\'"' and '\\' in val:
- val = r'\\'.join([
- v.replace('\\u', r'\\u').replace('\\U', r'\\U')
- for v in val.split(r'\\')
+ if not self.unicode_literals and val[0] in u'\'"' and u'\\' in val:
+ val = ur'\\'.join([
+ v.replace(u'\\u', ur'\\u').replace(u'\\U', ur'\\U')
+ for v in val.split(ur'\\')
])
- if val[0] in 'uU':
+ if val[0] in u'uU':
val = val[1:]
if val == node.value:
return node
diff --git a/Lib/lib2to3/fixes/fix_urllib.py b/Lib/lib2to3/fixes/fix_urllib.py
index 5a36049..34e1b27 100644
--- a/Lib/lib2to3/fixes/fix_urllib.py
+++ b/Lib/lib2to3/fixes/fix_urllib.py
@@ -6,6 +6,7 @@
# Local imports
from lib2to3.fixes.fix_imports import alternates, FixImports
+from lib2to3 import fixer_base
from lib2to3.fixer_util import (Name, Comma, FromImport, Newline,
find_indentation, Node, syms)
@@ -127,7 +128,7 @@ class FixUrllib(FixImports):
else:
member_name = member.value
as_name = None
- if member_name != ",":
+ if member_name != u",":
for change in MAPPING[mod_member.value]:
if member_name in change[1]:
if change[0] not in mod_dict:
diff --git a/Lib/lib2to3/fixes/fix_ws_comma.py b/Lib/lib2to3/fixes/fix_ws_comma.py
index a54a376..37ff624 100644
--- a/Lib/lib2to3/fixes/fix_ws_comma.py
+++ b/Lib/lib2to3/fixes/fix_ws_comma.py
@@ -17,8 +17,8 @@ class FixWsComma(fixer_base.BaseFix):
any<(not(',') any)+ ',' ((not(',') any)+ ',')* [not(',') any]>
"""
- COMMA = pytree.Leaf(token.COMMA, ",")
- COLON = pytree.Leaf(token.COLON, ":")
+ COMMA = pytree.Leaf(token.COMMA, u",")
+ COLON = pytree.Leaf(token.COLON, u":")
SEPS = (COMMA, COLON)
def transform(self, node, results):
@@ -27,13 +27,13 @@ class FixWsComma(fixer_base.BaseFix):
for child in new.children:
if child in self.SEPS:
prefix = child.prefix
- if prefix.isspace() and "\n" not in prefix:
- child.prefix = ""
+ if prefix.isspace() and u"\n" not in prefix:
+ child.prefix = u""
comma = True
else:
if comma:
prefix = child.prefix
if not prefix:
- child.prefix = " "
+ child.prefix = u" "
comma = False
return new
diff --git a/Lib/lib2to3/fixes/fix_xrange.py b/Lib/lib2to3/fixes/fix_xrange.py
index 1e491e1..f143672 100644
--- a/Lib/lib2to3/fixes/fix_xrange.py
+++ b/Lib/lib2to3/fixes/fix_xrange.py
@@ -26,25 +26,25 @@ class FixXrange(fixer_base.BaseFix):
def transform(self, node, results):
name = results["name"]
- if name.value == "xrange":
+ if name.value == u"xrange":
return self.transform_xrange(node, results)
- elif name.value == "range":
+ elif name.value == u"range":
return self.transform_range(node, results)
else:
raise ValueError(repr(name))
def transform_xrange(self, node, results):
name = results["name"]
- name.replace(Name("range", prefix=name.prefix))
+ name.replace(Name(u"range", prefix=name.prefix))
# This prevents the new range call from being wrapped in a list later.
self.transformed_xranges.add(id(node))
def transform_range(self, node, results):
if (id(node) not in self.transformed_xranges and
not self.in_special_context(node)):
- range_call = Call(Name("range"), [results["args"].clone()])
+ range_call = Call(Name(u"range"), [results["args"].clone()])
# Encase the range call in list().
- list_call = Call(Name("list"), [range_call],
+ list_call = Call(Name(u"list"), [range_call],
prefix=node.prefix)
# Put things that were after the range() call after the list call.
for n in results["rest"]:
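An illustrative run of both branches: xrange() is renamed in place, while a bare range() call outside a special context is wrapped in list():

    from lib2to3.refactor import RefactoringTool

    rt = RefactoringTool(["lib2to3.fixes.fix_xrange"])
    src = "for i in xrange(10): pass\nx = range(5)\n"
    print(rt.refactor_string(src, "<example>"))
    # roughly: "for i in range(10): pass" then "x = list(range(5))"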
diff --git a/Lib/lib2to3/fixes/fix_xreadlines.py b/Lib/lib2to3/fixes/fix_xreadlines.py
index 3e3f71a..f50b9a2 100644
--- a/Lib/lib2to3/fixes/fix_xreadlines.py
+++ b/Lib/lib2to3/fixes/fix_xreadlines.py
@@ -20,6 +20,6 @@ class FixXreadlines(fixer_base.BaseFix):
no_call = results.get("no_call")
if no_call:
- no_call.replace(Name("__iter__", prefix=no_call.prefix))
+ no_call.replace(Name(u"__iter__", prefix=no_call.prefix))
else:
node.replace([x.clone() for x in results["call"]])
diff --git a/Lib/lib2to3/fixes/fix_zip.py b/Lib/lib2to3/fixes/fix_zip.py
index 52c28df..c5d7b66 100644
--- a/Lib/lib2to3/fixes/fix_zip.py
+++ b/Lib/lib2to3/fixes/fix_zip.py
@@ -9,16 +9,13 @@ iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or for V in <>:.
# Local imports
from .. import fixer_base
-from ..pytree import Node
-from ..pygram import python_symbols as syms
-from ..fixer_util import Name, ArgList, in_special_context
-
+from ..fixer_util import Name, Call, in_special_context
class FixZip(fixer_base.ConditionalFix):
BM_compatible = True
PATTERN = """
- power< 'zip' args=trailer< '(' [any] ')' > [trailers=trailer*]
+ power< 'zip' args=trailer< '(' [any] ')' >
>
"""
@@ -31,16 +28,8 @@ class FixZip(fixer_base.ConditionalFix):
if in_special_context(node):
return None
- args = results['args'].clone()
- args.prefix = ""
-
- trailers = []
- if 'trailers' in results:
- trailers = [n.clone() for n in results['trailers']]
- for n in trailers:
- n.prefix = ""
-
- new = Node(syms.power, [Name("zip"), args], prefix="")
- new = Node(syms.power, [Name("list"), ArgList([new])] + trailers)
+ new = node.clone()
+ new.prefix = u""
+ new = Call(Name(u"list"), [new])
new.prefix = node.prefix
return new
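The simplified fixer clones the whole zip() call and wraps it, rather than rebuilding the power node by hand; a minimal illustrative run (sample source is made up):

    from lib2to3.refactor import RefactoringTool

    rt = RefactoringTool(["lib2to3.fixes.fix_zip"])
    print(rt.refactor_string("pairs = zip(xs, ys)\n", "<example>"))
    # roughly: pairs = list(zip(xs, ys))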
diff --git a/Lib/lib2to3/main.py b/Lib/lib2to3/main.py
index c51626b..ad0625e 100644
--- a/Lib/lib2to3/main.py
+++ b/Lib/lib2to3/main.py
@@ -2,7 +2,7 @@
Main program for 2to3.
"""
-from __future__ import with_statement, print_function
+from __future__ import with_statement
import sys
import os
@@ -80,7 +80,7 @@ class StdoutRefactoringTool(refactor.MultiprocessRefactoringTool):
filename += self._append_suffix
if orig_filename != filename:
output_dir = os.path.dirname(filename)
- if not os.path.isdir(output_dir) and output_dir:
+ if not os.path.isdir(output_dir):
os.makedirs(output_dir)
self.log_message('Writing converted %s to %s.', orig_filename,
filename)
@@ -90,11 +90,11 @@ class StdoutRefactoringTool(refactor.MultiprocessRefactoringTool):
if os.path.lexists(backup):
try:
os.remove(backup)
- except OSError:
+ except os.error, err:
self.log_message("Can't remove backup %s", backup)
try:
os.rename(filename, backup)
- except OSError:
+ except os.error, err:
self.log_message("Can't rename %s to %s", filename, backup)
# Actually write the new file
write = super(StdoutRefactoringTool, self).write_file
@@ -116,18 +116,19 @@ class StdoutRefactoringTool(refactor.MultiprocessRefactoringTool):
if self.output_lock is not None:
with self.output_lock:
for line in diff_lines:
- print(line)
+ print line
sys.stdout.flush()
else:
for line in diff_lines:
- print(line)
+ print line
except UnicodeEncodeError:
warn("couldn't encode %s's diff for your terminal" %
(filename,))
return
+
def warn(msg):
- print("WARNING: %s" % (msg,), file=sys.stderr)
+ print >> sys.stderr, "WARNING: %s" % (msg,)
def main(fixer_pkg, args=None):
@@ -194,19 +195,19 @@ def main(fixer_pkg, args=None):
if not options.write and options.nobackups:
parser.error("Can't use -n without -w")
if options.list_fixes:
- print("Available transformations for the -f/--fix option:")
+ print "Available transformations for the -f/--fix option:"
for fixname in refactor.get_all_fix_names(fixer_pkg):
- print(fixname)
+ print fixname
if not args:
return 0
if not args:
- print("At least one file or directory argument required.", file=sys.stderr)
- print("Use --help to show usage.", file=sys.stderr)
+ print >> sys.stderr, "At least one file or directory argument required."
+ print >> sys.stderr, "Use --help to show usage."
return 2
if "-" in args:
refactor_stdin = True
if options.write:
- print("Can't write to stdin.", file=sys.stderr)
+ print >> sys.stderr, "Can't write to stdin."
return 2
if options.print_function:
flags["print_function"] = True
@@ -259,8 +260,8 @@ def main(fixer_pkg, args=None):
options.processes)
except refactor.MultiprocessingUnsupported:
assert options.processes > 1
- print("Sorry, -j isn't supported on this platform.",
- file=sys.stderr)
+ print >> sys.stderr, "Sorry, -j isn't " \
+ "supported on this platform."
return 1
rt.summarize()
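For reference, a minimal sketch of the documented entry point, equivalent to running 2to3 -l from the command line (the fixer package name is the one this module already uses):

    import sys
    from lib2to3.main import main

    sys.exit(main("lib2to3.fixes", ["--list-fixes"]))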
diff --git a/Lib/lib2to3/patcomp.py b/Lib/lib2to3/patcomp.py
index f57f495..49ed668 100644
--- a/Lib/lib2to3/patcomp.py
+++ b/Lib/lib2to3/patcomp.py
@@ -11,7 +11,7 @@ The compiler compiles a pattern to a pytree.*Pattern instance.
__author__ = "Guido van Rossum <guido@python.org>"
# Python imports
-import io
+import StringIO
# Fairly local imports
from .pgen2 import driver, literals, token, tokenize, parse, grammar
@@ -27,8 +27,8 @@ class PatternSyntaxError(Exception):
def tokenize_wrapper(input):
"""Tokenizes a string suppressing significant whitespace."""
- skip = {token.NEWLINE, token.INDENT, token.DEDENT}
- tokens = tokenize.generate_tokens(io.StringIO(input).readline)
+ skip = set((token.NEWLINE, token.INDENT, token.DEDENT))
+ tokens = tokenize.generate_tokens(StringIO.StringIO(input).readline)
for quintuple in tokens:
type, value, start, end, line_text = quintuple
if type not in skip:
@@ -58,7 +58,7 @@ class PatternCompiler(object):
try:
root = self.driver.parse_tokens(tokens, debug=debug)
except parse.ParseError as e:
- raise PatternSyntaxError(str(e)) from None
+ raise PatternSyntaxError(str(e))
if with_tree:
return self.compile_node(root), root
else:
@@ -140,7 +140,7 @@ class PatternCompiler(object):
assert len(nodes) >= 1
node = nodes[0]
if node.type == token.STRING:
- value = str(literals.evalString(node.value))
+ value = unicode(literals.evalString(node.value))
return pytree.LeafPattern(_type_of_literal(value), value)
elif node.type == token.NAME:
value = node.value
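A sketch of the compile-and-match cycle this module supports; the pattern and source are invented for the example:

    from lib2to3 import patcomp, pygram, pytree
    from lib2to3.pgen2 import driver

    pat = patcomp.compile_pattern("power< 'reload' trailer< '(' obj=any ')' > >")
    d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
    tree = d.parse_string("reload(mod)\n")
    for node in tree.pre_order():
        results = {}
        if pat.match(node, results):
            print(results["obj"])  # the bound argument node, here: mod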
diff --git a/Lib/lib2to3/pgen2/conv.py b/Lib/lib2to3/pgen2/conv.py
index ed0cac5..28fbb0b 100644
--- a/Lib/lib2to3/pgen2/conv.py
+++ b/Lib/lib2to3/pgen2/conv.py
@@ -60,8 +60,8 @@ class Converter(grammar.Grammar):
"""
try:
f = open(filename)
- except OSError as err:
- print("Can't open %s: %s" % (filename, err))
+ except IOError, err:
+ print "Can't open %s: %s" % (filename, err)
return False
self.symbol2number = {}
self.number2symbol = {}
@@ -70,8 +70,8 @@ class Converter(grammar.Grammar):
lineno += 1
mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
if not mo and line.strip():
- print("%s(%s): can't parse %s" % (filename, lineno,
- line.strip()))
+ print "%s(%s): can't parse %s" % (filename, lineno,
+ line.strip())
else:
symbol, number = mo.groups()
number = int(number)
@@ -111,20 +111,20 @@ class Converter(grammar.Grammar):
"""
try:
f = open(filename)
- except OSError as err:
- print("Can't open %s: %s" % (filename, err))
+ except IOError, err:
+ print "Can't open %s: %s" % (filename, err)
return False
# The code below essentially uses f's iterator-ness!
lineno = 0
# Expect the two #include lines
- lineno, line = lineno+1, next(f)
+ lineno, line = lineno+1, f.next()
assert line == '#include "pgenheaders.h"\n', (lineno, line)
- lineno, line = lineno+1, next(f)
+ lineno, line = lineno+1, f.next()
assert line == '#include "grammar.h"\n', (lineno, line)
# Parse the state definitions
- lineno, line = lineno+1, next(f)
+ lineno, line = lineno+1, f.next()
allarcs = {}
states = []
while line.startswith("static arc "):
@@ -132,35 +132,35 @@ class Converter(grammar.Grammar):
mo = re.match(r"static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$",
line)
assert mo, (lineno, line)
- n, m, k = list(map(int, mo.groups()))
+ n, m, k = map(int, mo.groups())
arcs = []
for _ in range(k):
- lineno, line = lineno+1, next(f)
+ lineno, line = lineno+1, f.next()
mo = re.match(r"\s+{(\d+), (\d+)},$", line)
assert mo, (lineno, line)
- i, j = list(map(int, mo.groups()))
+ i, j = map(int, mo.groups())
arcs.append((i, j))
- lineno, line = lineno+1, next(f)
+ lineno, line = lineno+1, f.next()
assert line == "};\n", (lineno, line)
allarcs[(n, m)] = arcs
- lineno, line = lineno+1, next(f)
+ lineno, line = lineno+1, f.next()
mo = re.match(r"static state states_(\d+)\[(\d+)\] = {$", line)
assert mo, (lineno, line)
- s, t = list(map(int, mo.groups()))
+ s, t = map(int, mo.groups())
assert s == len(states), (lineno, line)
state = []
for _ in range(t):
- lineno, line = lineno+1, next(f)
+ lineno, line = lineno+1, f.next()
mo = re.match(r"\s+{(\d+), arcs_(\d+)_(\d+)},$", line)
assert mo, (lineno, line)
- k, n, m = list(map(int, mo.groups()))
+ k, n, m = map(int, mo.groups())
arcs = allarcs[n, m]
assert k == len(arcs), (lineno, line)
state.append(arcs)
states.append(state)
- lineno, line = lineno+1, next(f)
+ lineno, line = lineno+1, f.next()
assert line == "};\n", (lineno, line)
- lineno, line = lineno+1, next(f)
+ lineno, line = lineno+1, f.next()
self.states = states
# Parse the dfas
@@ -169,18 +169,18 @@ class Converter(grammar.Grammar):
assert mo, (lineno, line)
ndfas = int(mo.group(1))
for i in range(ndfas):
- lineno, line = lineno+1, next(f)
+ lineno, line = lineno+1, f.next()
mo = re.match(r'\s+{(\d+), "(\w+)", (\d+), (\d+), states_(\d+),$',
line)
assert mo, (lineno, line)
symbol = mo.group(2)
- number, x, y, z = list(map(int, mo.group(1, 3, 4, 5)))
+ number, x, y, z = map(int, mo.group(1, 3, 4, 5))
assert self.symbol2number[symbol] == number, (lineno, line)
assert self.number2symbol[number] == symbol, (lineno, line)
assert x == 0, (lineno, line)
state = states[z]
assert y == len(state), (lineno, line)
- lineno, line = lineno+1, next(f)
+ lineno, line = lineno+1, f.next()
mo = re.match(r'\s+("(?:\\\d\d\d)*")},$', line)
assert mo, (lineno, line)
first = {}
@@ -191,18 +191,18 @@ class Converter(grammar.Grammar):
if byte & (1<<j):
first[i*8 + j] = 1
dfas[number] = (state, first)
- lineno, line = lineno+1, next(f)
+ lineno, line = lineno+1, f.next()
assert line == "};\n", (lineno, line)
self.dfas = dfas
# Parse the labels
labels = []
- lineno, line = lineno+1, next(f)
+ lineno, line = lineno+1, f.next()
mo = re.match(r"static label labels\[(\d+)\] = {$", line)
assert mo, (lineno, line)
nlabels = int(mo.group(1))
for i in range(nlabels):
- lineno, line = lineno+1, next(f)
+ lineno, line = lineno+1, f.next()
mo = re.match(r'\s+{(\d+), (0|"\w+")},$', line)
assert mo, (lineno, line)
x, y = mo.groups()
@@ -212,35 +212,35 @@ class Converter(grammar.Grammar):
else:
y = eval(y)
labels.append((x, y))
- lineno, line = lineno+1, next(f)
+ lineno, line = lineno+1, f.next()
assert line == "};\n", (lineno, line)
self.labels = labels
# Parse the grammar struct
- lineno, line = lineno+1, next(f)
+ lineno, line = lineno+1, f.next()
assert line == "grammar _PyParser_Grammar = {\n", (lineno, line)
- lineno, line = lineno+1, next(f)
+ lineno, line = lineno+1, f.next()
mo = re.match(r"\s+(\d+),$", line)
assert mo, (lineno, line)
ndfas = int(mo.group(1))
assert ndfas == len(self.dfas)
- lineno, line = lineno+1, next(f)
+ lineno, line = lineno+1, f.next()
assert line == "\tdfas,\n", (lineno, line)
- lineno, line = lineno+1, next(f)
+ lineno, line = lineno+1, f.next()
mo = re.match(r"\s+{(\d+), labels},$", line)
assert mo, (lineno, line)
nlabels = int(mo.group(1))
assert nlabels == len(self.labels), (lineno, line)
- lineno, line = lineno+1, next(f)
+ lineno, line = lineno+1, f.next()
mo = re.match(r"\s+(\d+)$", line)
assert mo, (lineno, line)
start = int(mo.group(1))
assert start in self.number2symbol, (lineno, line)
self.start = start
- lineno, line = lineno+1, next(f)
+ lineno, line = lineno+1, f.next()
assert line == "};\n", (lineno, line)
try:
- lineno, line = lineno+1, next(f)
+ lineno, line = lineno+1, f.next()
except StopIteration:
pass
else:
diff --git a/Lib/lib2to3/pgen2/driver.py b/Lib/lib2to3/pgen2/driver.py
index 6471635..a513330 100644
--- a/Lib/lib2to3/pgen2/driver.py
+++ b/Lib/lib2to3/pgen2/driver.py
@@ -16,10 +16,11 @@ __author__ = "Guido van Rossum <guido@python.org>"
__all__ = ["Driver", "load_grammar"]
# Python imports
-import io
+import codecs
import os
import logging
import pkgutil
+import StringIO
import sys
# Pgen imports
@@ -43,7 +44,7 @@ class Driver(object):
lineno = 1
column = 0
type = value = start = end = line_text = None
- prefix = ""
+ prefix = u""
for quintuple in tokens:
type, value, start, end, line_text = quintuple
if start != (lineno, column):
@@ -94,12 +95,15 @@ class Driver(object):
def parse_file(self, filename, encoding=None, debug=False):
"""Parse a file and return the syntax tree."""
- with io.open(filename, "r", encoding=encoding) as stream:
+ stream = codecs.open(filename, "r", encoding)
+ try:
return self.parse_stream(stream, debug)
+ finally:
+ stream.close()
def parse_string(self, text, debug=False):
"""Parse a string and return the syntax tree."""
- tokens = tokenize.generate_tokens(io.StringIO(text).readline)
+ tokens = tokenize.generate_tokens(StringIO.StringIO(text).readline)
return self.parse_tokens(tokens, debug)
@@ -123,7 +127,7 @@ def load_grammar(gt="Grammar.txt", gp=None,
logger.info("Writing grammar tables to %s", gp)
try:
g.dump(gp)
- except OSError as e:
+ except IOError as e:
logger.info("Writing failed: %s", e)
else:
g = grammar.Grammar()
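A sketch of the parse path these hunks reroute, run under Python 3 where io.StringIO is still used (the patch substitutes StringIO.StringIO and codecs.open on Python 2):

    from lib2to3 import pygram, pytree
    from lib2to3.pgen2 import driver

    d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
    tree = d.parse_string("x = 1  # keep me\n")
    # Prefixes carry whitespace and comments, so str() round-trips the source.
    assert str(tree) == "x = 1  # keep me\n"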
diff --git a/Lib/lib2to3/pgen2/grammar.py b/Lib/lib2to3/pgen2/grammar.py
index a1da546..0b6d86b 100644
--- a/Lib/lib2to3/pgen2/grammar.py
+++ b/Lib/lib2to3/pgen2/grammar.py
@@ -13,10 +13,11 @@ fallback token code OP, but the parser needs the actual token code.
"""
# Python imports
+import collections
import pickle
# Local imports
-from . import token
+from . import token, tokenize
class Grammar(object):
@@ -85,14 +86,27 @@ class Grammar(object):
self.start = 256
def dump(self, filename):
- """Dump the grammar tables to a pickle file."""
+ """Dump the grammar tables to a pickle file.
+
+ dump() recursively changes all dict to OrderedDict, so the pickled file
+ is not exactly the same as what was passed in to dump(). load() uses the
+ pickled file to create the tables, but only changes OrderedDict to dict
+ at the top level; it does not recursively change OrderedDict to dict.
+ So, the loaded tables are different from the original tables that were
+ passed to dump() in that some of the OrderedDict (from the pickled file)
+ are not changed back to dict. For parsing, this has no effect on
+ performance because OrderedDict uses dict's __getitem__ with nothing in
+ between.
+ """
with open(filename, "wb") as f:
- pickle.dump(self.__dict__, f, pickle.HIGHEST_PROTOCOL)
+ d = _make_deterministic(self.__dict__)
+ pickle.dump(d, f, 2)
def load(self, filename):
"""Load the grammar tables from a pickle file."""
- with open(filename, "rb") as f:
- d = pickle.load(f)
+ f = open(filename, "rb")
+ d = pickle.load(f)
+ f.close()
self.__dict__.update(d)
def loads(self, pkl):
@@ -115,17 +129,28 @@ class Grammar(object):
def report(self):
"""Dump the grammar tables to standard output, for debugging."""
from pprint import pprint
- print("s2n")
+ print "s2n"
pprint(self.symbol2number)
- print("n2s")
+ print "n2s"
pprint(self.number2symbol)
- print("states")
+ print "states"
pprint(self.states)
- print("dfas")
+ print "dfas"
pprint(self.dfas)
- print("labels")
+ print "labels"
pprint(self.labels)
- print("start", self.start)
+ print "start", self.start
+
+
+def _make_deterministic(top):
+ if isinstance(top, dict):
+ return collections.OrderedDict(
+ sorted(((k, _make_deterministic(v)) for k, v in top.iteritems())))
+ if isinstance(top, list):
+ return [_make_deterministic(e) for e in top]
+ if isinstance(top, tuple):
+ return tuple(_make_deterministic(e) for e in top)
+ return top
# Map from operator to number (since tokenize doesn't do this)
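Restated for illustration with the Python 3 spelling (.items() for .iteritems()), this is the shape of the recursive normalization _make_deterministic performs before pickling:

    import collections

    def make_deterministic(top):
        # Sort dict items (recursively) so dump() produces reproducible pickles.
        if isinstance(top, dict):
            return collections.OrderedDict(
                sorted((k, make_deterministic(v)) for k, v in top.items()))
        if isinstance(top, list):
            return [make_deterministic(e) for e in top]
        if isinstance(top, tuple):
            return tuple(make_deterministic(e) for e in top)
        return top

    print(make_deterministic({"b": {2: "y", 1: "x"}, "a": [1, 2]}))
    # OrderedDict([('a', [1, 2]), ('b', OrderedDict([(1, 'x'), (2, 'y')]))])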
diff --git a/Lib/lib2to3/pgen2/literals.py b/Lib/lib2to3/pgen2/literals.py
index b9b63e6..0b3948a 100644
--- a/Lib/lib2to3/pgen2/literals.py
+++ b/Lib/lib2to3/pgen2/literals.py
@@ -29,12 +29,12 @@ def escape(m):
try:
i = int(hexes, 16)
except ValueError:
- raise ValueError("invalid hex string escape ('\\%s')" % tail) from None
+ raise ValueError("invalid hex string escape ('\\%s')" % tail)
else:
try:
i = int(tail, 8)
except ValueError:
- raise ValueError("invalid octal string escape ('\\%s')" % tail) from None
+ raise ValueError("invalid octal string escape ('\\%s')" % tail)
return chr(i)
def evalString(s):
@@ -53,7 +53,7 @@ def test():
s = repr(c)
e = evalString(s)
if e != c:
- print(i, c, s, e)
+ print i, c, s, e
if __name__ == "__main__":
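For illustration, evalString() decodes simple, hex, and octal escapes through escape():

    from lib2to3.pgen2 import literals

    print(literals.evalString(r"'\x41\102\n'"))  # 'A' + 'B' + newline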
diff --git a/Lib/lib2to3/pgen2/parse.py b/Lib/lib2to3/pgen2/parse.py
index cf3fcf7..6bebdbb 100644
--- a/Lib/lib2to3/pgen2/parse.py
+++ b/Lib/lib2to3/pgen2/parse.py
@@ -24,9 +24,6 @@ class ParseError(Exception):
self.value = value
self.context = context
- def __reduce__(self):
- return type(self), (self.msg, self.type, self.value, self.context)
-
class Parser(object):
"""Parser engine.
diff --git a/Lib/lib2to3/pgen2/pgen.py b/Lib/lib2to3/pgen2/pgen.py
index b0cbd16..be4fcad 100644
--- a/Lib/lib2to3/pgen2/pgen.py
+++ b/Lib/lib2to3/pgen2/pgen.py
@@ -26,7 +26,7 @@ class ParserGenerator(object):
def make_grammar(self):
c = PgenGrammar()
- names = list(self.dfas.keys())
+ names = self.dfas.keys()
names.sort()
names.remove(self.startsymbol)
names.insert(0, self.startsymbol)
@@ -39,7 +39,7 @@ class ParserGenerator(object):
states = []
for state in dfa:
arcs = []
- for label, next in sorted(state.arcs.items()):
+ for label, next in sorted(state.arcs.iteritems()):
arcs.append((self.make_label(c, label), dfa.index(next)))
if state.isfinal:
arcs.append((0, dfa.index(state)))
@@ -74,7 +74,7 @@ class ParserGenerator(object):
else:
# A named token (NAME, NUMBER, STRING)
itoken = getattr(token, label, None)
- assert isinstance(itoken, int), label
+ assert isinstance(itoken, (int, long)), label
assert itoken in token.tok_name, label
if itoken in c.tokens:
return c.tokens[itoken]
@@ -105,7 +105,7 @@ class ParserGenerator(object):
return ilabel
def addfirstsets(self):
- names = list(self.dfas.keys())
+ names = self.dfas.keys()
names.sort()
for name in names:
if name not in self.first:
@@ -118,7 +118,7 @@ class ParserGenerator(object):
state = dfa[0]
totalset = {}
overlapcheck = {}
- for label, next in state.arcs.items():
+ for label, next in state.arcs.iteritems():
if label in self.dfas:
if label in self.first:
fset = self.first[label]
@@ -133,7 +133,7 @@ class ParserGenerator(object):
totalset[label] = 1
overlapcheck[label] = {label: 1}
inverse = {}
- for label, itsfirst in overlapcheck.items():
+ for label, itsfirst in overlapcheck.iteritems():
for symbol in itsfirst:
if symbol in inverse:
raise ValueError("rule %s is ambiguous; %s is in the"
@@ -192,7 +192,7 @@ class ParserGenerator(object):
for label, next in nfastate.arcs:
if label is not None:
addclosure(next, arcs.setdefault(label, {}))
- for label, nfaset in sorted(arcs.items()):
+ for label, nfaset in sorted(arcs.iteritems()):
for st in states:
if st.nfaset == nfaset:
break
@@ -203,10 +203,10 @@ class ParserGenerator(object):
return states # List of DFAState instances; first one is start
def dump_nfa(self, name, start, finish):
- print("Dump of NFA for", name)
+ print "Dump of NFA for", name
todo = [start]
for i, state in enumerate(todo):
- print(" State", i, state is finish and "(final)" or "")
+ print " State", i, state is finish and "(final)" or ""
for label, next in state.arcs:
if next in todo:
j = todo.index(next)
@@ -214,16 +214,16 @@ class ParserGenerator(object):
j = len(todo)
todo.append(next)
if label is None:
- print(" -> %d" % j)
+ print " -> %d" % j
else:
- print(" %s -> %d" % (label, j))
+ print " %s -> %d" % (label, j)
def dump_dfa(self, name, dfa):
- print("Dump of DFA for", name)
+ print "Dump of DFA for", name
for i, state in enumerate(dfa):
- print(" State", i, state.isfinal and "(final)" or "")
- for label, next in sorted(state.arcs.items()):
- print(" %s -> %d" % (label, dfa.index(next)))
+ print " State", i, state.isfinal and "(final)" or ""
+ for label, next in sorted(state.arcs.iteritems()):
+ print " %s -> %d" % (label, dfa.index(next))
def simplify_dfa(self, dfa):
# This is not theoretically optimal, but works well enough.
@@ -319,9 +319,9 @@ class ParserGenerator(object):
return value
def gettoken(self):
- tup = next(self.generator)
+ tup = self.generator.next()
while tup[0] in (tokenize.COMMENT, tokenize.NL):
- tup = next(self.generator)
+ tup = self.generator.next()
self.type, self.value, self.begin, self.end, self.line = tup
#print token.tok_name[self.type], repr(self.value)
@@ -330,7 +330,7 @@ class ParserGenerator(object):
try:
msg = msg % args
except:
- msg = " ".join([msg] + list(map(str, args)))
+ msg = " ".join([msg] + map(str, args))
raise SyntaxError(msg, (self.filename, self.end[0],
self.end[1], self.line))
@@ -348,7 +348,7 @@ class DFAState(object):
def __init__(self, nfaset, final):
assert isinstance(nfaset, dict)
- assert isinstance(next(iter(nfaset)), NFAState)
+ assert isinstance(iter(nfaset).next(), NFAState)
assert isinstance(final, NFAState)
self.nfaset = nfaset
self.isfinal = final in nfaset
@@ -361,7 +361,7 @@ class DFAState(object):
self.arcs[label] = next
def unifystate(self, old, new):
- for label, next in self.arcs.items():
+ for label, next in self.arcs.iteritems():
if next is old:
self.arcs[label] = new
@@ -374,7 +374,7 @@ class DFAState(object):
# would invoke this method recursively, with cycles...
if len(self.arcs) != len(other.arcs):
return False
- for label, next in self.arcs.items():
+ for label, next in self.arcs.iteritems():
if next is not other.arcs.get(label):
return False
return True
diff --git a/Lib/lib2to3/pgen2/token.py b/Lib/lib2to3/pgen2/token.py
index 1a67955..5fac5ce 100755
--- a/Lib/lib2to3/pgen2/token.py
+++ b/Lib/lib2to3/pgen2/token.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python3
+#! /usr/bin/env python
"""Token constants (from "token.h")."""
@@ -62,15 +62,13 @@ OP = 52
COMMENT = 53
NL = 54
RARROW = 55
-AWAIT = 56
-ASYNC = 57
-ERRORTOKEN = 58
-N_TOKENS = 59
+ERRORTOKEN = 56
+N_TOKENS = 57
NT_OFFSET = 256
#--end constants--
tok_name = {}
-for _name, _value in list(globals().items()):
+for _name, _value in globals().items():
if type(_value) is type(0):
tok_name[_value] = _name
diff --git a/Lib/lib2to3/pgen2/tokenize.py b/Lib/lib2to3/pgen2/tokenize.py
index 7924ff3..8cae873 100644
--- a/Lib/lib2to3/pgen2/tokenize.py
+++ b/Lib/lib2to3/pgen2/tokenize.py
@@ -48,26 +48,22 @@ except NameError:
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
-def _combinations(*l):
- return set(
- x + y for x in l for y in l + ("",) if x.casefold() != y.casefold()
- )
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
-Name = r'\w+'
+Name = r'[a-zA-Z_]\w*'
-Binnumber = r'0[bB]_?[01]+(?:_[01]+)*'
-Hexnumber = r'0[xX]_?[\da-fA-F]+(?:_[\da-fA-F]+)*[lL]?'
-Octnumber = r'0[oO]?_?[0-7]+(?:_[0-7]+)*[lL]?'
-Decnumber = group(r'[1-9]\d*(?:_\d+)*[lL]?', '0[lL]?')
+Binnumber = r'0[bB][01]*'
+Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
+Octnumber = r'0[oO]?[0-7]*[lL]?'
+Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
-Exponent = r'[eE][-+]?\d+(?:_\d+)*'
-Pointfloat = group(r'\d+(?:_\d+)*\.(?:\d+(?:_\d+)*)?', r'\.\d+(?:_\d+)*') + maybe(Exponent)
-Expfloat = r'\d+(?:_\d+)*' + Exponent
+Exponent = r'[eE][-+]?\d+'
+Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
+Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
-Imagnumber = group(r'\d+(?:_\d+)*[jJ]', Floatnumber + r'[jJ]')
+Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Tail end of ' string.
@@ -78,11 +74,10 @@ Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
-_litprefix = r"(?:[uUrRbBfF]|[rR][fFbB]|[fFbBuU][rR])?"
-Triple = group(_litprefix + "'''", _litprefix + '"""')
+Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""')
# Single-line ' or " string.
-String = group(_litprefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
- _litprefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
+String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
+ r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
@@ -100,38 +95,55 @@ PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
-ContStr = group(_litprefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
+ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
- _litprefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
+ r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
tokenprog, pseudoprog, single3prog, double3prog = map(
re.compile, (Token, PseudoToken, Single3, Double3))
-
-_strprefixes = (
- _combinations('r', 'R', 'f', 'F') |
- _combinations('r', 'R', 'b', 'B') |
- {'u', 'U', 'ur', 'uR', 'Ur', 'UR'}
-)
-
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
"'''": single3prog, '"""': double3prog,
- **{f"{prefix}'''": single3prog for prefix in _strprefixes},
- **{f'{prefix}"""': double3prog for prefix in _strprefixes},
- **{prefix: None for prefix in _strprefixes}}
-
-triple_quoted = (
- {"'''", '"""'} |
- {f"{prefix}'''" for prefix in _strprefixes} |
- {f'{prefix}"""' for prefix in _strprefixes}
-)
-single_quoted = (
- {"'", '"'} |
- {f"{prefix}'" for prefix in _strprefixes} |
- {f'{prefix}"' for prefix in _strprefixes}
-)
+ "r'''": single3prog, 'r"""': double3prog,
+ "u'''": single3prog, 'u"""': double3prog,
+ "b'''": single3prog, 'b"""': double3prog,
+ "ur'''": single3prog, 'ur"""': double3prog,
+ "br'''": single3prog, 'br"""': double3prog,
+ "R'''": single3prog, 'R"""': double3prog,
+ "U'''": single3prog, 'U"""': double3prog,
+ "B'''": single3prog, 'B"""': double3prog,
+ "uR'''": single3prog, 'uR"""': double3prog,
+ "Ur'''": single3prog, 'Ur"""': double3prog,
+ "UR'''": single3prog, 'UR"""': double3prog,
+ "bR'''": single3prog, 'bR"""': double3prog,
+ "Br'''": single3prog, 'Br"""': double3prog,
+ "BR'''": single3prog, 'BR"""': double3prog,
+ 'r': None, 'R': None,
+ 'u': None, 'U': None,
+ 'b': None, 'B': None}
+
+triple_quoted = {}
+for t in ("'''", '"""',
+ "r'''", 'r"""', "R'''", 'R"""',
+ "u'''", 'u"""', "U'''", 'U"""',
+ "b'''", 'b"""', "B'''", 'B"""',
+ "ur'''", 'ur"""', "Ur'''", 'Ur"""',
+ "uR'''", 'uR"""', "UR'''", 'UR"""',
+ "br'''", 'br"""', "Br'''", 'Br"""',
+ "bR'''", 'bR"""', "BR'''", 'BR"""',):
+ triple_quoted[t] = t
+single_quoted = {}
+for t in ("'", '"',
+ "r'", 'r"', "R'", 'R"',
+ "u'", 'u"', "U'", 'U"',
+ "b'", 'b"', "B'", 'B"',
+ "ur'", 'ur"', "Ur'", 'Ur"',
+ "uR'", 'uR"', "UR'", 'UR"',
+ "br'", 'br"', "Br'", 'Br"',
+ "bR'", 'bR"', "BR'", 'BR"', ):
+ single_quoted[t] = t
tabsize = 8
@@ -139,11 +151,11 @@ class TokenError(Exception): pass
class StopTokenizing(Exception): pass
-def printtoken(type, token, xxx_todo_changeme, xxx_todo_changeme1, line): # for testing
- (srow, scol) = xxx_todo_changeme
- (erow, ecol) = xxx_todo_changeme1
- print("%d,%d-%d,%d:\t%s\t%s" % \
- (srow, scol, erow, ecol, tok_name[type], repr(token)))
+def printtoken(type, token, start, end, line): # for testing
+ (srow, scol) = start
+ (erow, ecol) = end
+ print "%d,%d-%d,%d:\t%s\t%s" % \
+ (srow, scol, erow, ecol, tok_name[type], repr(token))
def tokenize(readline, tokeneater=printtoken):
"""
@@ -208,7 +220,7 @@ class Untokenizer:
for tok in iterable:
toknum, tokval = tok[:2]
- if toknum in (NAME, NUMBER, ASYNC, AWAIT):
+ if toknum in (NAME, NUMBER):
tokval += ' '
if toknum == INDENT:
@@ -224,8 +236,8 @@ class Untokenizer:
startline = False
toks_append(tokval)
-cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
-blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
+cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)')
+blank_re = re.compile(r'^[ \t\f]*(?:[#\r\n]|$)')
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
@@ -321,7 +333,7 @@ def untokenize(iterable):
Round-trip invariant for full input:
Untokenized source will match input source exactly
Round-trip invariant for limited input:
# Output text will tokenize back to the input
t1 = [tok[:2] for tok in generate_tokens(f.readline)]
newcode = untokenize(t1)
@@ -346,19 +358,14 @@ def generate_tokens(readline):
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
- physical line.
+ logical line; continuation lines are included.
"""
lnum = parenlev = continued = 0
+ namechars, numchars = string.ascii_letters + '_', '0123456789'
contstr, needcont = '', 0
contline = None
indents = [0]
- # 'stashed' and 'async_*' are used for async/await parsing
- stashed = None
- async_def = False
- async_def_indent = 0
- async_def_nl = False
-
while 1: # loop over lines in stream
try:
line = readline()
@@ -369,7 +376,7 @@ def generate_tokens(readline):
if contstr: # continued string
if not line:
- raise TokenError("EOF in multi-line string", strstart)
+ raise TokenError, ("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
@@ -399,10 +406,6 @@ def generate_tokens(readline):
pos = pos + 1
if pos == max: break
- if stashed:
- yield stashed
- stashed = None
-
if line[pos] in '#\r\n': # skip comments or blank lines
if line[pos] == '#':
comment_token = line[pos:].rstrip('\r\n')
@@ -425,22 +428,11 @@ def generate_tokens(readline):
"unindent does not match any outer indentation level",
("<tokenize>", lnum, pos, line))
indents = indents[:-1]
-
- if async_def and async_def_indent >= indents[-1]:
- async_def = False
- async_def_nl = False
- async_def_indent = 0
-
yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
- if async_def and async_def_nl and async_def_indent >= indents[-1]:
- async_def = False
- async_def_nl = False
- async_def_indent = 0
-
else: # continued statement
if not line:
- raise TokenError("EOF in multi-line statement", (lnum, 0))
+ raise TokenError, ("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:
@@ -450,25 +442,16 @@ def generate_tokens(readline):
spos, epos, pos = (lnum, start), (lnum, end), end
token, initial = line[start:end], line[start]
- if initial in string.digits or \
+ if initial in numchars or \
(initial == '.' and token != '.'): # ordinary number
yield (NUMBER, token, spos, epos, line)
elif initial in '\r\n':
newline = NEWLINE
if parenlev > 0:
newline = NL
- elif async_def:
- async_def_nl = True
- if stashed:
- yield stashed
- stashed = None
yield (newline, token, spos, epos, line)
-
elif initial == '#':
assert not token.endswith("\n")
- if stashed:
- yield stashed
- stashed = None
yield (COMMENT, token, spos, epos, line)
elif token in triple_quoted:
endprog = endprogs[token]
@@ -476,9 +459,6 @@ def generate_tokens(readline):
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
- if stashed:
- yield stashed
- stashed = None
yield (STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
@@ -496,63 +476,22 @@ def generate_tokens(readline):
contline = line
break
else: # ordinary string
- if stashed:
- yield stashed
- stashed = None
yield (STRING, token, spos, epos, line)
- elif initial.isidentifier(): # ordinary name
- if token in ('async', 'await'):
- if async_def:
- yield (ASYNC if token == 'async' else AWAIT,
- token, spos, epos, line)
- continue
-
- tok = (NAME, token, spos, epos, line)
- if token == 'async' and not stashed:
- stashed = tok
- continue
-
- if token == 'def':
- if (stashed
- and stashed[0] == NAME
- and stashed[1] == 'async'):
-
- async_def = True
- async_def_indent = indents[-1]
-
- yield (ASYNC, stashed[1],
- stashed[2], stashed[3],
- stashed[4])
- stashed = None
-
- if stashed:
- yield stashed
- stashed = None
-
- yield tok
+ elif initial in namechars: # ordinary name
+ yield (NAME, token, spos, epos, line)
elif initial == '\\': # continued stmt
# This yield is new; needed for better idempotency:
- if stashed:
- yield stashed
- stashed = None
yield (NL, token, spos, (lnum, pos), line)
continued = 1
else:
if initial in '([{': parenlev = parenlev + 1
elif initial in ')]}': parenlev = parenlev - 1
- if stashed:
- yield stashed
- stashed = None
yield (OP, token, spos, epos, line)
else:
yield (ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos+1), line)
pos = pos + 1
- if stashed:
- yield stashed
- stashed = None
-
for indent in indents[1:]: # pop remaining indent levels
yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
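A minimal sketch of the generator interface, run under Python 3 (this patch pairs it with StringIO.StringIO instead of io.StringIO on Python 2):

    import io
    from lib2to3.pgen2 import tokenize
    from lib2to3.pgen2.token import tok_name

    for tok in tokenize.generate_tokens(io.StringIO("x = 42\n").readline):
        print(tok_name[tok[0]], repr(tok[1]))
    # NAME 'x', OP '=', NUMBER '42', NEWLINE '\n', ENDMARKER ''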
diff --git a/Lib/lib2to3/pygram.py b/Lib/lib2to3/pygram.py
index 24d9db9..7e67e4a 100644
--- a/Lib/lib2to3/pygram.py
+++ b/Lib/lib2to3/pygram.py
@@ -25,7 +25,7 @@ class Symbols(object):
Creates an attribute for each grammar symbol (nonterminal),
whose value is the symbol's type (an int >= 256).
"""
- for name, symbol in grammar.symbol2number.items():
+ for name, symbol in grammar.symbol2number.iteritems():
setattr(self, name, symbol)
@@ -36,8 +36,5 @@ python_symbols = Symbols(python_grammar)
python_grammar_no_print_statement = python_grammar.copy()
del python_grammar_no_print_statement.keywords["print"]
-python_grammar_no_print_and_exec_statement = python_grammar_no_print_statement.copy()
-del python_grammar_no_print_and_exec_statement.keywords["exec"]
-
pattern_grammar = driver.load_packaged_grammar("lib2to3", _PATTERN_GRAMMAR_FILE)
pattern_symbols = Symbols(pattern_grammar)
diff --git a/Lib/lib2to3/pytree.py b/Lib/lib2to3/pytree.py
index 2a6ef2e..179caca 100644
--- a/Lib/lib2to3/pytree.py
+++ b/Lib/lib2to3/pytree.py
@@ -13,7 +13,8 @@ There's also a pattern matching implementation here.
__author__ = "Guido van Rossum <guido@python.org>"
import sys
-from io import StringIO
+import warnings
+from StringIO import StringIO
HUGE = 0x7FFFFFFF # maximum repeat count, default max
@@ -63,6 +64,16 @@ class Base(object):
__hash__ = None # For Py3 compatibility.
+ def __ne__(self, other):
+ """
+ Compare two nodes for inequality.
+
+ This calls the method _eq().
+ """
+ if self.__class__ is not other.__class__:
+ return NotImplemented
+ return not self._eq(other)
+
def _eq(self, other):
"""
Compare two nodes for equality.
@@ -98,6 +109,26 @@ class Base(object):
"""
raise NotImplementedError
+ def set_prefix(self, prefix):
+ """
+ Set the prefix for the node (see Leaf class).
+
+ DEPRECATED; use the prefix property directly.
+ """
+ warnings.warn("set_prefix() is deprecated; use the prefix property",
+ DeprecationWarning, stacklevel=2)
+ self.prefix = prefix
+
+ def get_prefix(self):
+ """
+ Return the prefix for the node (see Leaf class).
+
+ DEPRECATED; use the prefix property directly.
+ """
+ warnings.warn("get_prefix() is deprecated; use the prefix property",
+ DeprecationWarning, stacklevel=2)
+ return self.prefix
+
def replace(self, new):
"""Replace this node with a new one in the parent."""
assert self.parent is not None, str(self)
@@ -183,7 +214,8 @@ class Base(object):
def leaves(self):
for child in self.children:
- yield from child.leaves()
+ for x in child.leaves():
+ yield x
def depth(self):
if self.parent is None:
@@ -197,12 +229,12 @@ class Base(object):
"""
next_sib = self.next_sibling
if next_sib is None:
- return ""
+ return u""
return next_sib.prefix
if sys.version_info < (3, 0):
def __str__(self):
- return str(self).encode("ascii")
+ return unicode(self).encode("ascii")
class Node(Base):
@@ -245,7 +277,7 @@ class Node(Base):
This reproduces the input source exactly.
"""
- return "".join(map(str, self.children))
+ return u"".join(map(unicode, self.children))
if sys.version_info > (3, 0):
__str__ = __unicode__
@@ -262,17 +294,18 @@ class Node(Base):
def post_order(self):
"""Return a post-order iterator for the tree."""
for child in self.children:
- yield from child.post_order()
+ for node in child.post_order():
+ yield node
yield self
def pre_order(self):
"""Return a pre-order iterator for the tree."""
yield self
for child in self.children:
- yield from child.pre_order()
+ for node in child.pre_order():
+ yield node
- @property
- def prefix(self):
+ def _prefix_getter(self):
"""
The whitespace and comments preceding this node in the input.
"""
@@ -280,11 +313,12 @@ class Node(Base):
return ""
return self.children[0].prefix
- @prefix.setter
- def prefix(self, prefix):
+ def _prefix_setter(self, prefix):
if self.children:
self.children[0].prefix = prefix
+ prefix = property(_prefix_getter, _prefix_setter)
+
def set_child(self, i, child):
"""
Equivalent to 'node.children[i] = child'. This method also sets the
@@ -354,7 +388,7 @@ class Leaf(Base):
This reproduces the input source exactly.
"""
- return self.prefix + str(self.value)
+ return self.prefix + unicode(self.value)
if sys.version_info > (3, 0):
__str__ = __unicode__
@@ -380,18 +414,18 @@ class Leaf(Base):
"""Return a pre-order iterator for the tree."""
yield self
- @property
- def prefix(self):
+ def _prefix_getter(self):
"""
The whitespace and comments preceding this token in the input.
"""
return self._prefix
- @prefix.setter
- def prefix(self, prefix):
+ def _prefix_setter(self, prefix):
self.changed()
self._prefix = prefix
+ prefix = property(_prefix_getter, _prefix_setter)
+
def convert(gr, raw_node):
"""
Convert raw node information to a Node or Leaf instance.
@@ -514,7 +548,7 @@ class LeafPattern(BasePattern):
if type is not None:
assert 0 <= type < 256, type
if content is not None:
- assert isinstance(content, str), repr(content)
+ assert isinstance(content, basestring), repr(content)
self.type = type
self.content = content
self.name = name
@@ -564,7 +598,7 @@ class NodePattern(BasePattern):
if type is not None:
assert type >= 256, type
if content is not None:
- assert not isinstance(content, str), repr(content)
+ assert not isinstance(content, basestring), repr(content)
content = list(content)
for i, item in enumerate(content):
assert isinstance(item, BasePattern), (i, item)
@@ -699,7 +733,7 @@ class WildcardPattern(BasePattern):
"""
if self.content is None:
# Shortcut for special case (see __init__.__doc__)
- for count in range(self.min, 1 + min(len(nodes), self.max)):
+ for count in xrange(self.min, 1 + min(len(nodes), self.max)):
r = {}
if self.name:
r[self.name] = nodes[:count]
@@ -709,8 +743,8 @@ class WildcardPattern(BasePattern):
else:
# The reason for this is that hitting the recursion limit usually
# results in some ugly messages about how RuntimeErrors are being
- # ignored. We only have to do this on CPython, though, because other
- # implementations don't have this nasty bug in the first place.
+ # ignored. We don't do this on non-CPython implementations because
+ # they don't have this problem.
if hasattr(sys, "getrefcount"):
save_stderr = sys.stderr
sys.stderr = StringIO()
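A sketch of the prefix behavior these hunks rewrite into an explicit property(getter, setter) pair (illustrative; the leaf value is arbitrary):

    from lib2to3.pytree import Leaf
    from lib2to3.pgen2 import token

    leaf = Leaf(token.NAME, "value", prefix="  ")
    assert str(leaf) == "  value"   # prefix + value reproduces the source
    leaf.prefix = " "               # the setter marks the tree as changed
    assert str(leaf) == " value"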
diff --git a/Lib/lib2to3/refactor.py b/Lib/lib2to3/refactor.py
index 55fd60f..8a40deb 100644
--- a/Lib/lib2to3/refactor.py
+++ b/Lib/lib2to3/refactor.py
@@ -8,23 +8,26 @@ recursively descend down directories. Imported as a module, this
provides infrastructure to write your own refactoring tool.
"""
+from __future__ import with_statement
+
__author__ = "Guido van Rossum <guido@python.org>"
# Python imports
-import io
import os
import pkgutil
import sys
import logging
import operator
import collections
+import StringIO
from itertools import chain
# Local imports
from .pgen2 import driver, tokenize, token
from .fixer_util import find_root
from . import pytree, pygram
+from . import btm_utils as bu
from . import btm_matcher as bm
@@ -54,7 +57,7 @@ def _get_head_types(pat):
# Always return leafs
if pat.type is None:
raise _EveryNode
- return {pat.type}
+ return set([pat.type])
if isinstance(pat, pytree.NegatedPattern):
if pat.content:
@@ -91,7 +94,7 @@ def _get_headnode_dict(fixer_list):
head_nodes[fixer._accept_type].append(fixer)
else:
every.append(fixer)
- for node_type in chain(pygram.python_grammar.symbol2number.values(),
+ for node_type in chain(pygram.python_grammar.symbol2number.itervalues(),
pygram.python_grammar.tokens):
head_nodes[node_type].extend(every)
return dict(head_nodes)
@@ -107,14 +110,30 @@ def get_fixers_from_package(pkg_name):
def _identity(obj):
return obj
+if sys.version_info < (3, 0):
+ import codecs
+ _open_with_encoding = codecs.open
+ # codecs.open doesn't translate newlines, sadly.
+ def _from_system_newlines(input):
+ return input.replace(u"\r\n", u"\n")
+ def _to_system_newlines(input):
+ if os.linesep != "\n":
+ return input.replace(u"\n", os.linesep)
+ else:
+ return input
+else:
+ _open_with_encoding = open
+ _from_system_newlines = _identity
+ _to_system_newlines = _identity
+
def _detect_future_features(source):
have_docstring = False
- gen = tokenize.generate_tokens(io.StringIO(source).readline)
+ gen = tokenize.generate_tokens(StringIO.StringIO(source).readline)
def advance():
- tok = next(gen)
+ tok = gen.next()
return tok[0], tok[1]
- ignore = frozenset({token.NEWLINE, tokenize.NL, token.COMMENT})
+ ignore = frozenset((token.NEWLINE, tokenize.NL, token.COMMENT))
features = set()
try:
while True:
@@ -125,20 +144,20 @@ def _detect_future_features(source):
if have_docstring:
break
have_docstring = True
- elif tp == token.NAME and value == "from":
+ elif tp == token.NAME and value == u"from":
tp, value = advance()
- if tp != token.NAME or value != "__future__":
+ if tp != token.NAME or value != u"__future__":
break
tp, value = advance()
- if tp != token.NAME or value != "import":
+ if tp != token.NAME or value != u"import":
break
tp, value = advance()
- if tp == token.OP and value == "(":
+ if tp == token.OP and value == u"(":
tp, value = advance()
while tp == token.NAME:
features.add(value)
tp, value = advance()
- if tp != token.OP or value != ",":
+ if tp != token.OP or value != u",":
break
tp, value = advance()
else:
@@ -232,7 +251,7 @@ class RefactoringTool(object):
try:
fix_class = getattr(mod, class_name)
except AttributeError:
- raise FixerError("Can't find %s.%s" % (fix_name, class_name)) from None
+ raise FixerError("Can't find %s.%s" % (fix_name, class_name))
fixer = fix_class(self.options, self.fixer_log)
if fixer.explicit and self.explicit is not True and \
fix_mod_path not in self.explicit:
@@ -307,15 +326,15 @@ class RefactoringTool(object):
"""
try:
f = open(filename, "rb")
- except OSError as err:
+ except IOError as err:
self.log_error("Can't open %s: %s", filename, err)
return None, None
try:
encoding = tokenize.detect_encoding(f.readline)[0]
finally:
f.close()
- with io.open(filename, "r", encoding=encoding, newline='') as f:
- return f.read(), encoding
+ with _open_with_encoding(filename, "r", encoding=encoding) as f:
+ return _from_system_newlines(f.read()), encoding
def refactor_file(self, filename, write=False, doctests_only=False):
"""Refactors a file."""
@@ -323,7 +342,7 @@ class RefactoringTool(object):
if input is None:
# Reading the file failed.
return
- input += "\n" # Silence certain parse errors
+ input += u"\n" # Silence certain parse errors
if doctests_only:
self.log_debug("Refactoring doctests in %s", filename)
output = self.refactor_docstring(input, filename)
@@ -335,7 +354,7 @@ class RefactoringTool(object):
tree = self.refactor_string(input, filename)
if self.write_unchanged_files or (tree and tree.was_changed):
# The [:-1] is to take off the \n we added earlier
- self.processed_file(str(tree)[:-1], filename,
+ self.processed_file(unicode(tree)[:-1], filename,
write=write, encoding=encoding)
else:
self.log_debug("No changes in %s", filename)
@@ -379,7 +398,7 @@ class RefactoringTool(object):
else:
tree = self.refactor_string(input, "<stdin>")
if self.write_unchanged_files or (tree and tree.was_changed):
- self.processed_file(str(tree), "<stdin>", input)
+ self.processed_file(unicode(tree), "<stdin>", input)
else:
self.log_debug("No changes in stdin")
@@ -514,16 +533,16 @@ class RefactoringTool(object):
set.
"""
try:
- fp = io.open(filename, "w", encoding=encoding, newline='')
- except OSError as err:
+ f = _open_with_encoding(filename, "w", encoding=encoding)
+ except os.error as err:
self.log_error("Can't create %s: %s", filename, err)
return
-
- with fp:
- try:
- fp.write(new_text)
- except OSError as err:
- self.log_error("Can't write %s: %s", filename, err)
+ try:
+ f.write(_to_system_newlines(new_text))
+ except os.error as err:
+ self.log_error("Can't write %s: %s", filename, err)
+ finally:
+ f.close()
self.log_debug("Wrote changes to %s", filename)
self.wrote = True
@@ -547,7 +566,7 @@ class RefactoringTool(object):
block_lineno = None
indent = None
lineno = 0
- for line in input.splitlines(keepends=True):
+ for line in input.splitlines(True):
lineno += 1
if line.lstrip().startswith(self.PS1):
if block is not None:
@@ -559,7 +578,7 @@ class RefactoringTool(object):
indent = line[:i]
elif (indent is not None and
(line.startswith(indent + self.PS2) or
- line == indent + self.PS2.rstrip() + "\n")):
+ line == indent + self.PS2.rstrip() + u"\n")):
block.append(line)
else:
if block is not None:
@@ -571,7 +590,7 @@ class RefactoringTool(object):
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
- return "".join(result)
+ return u"".join(result)
def refactor_doctest(self, block, lineno, indent, filename):
"""Refactors one doctest.
@@ -586,17 +605,17 @@ class RefactoringTool(object):
except Exception as err:
if self.logger.isEnabledFor(logging.DEBUG):
for line in block:
- self.log_debug("Source: %s", line.rstrip("\n"))
+ self.log_debug("Source: %s", line.rstrip(u"\n"))
self.log_error("Can't parse docstring in %s line %s: %s: %s",
filename, lineno, err.__class__.__name__, err)
return block
if self.refactor_tree(tree, filename):
- new = str(tree).splitlines(keepends=True)
+ new = unicode(tree).splitlines(True)
# Undo the adjustment of the line numbers in wrap_toks() below.
clipped, new = new[:lineno-1], new[lineno-1:]
- assert clipped == ["\n"] * (lineno-1), clipped
- if not new[-1].endswith("\n"):
- new[-1] += "\n"
+ assert clipped == [u"\n"] * (lineno-1), clipped
+ if not new[-1].endswith(u"\n"):
+ new[-1] += u"\n"
block = [indent + self.PS1 + new.pop(0)]
if new:
block += [indent + self.PS2 + line for line in new]
@@ -637,7 +656,7 @@ class RefactoringTool(object):
def wrap_toks(self, block, lineno, indent):
"""Wraps a tokenize stream to systematically modify start/end."""
- tokens = tokenize.generate_tokens(self.gen_lines(block, indent).__next__)
+ tokens = tokenize.generate_tokens(self.gen_lines(block, indent).next)
for type, value, (line0, col0), (line1, col1), line_text in tokens:
line0 += lineno - 1
line1 += lineno - 1
@@ -660,8 +679,8 @@ class RefactoringTool(object):
for line in block:
if line.startswith(prefix):
yield line[len(prefix):]
- elif line == prefix.rstrip() + "\n":
- yield "\n"
+ elif line == prefix.rstrip() + u"\n":
+ yield u"\n"
else:
raise AssertionError("line=%r, prefix=%r" % (line, prefix))
prefix = prefix2
@@ -694,7 +713,7 @@ class MultiprocessRefactoringTool(RefactoringTool):
self.queue = multiprocessing.JoinableQueue()
self.output_lock = multiprocessing.Lock()
processes = [multiprocessing.Process(target=self._child)
- for i in range(num_processes)]
+ for i in xrange(num_processes)]
try:
for p in processes:
p.start()
@@ -702,7 +721,7 @@ class MultiprocessRefactoringTool(RefactoringTool):
doctests_only)
finally:
self.queue.join()
- for i in range(num_processes):
+ for i in xrange(num_processes):
self.queue.put(None)
for p in processes:
if p.is_alive():
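The refactor.py hunks above swap Python 3's io layer for a version-gated shim: on 2.x, codecs.open gives encoding-aware file objects but performs no newline translation, so reads normalize "\r\n" to "\n" and writes re-expand os.linesep. A minimal standalone sketch of that pattern, in the spirit of the _read_python_source hunk above; the helper names mirror the patch, but the snippet is an illustration, not the patched module:

    import os
    import sys

    if sys.version_info < (3, 0):
        import codecs
        _open_with_encoding = codecs.open
        # codecs.open decodes to unicode but leaves "\r\n" untouched,
        # so newlines are normalized by hand.
        def _from_system_newlines(text):
            return text.replace(u"\r\n", u"\n")
        def _to_system_newlines(text):
            if os.linesep != "\n":
                return text.replace(u"\n", os.linesep)
            return text
    else:
        _open_with_encoding = open
        _from_system_newlines = _to_system_newlines = lambda text: text

    def read_source(filename, encoding):
        # Read a source file, normalizing platform newlines on the way in.
        f = _open_with_encoding(filename, "r", encoding=encoding)
        try:
            return _from_system_newlines(f.read())
        finally:
            f.close()
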
diff --git a/Lib/lib2to3/tests/__init__.py b/Lib/lib2to3/tests/__init__.py
index 54221c7..cfaea0d 100644
--- a/Lib/lib2to3/tests/__init__.py
+++ b/Lib/lib2to3/tests/__init__.py
@@ -1,8 +1,24 @@
+"""Make tests/ into a package. This allows us to "import tests" and
+have tests.all_tests be a TestSuite representing all test cases
+from all test_*.py files in tests/."""
# Author: Collin Winter
import os
+import os.path
+import unittest
+import types
-from test.support import load_package_tests
+from . import support
-def load_tests(*args):
- return load_package_tests(os.path.dirname(__file__), *args)
+all_tests = unittest.TestSuite()
+
+tests_dir = os.path.join(os.path.dirname(__file__), '..', 'tests')
+tests = [t[0:-3] for t in os.listdir(tests_dir)
+ if t.startswith('test_') and t.endswith('.py')]
+
+loader = unittest.TestLoader()
+
+for t in tests:
+ __import__("",globals(),locals(),[t],level=1)
+ mod = globals()[t]
+ all_tests.addTests(loader.loadTestsFromModule(mod))
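The restored package initializer builds tests.all_tests eagerly: it lists the test_*.py files, imports each one package-relatively via __import__ with level=1, and loads its cases into a single suite. A hedged sketch of the same discovery idiom for an arbitrary package; the build_suite helper is an assumption, not part of the patch:

    import os
    import unittest

    def build_suite(package):
        # Collect test_*.py module names next to the package's __init__.
        tests_dir = os.path.dirname(package.__file__)
        names = sorted(t[:-3] for t in os.listdir(tests_dir)
                       if t.startswith('test_') and t.endswith('.py'))
        loader = unittest.TestLoader()
        suite = unittest.TestSuite()
        for name in names:
            # Equivalent to "from package import <name>"; the fromlist
            # import makes the submodule an attribute of the package.
            mod = __import__(package.__name__, fromlist=[name])
            suite.addTests(loader.loadTestsFromModule(getattr(mod, name)))
        return suite
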
diff --git a/Lib/lib2to3/tests/__main__.py b/Lib/lib2to3/tests/__main__.py
deleted file mode 100644
index 40a23a2..0000000
--- a/Lib/lib2to3/tests/__main__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from . import load_tests
-import unittest
-
-unittest.main()
diff --git a/Lib/lib2to3/tests/data/crlf.py b/Lib/lib2to3/tests/data/crlf.py
index a83ca8f..dbe2d7b 100644
--- a/Lib/lib2to3/tests/data/crlf.py
+++ b/Lib/lib2to3/tests/data/crlf.py
@@ -1,3 +1,3 @@
-print "hi"
-
-print "Like bad Windows newlines?"
+print "hi"
+
+print "Like bad Windows newlines?"
diff --git a/Lib/lib2to3/tests/data/py3_test_grammar.py b/Lib/lib2to3/tests/data/py3_test_grammar.py
index e0b6828..c0bf7f2 100644
--- a/Lib/lib2to3/tests/data/py3_test_grammar.py
+++ b/Lib/lib2to3/tests/data/py3_test_grammar.py
@@ -72,28 +72,6 @@ class TokenTests(unittest.TestCase):
x = 0b100000000000000000000000000000000000000000000000000000000000000000000
x = 0B111111111111111111111111111111111111111111111111111111111111111111111
- def testUnderscoresInNumbers(self):
- # Integers
- x = 1_0
- x = 123_456_7_89
- x = 0xabc_123_4_5
- x = 0X_abc_123
- x = 0B11_01
- x = 0b_11_01
- x = 0o45_67
- x = 0O_45_67
-
- # Floats
- x = 3_1.4
- x = 03_1.4
- x = 3_1.
- x = .3_1
- x = 3.1_4
- x = 0_3.1_4
- x = 3e1_4
- x = 3_1e+4_1
- x = 3_1E-4_1
-
def testFloats(self):
x = 3.14
x = 314.
@@ -147,8 +125,6 @@ jumps over\n\
the \'lazy\' dog.\n\
'
self.assertEquals(x, y)
- x = rf"hello \{True}"; y = f"hello \\{True}"
- self.assertEquals(x, y)
def testEllipsis(self):
x = ...
@@ -343,7 +319,7 @@ class GrammarTests(unittest.TestCase):
def f(x) -> list: pass
self.assertEquals(f.__annotations__, {'return': list})
- # test closures with a variety of oparg's
+ # test MAKE_CLOSURE with a variety of oparg's
closure = 1
def f(): return closure
def f(x=1): return closure
diff --git a/Lib/lib2to3/tests/pytree_idempotency.py b/Lib/lib2to3/tests/pytree_idempotency.py
index 2e7e978..243f7e8 100755
--- a/Lib/lib2to3/tests/pytree_idempotency.py
+++ b/Lib/lib2to3/tests/pytree_idempotency.py
@@ -1,11 +1,9 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Main program for testing the infrastructure."""
-from __future__ import print_function
-
__author__ = "Guido van Rossum <guido@python.org>"
# Support imports (need to be imported first)
@@ -18,8 +16,8 @@ import logging
# Local imports
from .. import pytree
-from .. import pgen2
-from ..pgen2 import driver
+import pgen2
+from pgen2 import driver
logging.basicConfig()
@@ -30,7 +28,7 @@ def main():
fn = "example.py"
tree = dr.parse_file(fn, debug=True)
if not diff(fn, tree):
- print("No diffs.")
+ print "No diffs."
if not sys.argv[1:]:
return # Pass a dummy argument to run the complete test suite below
@@ -46,7 +44,7 @@ def main():
fn = fn[:-1]
if not fn.endswith(".py"):
continue
- print("Parsing", fn, file=sys.stderr)
+ print >>sys.stderr, "Parsing", fn
tree = dr.parse_file(fn, debug=True)
if diff(fn, tree):
problems.append(fn)
@@ -55,29 +53,29 @@ def main():
for dir in sys.path:
try:
names = os.listdir(dir)
- except OSError:
+ except os.error:
continue
- print("Scanning", dir, "...", file=sys.stderr)
+ print >>sys.stderr, "Scanning", dir, "..."
for name in names:
if not name.endswith(".py"):
continue
- print("Parsing", name, file=sys.stderr)
+ print >>sys.stderr, "Parsing", name
fn = os.path.join(dir, name)
try:
tree = dr.parse_file(fn, debug=True)
- except pgen2.parse.ParseError as err:
- print("ParseError:", err)
+ except pgen2.parse.ParseError, err:
+ print "ParseError:", err
else:
if diff(fn, tree):
problems.append(fn)
# Show summary of problem files
if not problems:
- print("No problems. Congratulations!")
+ print "No problems. Congratulations!"
else:
- print("Problems in following files:")
+ print "Problems in following files:"
for fn in problems:
- print("***", fn)
+ print "***", fn
def diff(fn, tree):
f = open("@", "w")
diff --git a/Lib/lib2to3/tests/support.py b/Lib/lib2to3/tests/support.py
index fe084e8..8f12de9 100644
--- a/Lib/lib2to3/tests/support.py
+++ b/Lib/lib2to3/tests/support.py
@@ -3,8 +3,10 @@
# Python imports
import unittest
+import sys
import os
import os.path
+import re
from textwrap import dedent
# Local imports
@@ -15,13 +17,7 @@ test_dir = os.path.dirname(__file__)
proj_dir = os.path.normpath(os.path.join(test_dir, ".."))
grammar_path = os.path.join(test_dir, "..", "Grammar.txt")
grammar = pgen2_driver.load_grammar(grammar_path)
-grammar_no_print_statement = pgen2_driver.load_grammar(grammar_path)
-del grammar_no_print_statement.keywords["print"]
driver = pgen2_driver.Driver(grammar, convert=pytree.convert)
-driver_no_print_statement = pgen2_driver.Driver(
- grammar_no_print_statement,
- convert=pytree.convert
-)
def parse_string(string):
return driver.parse_string(reformat(string), debug=True)
@@ -32,7 +28,7 @@ def run_all_tests(test_mod=None, tests=None):
unittest.TextTestRunner(verbosity=2).run(tests)
def reformat(string):
- return dedent(string) + "\n\n"
+ return dedent(string) + u"\n\n"
def get_refactorer(fixer_pkg="lib2to3", fixers=None, options=None):
"""
diff --git a/Lib/lib2to3/tests/test_all_fixers.py b/Lib/lib2to3/tests/test_all_fixers.py
index c0507cf..f64b3d9 100644
--- a/Lib/lib2to3/tests/test_all_fixers.py
+++ b/Lib/lib2to3/tests/test_all_fixers.py
@@ -7,13 +7,12 @@ running time.
# Python imports
import unittest
-import test.support
# Local imports
+from lib2to3 import refactor
from . import support
-@test.support.requires_resource('cpu')
class Test_all(support.TestCase):
def setUp(self):
@@ -22,6 +21,3 @@ class Test_all(support.TestCase):
def test_all_project_files(self):
for filepath in support.all_project_files():
self.refactor.refactor_file(filepath)
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Lib/lib2to3/tests/test_fixers.py b/Lib/lib2to3/tests/test_fixers.py
index 3da5dd8..c7d5ff9 100644
--- a/Lib/lib2to3/tests/test_fixers.py
+++ b/Lib/lib2to3/tests/test_fixers.py
@@ -2,11 +2,12 @@
# Python imports
import os
+import unittest
from itertools import chain
from operator import itemgetter
# Local imports
-from lib2to3 import pygram, fixer_util
+from lib2to3 import pygram, pytree, refactor, fixer_util
from lib2to3.tests import support
@@ -19,7 +20,7 @@ class FixerTestCase(support.TestCase):
fix_list = [self.fixer]
self.refactor = support.get_refactorer(fixer_pkg, fix_list, options)
self.fixer_log = []
- self.filename = "<string>"
+ self.filename = u"<string>"
for fixer in chain(self.refactor.pre_order,
self.refactor.post_order):
@@ -29,7 +30,7 @@ class FixerTestCase(support.TestCase):
before = support.reformat(before)
after = support.reformat(after)
tree = self.refactor.refactor_string(before, self.filename)
- self.assertEqual(after, str(tree))
+ self.assertEqual(after, unicode(tree))
return tree
def check(self, before, after, ignore_warnings=False):
@@ -285,65 +286,6 @@ class Test_apply(FixerTestCase):
b = """f(*args, **kwds)"""
self.check(a, b)
-class Test_reload(FixerTestCase):
- fixer = "reload"
-
- def test(self):
- b = """reload(a)"""
- a = """import importlib\nimportlib.reload(a)"""
- self.check(b, a)
-
- def test_comment(self):
- b = """reload( a ) # comment"""
- a = """import importlib\nimportlib.reload( a ) # comment"""
- self.check(b, a)
-
- # PEP 8 comments
- b = """reload( a ) # comment"""
- a = """import importlib\nimportlib.reload( a ) # comment"""
- self.check(b, a)
-
- def test_space(self):
- b = """reload( a )"""
- a = """import importlib\nimportlib.reload( a )"""
- self.check(b, a)
-
- b = """reload( a)"""
- a = """import importlib\nimportlib.reload( a)"""
- self.check(b, a)
-
- b = """reload(a )"""
- a = """import importlib\nimportlib.reload(a )"""
- self.check(b, a)
-
- def test_unchanged(self):
- s = """reload(a=1)"""
- self.unchanged(s)
-
- s = """reload(f, g)"""
- self.unchanged(s)
-
- s = """reload(f, *h)"""
- self.unchanged(s)
-
- s = """reload(f, *h, **i)"""
- self.unchanged(s)
-
- s = """reload(f, **i)"""
- self.unchanged(s)
-
- s = """reload(*h, **i)"""
- self.unchanged(s)
-
- s = """reload(*h)"""
- self.unchanged(s)
-
- s = """reload(**i)"""
- self.unchanged(s)
-
- s = """reload()"""
- self.unchanged(s)
-
class Test_intern(FixerTestCase):
fixer = "intern"
@@ -2806,7 +2748,7 @@ class Test_renames(FixerTestCase):
}
def test_import_from(self):
- for mod, (old, new) in list(self.modules.items()):
+ for mod, (old, new) in self.modules.items():
b = "from %s import %s" % (mod, old)
a = "from %s import %s" % (mod, new)
self.check(b, a)
@@ -2815,13 +2757,13 @@ class Test_renames(FixerTestCase):
self.unchanged(s)
def test_import_from_as(self):
- for mod, (old, new) in list(self.modules.items()):
+ for mod, (old, new) in self.modules.items():
b = "from %s import %s as foo_bar" % (mod, old)
a = "from %s import %s as foo_bar" % (mod, new)
self.check(b, a)
def test_import_module_usage(self):
- for mod, (old, new) in list(self.modules.items()):
+ for mod, (old, new) in self.modules.items():
b = """
import %s
foo(%s, %s.%s)
@@ -2834,7 +2776,7 @@ class Test_renames(FixerTestCase):
def XXX_test_from_import_usage(self):
# not implemented yet
- for mod, (old, new) in list(self.modules.items()):
+ for mod, (old, new) in self.modules.items():
b = """
from %s import %s
foo(%s, %s)
@@ -2887,40 +2829,40 @@ class Test_unicode(FixerTestCase):
self.check(b, a)
def test_native_literal_escape_u(self):
- b = r"""'\\\u20ac\U0001d121\\u20ac'"""
- a = r"""'\\\\u20ac\\U0001d121\\u20ac'"""
+ b = """'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ a = """'\\\\\\\\u20ac\\\\U0001d121\\\\u20ac'"""
self.check(b, a)
- b = r"""r'\\\u20ac\U0001d121\\u20ac'"""
- a = r"""r'\\\u20ac\U0001d121\\u20ac'"""
+ b = """r'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ a = """r'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
self.check(b, a)
def test_bytes_literal_escape_u(self):
- b = r"""b'\\\u20ac\U0001d121\\u20ac'"""
- a = r"""b'\\\u20ac\U0001d121\\u20ac'"""
+ b = """b'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ a = """b'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
self.check(b, a)
- b = r"""br'\\\u20ac\U0001d121\\u20ac'"""
- a = r"""br'\\\u20ac\U0001d121\\u20ac'"""
+ b = """br'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ a = """br'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
self.check(b, a)
def test_unicode_literal_escape_u(self):
- b = r"""u'\\\u20ac\U0001d121\\u20ac'"""
- a = r"""'\\\u20ac\U0001d121\\u20ac'"""
+ b = """u'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ a = """'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
self.check(b, a)
- b = r"""ur'\\\u20ac\U0001d121\\u20ac'"""
- a = r"""r'\\\u20ac\U0001d121\\u20ac'"""
+ b = """ur'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ a = """r'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
self.check(b, a)
def test_native_unicode_literal_escape_u(self):
f = 'from __future__ import unicode_literals\n'
- b = f + r"""'\\\u20ac\U0001d121\\u20ac'"""
- a = f + r"""'\\\u20ac\U0001d121\\u20ac'"""
+ b = f + """'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ a = f + """'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
self.check(b, a)
- b = f + r"""r'\\\u20ac\U0001d121\\u20ac'"""
- a = f + r"""r'\\\u20ac\U0001d121\\u20ac'"""
+ b = f + """r'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ a = f + """r'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
self.check(b, a)
@@ -2954,23 +2896,10 @@ class Test_filter(FixerTestCase):
a = """x = [x for x in range(10) if x%2 == 0]"""
self.check(b, a)
- def test_filter_trailers(self):
- b = """x = filter(None, 'abc')[0]"""
- a = """x = [_f for _f in 'abc' if _f][0]"""
- self.check(b, a)
-
- b = """x = len(filter(f, 'abc')[0])"""
- a = """x = len(list(filter(f, 'abc'))[0])"""
- self.check(b, a)
-
- b = """x = filter(lambda x: x%2 == 0, range(10))[0]"""
- a = """x = [x for x in range(10) if x%2 == 0][0]"""
- self.check(b, a)
-
- # Note the parens around x
- b = """x = filter(lambda (x): x%2 == 0, range(10))[0]"""
- a = """x = [x for x in range(10) if x%2 == 0][0]"""
- self.check(b, a)
+ # XXX This (rare) case is not supported
+## b = """x = filter(f, 'abc')[0]"""
+## a = """x = list(filter(f, 'abc'))[0]"""
+## self.check(b, a)
def test_filter_nochange(self):
a = """b.join(filter(f, 'abc'))"""
@@ -3035,23 +2964,6 @@ class Test_map(FixerTestCase):
a = """x = list(map( f, 'abc' ))"""
self.check(b, a)
- def test_map_trailers(self):
- b = """x = map(f, 'abc')[0]"""
- a = """x = list(map(f, 'abc'))[0]"""
- self.check(b, a)
-
- b = """x = map(None, l)[0]"""
- a = """x = list(l)[0]"""
- self.check(b, a)
-
- b = """x = map(lambda x:x, l)[0]"""
- a = """x = [x for x in l][0]"""
- self.check(b, a)
-
- b = """x = map(f, 'abc')[0][1]"""
- a = """x = list(map(f, 'abc'))[0][1]"""
- self.check(b, a)
-
def test_trailing_comment(self):
b = """x = map(f, 'abc') # foo"""
a = """x = list(map(f, 'abc')) # foo"""
@@ -3096,6 +3008,11 @@ class Test_map(FixerTestCase):
"""
self.warns(b, a, "You should use a for loop here")
+ # XXX This (rare) case is not supported
+## b = """x = map(f, 'abc')[0]"""
+## a = """x = list(map(f, 'abc'))[0]"""
+## self.check(b, a)
+
def test_map_nochange(self):
a = """b.join(map(f, 'abc'))"""
self.unchanged(a)
@@ -3155,10 +3072,6 @@ class Test_zip(FixerTestCase):
super(Test_zip, self).check(b, a)
def test_zip_basic(self):
- b = """x = zip()"""
- a = """x = list(zip())"""
- self.check(b, a)
-
b = """x = zip(a, b, c)"""
a = """x = list(zip(a, b, c))"""
self.check(b, a)
@@ -3167,15 +3080,6 @@ class Test_zip(FixerTestCase):
a = """x = len(list(zip(a, b)))"""
self.check(b, a)
- def test_zip_trailers(self):
- b = """x = zip(a, b, c)[0]"""
- a = """x = list(zip(a, b, c))[0]"""
- self.check(b, a)
-
- b = """x = zip(a, b, c)[0][1]"""
- a = """x = list(zip(a, b, c))[0][1]"""
- self.check(b, a)
-
def test_zip_nochange(self):
a = """b.join(zip(a, b))"""
self.unchanged(a)
@@ -4409,7 +4313,7 @@ class Test_operator(FixerTestCase):
def test_operator_isCallable(self):
b = "operator.isCallable(x)"
- a = "callable(x)"
+ a = "hasattr(x, '__call__')"
self.check(b, a)
def test_operator_sequenceIncludes(self):
@@ -4427,12 +4331,12 @@ class Test_operator(FixerTestCase):
def test_operator_isSequenceType(self):
b = "operator.isSequenceType(x)"
- a = "import collections.abc\nisinstance(x, collections.abc.Sequence)"
+ a = "import collections\nisinstance(x, collections.Sequence)"
self.check(b, a)
def test_operator_isMappingType(self):
b = "operator.isMappingType(x)"
- a = "import collections.abc\nisinstance(x, collections.abc.Mapping)"
+ a = "import collections\nisinstance(x, collections.Mapping)"
self.check(b, a)
def test_operator_isNumberType(self):
@@ -4468,7 +4372,7 @@ class Test_operator(FixerTestCase):
def test_bare_isCallable(self):
s = "isCallable(x)"
- t = "You should use 'callable(x)' here."
+ t = "You should use 'hasattr(x, '__call__')' here."
self.warns_unchanged(s, t)
def test_bare_sequenceIncludes(self):
@@ -4478,12 +4382,12 @@ class Test_operator(FixerTestCase):
def test_bare_operator_isSequenceType(self):
s = "isSequenceType(z)"
- t = "You should use 'isinstance(z, collections.abc.Sequence)' here."
+ t = "You should use 'isinstance(z, collections.Sequence)' here."
self.warns_unchanged(s, t)
def test_bare_operator_isMappingType(self):
s = "isMappingType(x)"
- t = "You should use 'isinstance(x, collections.abc.Mapping)' here."
+ t = "You should use 'isinstance(x, collections.Mapping)' here."
self.warns_unchanged(s, t)
def test_bare_operator_isNumberType(self):
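All of the fixer tests above reduce to one contract: reformat the before/after snippets, push the "before" text through refactor_string(), and compare the re-serialized tree to the expected output. A standalone sketch of that check on Python 2 (the fixer choice and code snippets are illustrative):

    # One-off version of the FixerTestCase.check() pattern; the fixer
    # and the snippets are illustrative.
    from lib2to3 import refactor

    rt = refactor.RefactoringTool(["lib2to3.fixes.fix_xrange"])
    before = u"for i in xrange(10): pass\n\n"
    after = u"for i in range(10): pass\n\n"
    tree = rt.refactor_string(before, u"<string>")
    assert unicode(tree) == after
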
diff --git a/Lib/lib2to3/tests/test_main.py b/Lib/lib2to3/tests/test_main.py
index a33c45c..04131cf 100644
--- a/Lib/lib2to3/tests/test_main.py
+++ b/Lib/lib2to3/tests/test_main.py
@@ -1,10 +1,11 @@
# -*- coding: utf-8 -*-
+import sys
import codecs
-import io
import logging
import os
import re
import shutil
+import StringIO
import sys
import tempfile
import unittest
@@ -18,6 +19,15 @@ PY2_TEST_MODULE = os.path.join(TEST_DATA_DIR, "py2_test_grammar.py")
class TestMain(unittest.TestCase):
+ if not hasattr(unittest.TestCase, 'assertNotRegex'):
+ # This method was only introduced in 3.2.
+ def assertNotRegex(self, text, regexp, msg=None):
+ import re
+ if not hasattr(regexp, 'search'):
+ regexp = re.compile(regexp)
+ if regexp.search(text):
+ self.fail("regexp %s MATCHED text %r" % (regexp.pattern, text))
+
def setUp(self):
self.temp_dir = None # tearDown() will rmtree this directory if set.
@@ -42,13 +52,13 @@ class TestMain(unittest.TestCase):
sys.stderr = save_stderr
def test_unencodable_diff(self):
- input_stream = io.StringIO("print 'nothing'\nprint u'über'\n")
- out = io.BytesIO()
+ input_stream = StringIO.StringIO(u"print 'nothing'\nprint u'über'\n")
+ out = StringIO.StringIO()
out_enc = codecs.getwriter("ascii")(out)
- err = io.StringIO()
+ err = StringIO.StringIO()
ret = self.run_2to3_capture(["-"], input_stream, out_enc, err)
self.assertEqual(ret, 0)
- output = out.getvalue().decode("ascii")
+ output = out.getvalue()
self.assertIn("-print 'nothing'", output)
self.assertIn("WARNING: couldn't encode <stdin>'s diff for "
"your terminal", err.getvalue())
@@ -75,14 +85,14 @@ class TestMain(unittest.TestCase):
def test_filename_changing_on_output_single_dir(self):
"""2to3 a single directory with a new output dir and suffix."""
self.setup_test_source_trees()
- out = io.StringIO()
- err = io.StringIO()
+ out = StringIO.StringIO()
+ err = StringIO.StringIO()
suffix = "TEST"
ret = self.run_2to3_capture(
["-n", "--add-suffix", suffix, "--write-unchanged-files",
"--no-diffs", "--output-dir",
self.py3_dest_dir, self.py2_src_dir],
- io.StringIO(""), out, err)
+ StringIO.StringIO(""), out, err)
self.assertEqual(ret, 0)
stderr = err.getvalue()
self.assertIn(" implies -w.", stderr)
@@ -96,7 +106,7 @@ class TestMain(unittest.TestCase):
os.path.join(self.py2_src_dir, name),
os.path.join(self.py3_dest_dir, name+suffix)), stderr)
sep = re.escape(os.sep)
- self.assertRegex(
+ self.assertRegexpMatches(
stderr, r"No changes to .*/__init__\.py".replace("/", sep))
self.assertNotRegex(
stderr, r"No changes to .*/trivial\.py".replace("/", sep))
@@ -104,13 +114,13 @@ class TestMain(unittest.TestCase):
def test_filename_changing_on_output_two_files(self):
"""2to3 two files in one directory with a new output dir."""
self.setup_test_source_trees()
- err = io.StringIO()
+ err = StringIO.StringIO()
py2_files = [self.trivial_py2_file, self.init_py2_file]
expected_files = set(os.path.basename(name) for name in py2_files)
ret = self.run_2to3_capture(
["-n", "-w", "--write-unchanged-files",
"--no-diffs", "--output-dir", self.py3_dest_dir] + py2_files,
- io.StringIO(""), io.StringIO(), err)
+ StringIO.StringIO(""), StringIO.StringIO(), err)
self.assertEqual(ret, 0)
stderr = err.getvalue()
self.assertIn(
@@ -121,11 +131,11 @@ class TestMain(unittest.TestCase):
def test_filename_changing_on_output_single_file(self):
"""2to3 a single file with a new output dir."""
self.setup_test_source_trees()
- err = io.StringIO()
+ err = StringIO.StringIO()
ret = self.run_2to3_capture(
["-n", "-w", "--no-diffs", "--output-dir", self.py3_dest_dir,
self.trivial_py2_file],
- io.StringIO(""), io.StringIO(), err)
+ StringIO.StringIO(""), StringIO.StringIO(), err)
self.assertEqual(ret, 0)
stderr = err.getvalue()
self.assertIn(
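test_main.py grafts a local assertNotRegex onto the TestCase because unittest only grew that assertion in Python 3.2. The same guard generalizes to any pre-3.2 test module; a hedged sketch with illustrative class names:

    # Reusable form of the compatibility shim above; this is a sketch
    # for pre-3.2 unittest, not part of the patch.
    import re
    import unittest

    class RegexAssertions(object):
        def assertNotRegex(self, text, regexp, msg=None):
            if not hasattr(regexp, 'search'):
                regexp = re.compile(regexp)
            if regexp.search(text):
                self.fail("regexp %s MATCHED text %r"
                          % (regexp.pattern, text))

    class MyTest(RegexAssertions, unittest.TestCase):
        def test_no_match(self):
            self.assertNotRegex("hello", r"\d+")
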
diff --git a/Lib/lib2to3/tests/test_parser.py b/Lib/lib2to3/tests/test_parser.py
index a0c31e8..d2254f1 100644
--- a/Lib/lib2to3/tests/test_parser.py
+++ b/Lib/lib2to3/tests/test_parser.py
@@ -8,11 +8,9 @@ test_grammar.py files from both Python 2 and Python 3.
# Testing imports
from . import support
-from .support import driver, driver_no_print_statement
+from .support import driver, test_dir
# Python imports
-import difflib
-import importlib
import operator
import os
import pickle
@@ -20,6 +18,7 @@ import shutil
import subprocess
import sys
import tempfile
+import types
import unittest
# Local imports
@@ -106,10 +105,10 @@ pgen2_driver.load_grammar(%r, save=True, force=True)
class MyLoader:
def get_data(self, where):
return pickle.dumps({'elephant': 19})
- class MyModule:
+ class MyModule(types.ModuleType):
__file__ = 'parsertestmodule'
- __spec__ = importlib.util.spec_from_loader(modname, MyLoader())
- sys.modules[modname] = MyModule()
+ __loader__ = MyLoader()
+ sys.modules[modname] = MyModule(modname)
self.addCleanup(operator.delitem, sys.modules, modname)
g = pgen2_driver.load_packaged_grammar(modname, 'Grammar.txt')
self.assertEqual(g.elephant, 19)
@@ -135,82 +134,12 @@ class TestMatrixMultiplication(GrammarTest):
class TestYieldFrom(GrammarTest):
- def test_yield_from(self):
+ def test_matrix_multiplication_operator(self):
self.validate("yield from x")
self.validate("(yield from x) + y")
self.invalid_syntax("yield from")
-class TestAsyncAwait(GrammarTest):
- def test_await_expr(self):
- self.validate("""async def foo():
- await x
- """)
-
- self.validate("""async def foo():
- [i async for i in b]
- """)
-
- self.validate("""async def foo():
- {i for i in b
- async for i in a if await i
- for b in i}
- """)
-
- self.validate("""async def foo():
- [await i for i in b if await c]
- """)
-
- self.validate("""async def foo():
- [ i for i in b if c]
- """)
-
- self.validate("""async def foo():
-
- def foo(): pass
-
- def foo(): pass
-
- await x
- """)
-
- self.validate("""async def foo(): return await a""")
-
- self.validate("""def foo():
- def foo(): pass
- async def foo(): await x
- """)
-
- self.invalid_syntax("await x")
- self.invalid_syntax("""def foo():
- await x""")
-
- self.invalid_syntax("""def foo():
- def foo(): pass
- async def foo(): pass
- await x
- """)
-
- def test_async_var(self):
- self.validate("""async = 1""")
- self.validate("""await = 1""")
- self.validate("""def async(): pass""")
-
- def test_async_with(self):
- self.validate("""async def foo():
- async for a in b: pass""")
-
- self.invalid_syntax("""def foo():
- async for a in b: pass""")
-
- def test_async_for(self):
- self.validate("""async def foo():
- async with a: pass""")
-
- self.invalid_syntax("""def foo():
- async with a: pass""")
-
-
class TestRaiseChanges(GrammarTest):
def test_2x_style_1(self):
self.validate("raise")
@@ -253,13 +182,6 @@ class TestUnpackingGeneralizations(GrammarTest):
def test_double_star_dict_literal_after_keywords(self):
self.validate("""func(spam='fried', **{'eggs':'scrambled'})""")
- def test_double_star_expression(self):
- self.validate("""func(**{'a':2} or {})""")
- self.validate("""func(**() or {})""")
-
- def test_star_expression(self):
- self.validate("""func(*[] or [2])""")
-
def test_list_display(self):
self.validate("""[*{2}, 3, *[4]]""")
@@ -281,80 +203,8 @@ class TestUnpackingGeneralizations(GrammarTest):
def test_argument_unpacking_3(self):
self.validate("""f(2, *a, *b, **b, **c, **d)""")
- def test_trailing_commas_1(self):
- self.validate("def f(a, b): call(a, b)")
- self.validate("def f(a, b,): call(a, b,)")
-
- def test_trailing_commas_2(self):
- self.validate("def f(a, *b): call(a, *b)")
- self.validate("def f(a, *b,): call(a, *b,)")
-
- def test_trailing_commas_3(self):
- self.validate("def f(a, b=1): call(a, b=1)")
- self.validate("def f(a, b=1,): call(a, b=1,)")
-
- def test_trailing_commas_4(self):
- self.validate("def f(a, **b): call(a, **b)")
- self.validate("def f(a, **b,): call(a, **b,)")
-
- def test_trailing_commas_5(self):
- self.validate("def f(*a, b=1): call(*a, b=1)")
- self.validate("def f(*a, b=1,): call(*a, b=1,)")
-
- def test_trailing_commas_6(self):
- self.validate("def f(*a, **b): call(*a, **b)")
- self.validate("def f(*a, **b,): call(*a, **b,)")
-
- def test_trailing_commas_7(self):
- self.validate("def f(*, b=1): call(*b)")
- self.validate("def f(*, b=1,): call(*b,)")
-
- def test_trailing_commas_8(self):
- self.validate("def f(a=1, b=2): call(a=1, b=2)")
- self.validate("def f(a=1, b=2,): call(a=1, b=2,)")
-
- def test_trailing_commas_9(self):
- self.validate("def f(a=1, **b): call(a=1, **b)")
- self.validate("def f(a=1, **b,): call(a=1, **b,)")
-
- def test_trailing_commas_lambda_1(self):
- self.validate("f = lambda a, b: call(a, b)")
- self.validate("f = lambda a, b,: call(a, b,)")
- def test_trailing_commas_lambda_2(self):
- self.validate("f = lambda a, *b: call(a, *b)")
- self.validate("f = lambda a, *b,: call(a, *b,)")
-
- def test_trailing_commas_lambda_3(self):
- self.validate("f = lambda a, b=1: call(a, b=1)")
- self.validate("f = lambda a, b=1,: call(a, b=1,)")
-
- def test_trailing_commas_lambda_4(self):
- self.validate("f = lambda a, **b: call(a, **b)")
- self.validate("f = lambda a, **b,: call(a, **b,)")
-
- def test_trailing_commas_lambda_5(self):
- self.validate("f = lambda *a, b=1: call(*a, b=1)")
- self.validate("f = lambda *a, b=1,: call(*a, b=1,)")
-
- def test_trailing_commas_lambda_6(self):
- self.validate("f = lambda *a, **b: call(*a, **b)")
- self.validate("f = lambda *a, **b,: call(*a, **b,)")
-
- def test_trailing_commas_lambda_7(self):
- self.validate("f = lambda *, b=1: call(*b)")
- self.validate("f = lambda *, b=1,: call(*b,)")
-
- def test_trailing_commas_lambda_8(self):
- self.validate("f = lambda a=1, b=2: call(a=1, b=2)")
- self.validate("f = lambda a=1, b=2,: call(a=1, b=2,)")
-
- def test_trailing_commas_lambda_9(self):
- self.validate("f = lambda a=1, **b: call(a=1, **b)")
- self.validate("f = lambda a=1, **b,: call(a=1, **b,)")
-
-
-# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testFuncdef
+# Adaptated from Python 3's Lib/test/test_grammar.py:GrammarTests.testFuncdef
class TestFunctionAnnotations(GrammarTest):
def test_1(self):
self.validate("""def f(x) -> list: pass""")
@@ -382,105 +232,6 @@ class TestFunctionAnnotations(GrammarTest):
*g:6, h:7, i=8, j:9=10, **k:11) -> 12: pass"""
self.validate(s)
- def test_9(self):
- s = """def f(
- a: str,
- b: int,
- *,
- c: bool = False,
- **kwargs,
- ) -> None:
- call(c=c, **kwargs,)"""
- self.validate(s)
-
- def test_10(self):
- s = """def f(
- a: str,
- ) -> None:
- call(a,)"""
- self.validate(s)
-
- def test_11(self):
- s = """def f(
- a: str = '',
- ) -> None:
- call(a=a,)"""
- self.validate(s)
-
- def test_12(self):
- s = """def f(
- *args: str,
- ) -> None:
- call(*args,)"""
- self.validate(s)
-
- def test_13(self):
- self.validate("def f(a: str, b: int) -> None: call(a, b)")
- self.validate("def f(a: str, b: int,) -> None: call(a, b,)")
-
- def test_14(self):
- self.validate("def f(a: str, *b: int) -> None: call(a, *b)")
- self.validate("def f(a: str, *b: int,) -> None: call(a, *b,)")
-
- def test_15(self):
- self.validate("def f(a: str, b: int=1) -> None: call(a, b=1)")
- self.validate("def f(a: str, b: int=1,) -> None: call(a, b=1,)")
-
- def test_16(self):
- self.validate("def f(a: str, **b: int) -> None: call(a, **b)")
- self.validate("def f(a: str, **b: int,) -> None: call(a, **b,)")
-
- def test_17(self):
- self.validate("def f(*a: str, b: int=1) -> None: call(*a, b=1)")
- self.validate("def f(*a: str, b: int=1,) -> None: call(*a, b=1,)")
-
- def test_18(self):
- self.validate("def f(*a: str, **b: int) -> None: call(*a, **b)")
- self.validate("def f(*a: str, **b: int,) -> None: call(*a, **b,)")
-
- def test_19(self):
- self.validate("def f(*, b: int=1) -> None: call(*b)")
- self.validate("def f(*, b: int=1,) -> None: call(*b,)")
-
- def test_20(self):
- self.validate("def f(a: str='', b: int=2) -> None: call(a=a, b=2)")
- self.validate("def f(a: str='', b: int=2,) -> None: call(a=a, b=2,)")
-
- def test_21(self):
- self.validate("def f(a: str='', **b: int) -> None: call(a=a, **b)")
- self.validate("def f(a: str='', **b: int,) -> None: call(a=a, **b,)")
-
-
-# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.test_var_annot
-class TestVarAnnotations(GrammarTest):
- def test_1(self):
- self.validate("var1: int = 5")
-
- def test_2(self):
- self.validate("var2: [int, str]")
-
- def test_3(self):
- self.validate("def f():\n"
- " st: str = 'Hello'\n"
- " a.b: int = (1, 2)\n"
- " return st\n")
-
- def test_4(self):
- self.validate("def fbad():\n"
- " x: int\n"
- " print(x)\n")
-
- def test_5(self):
- self.validate("class C:\n"
- " x: int\n"
- " s: str = 'attr'\n"
- " z = 2\n"
- " def __init__(self, x):\n"
- " self.x: int = x\n")
-
- def test_6(self):
- self.validate("lst: List[int] = []")
-
class TestExcept(GrammarTest):
def test_new(self):
@@ -500,27 +251,6 @@ class TestExcept(GrammarTest):
self.validate(s)
-class TestStringLiterals(GrammarTest):
- prefixes = ("'", '"',
- "r'", 'r"', "R'", 'R"',
- "u'", 'u"', "U'", 'U"',
- "b'", 'b"', "B'", 'B"',
- "f'", 'f"', "F'", 'F"',
- "ur'", 'ur"', "Ur'", 'Ur"',
- "uR'", 'uR"', "UR'", 'UR"',
- "br'", 'br"', "Br'", 'Br"',
- "bR'", 'bR"', "BR'", 'BR"',
- "rb'", 'rb"', "Rb'", 'Rb"',
- "rB'", 'rB"', "RB'", 'RB"',)
-
- def test_lit(self):
- for pre in self.prefixes:
- single = "{p}spamspamspam{s}".format(p=pre, s=pre[-1])
- self.validate(single)
- triple = "{p}{s}{s}eggs{s}{s}{s}".format(p=pre, s=pre[-1])
- self.validate(triple)
-
-
# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testAtoms
class TestSetLiteral(GrammarTest):
def test_1(self):
@@ -536,16 +266,6 @@ class TestSetLiteral(GrammarTest):
self.validate("""x = {2, 3, 4,}""")
-# Adapted from Python 3's Lib/test/test_unicode_identifiers.py and
-# Lib/test/test_tokenize.py:TokenizeTest.test_non_ascii_identifiers
-class TestIdentifier(GrammarTest):
- def test_non_ascii_identifiers(self):
- self.validate("Örter = 'places'\ngrün = 'green'")
- self.validate("蟒 = a蟒 = 锦蛇 = 1")
- self.validate("µ = aµ = µµ = 1")
- self.validate("𝔘𝔫𝔦𝔠𝔬𝔡𝔢 = a_𝔘𝔫𝔦𝔠𝔬𝔡𝔢 = 1")
-
-
class TestNumericLiterals(GrammarTest):
def test_new_octal_notation(self):
self.validate("""0o7777777777777""")
@@ -562,7 +282,7 @@ class TestClassDef(GrammarTest):
self.validate("class B(t, *args): pass")
self.validate("class B(t, **kwargs): pass")
self.validate("class B(t, *args, **kwargs): pass")
- self.validate("class B(t, y=9, *args, **kwargs,): pass")
+ self.validate("class B(t, y=9, *args, **kwargs): pass")
class TestParserIdempotency(support.TestCase):
@@ -570,23 +290,20 @@ class TestParserIdempotency(support.TestCase):
"""A cut-down version of pytree_idempotency.py."""
def test_all_project_files(self):
+ if sys.platform.startswith("win"):
+ # XXX something with newlines goes wrong on Windows.
+ return
for filepath in support.all_project_files():
with open(filepath, "rb") as fp:
encoding = tokenize.detect_encoding(fp.readline)[0]
self.assertIsNotNone(encoding,
"can't detect encoding for %s" % filepath)
- with open(filepath, "r", encoding=encoding) as fp:
+ with open(filepath, "r") as fp:
source = fp.read()
- try:
- tree = driver.parse_string(source)
- except ParseError:
- try:
- tree = driver_no_print_statement.parse_string(source)
- except ParseError as err:
- self.fail('ParseError on file %s (%s)' % (filepath, err))
- new = str(tree)
- if new != source:
- print(diff_texts(source, new, filepath))
+ source = source.decode(encoding)
+ tree = driver.parse_string(source)
+ new = unicode(tree)
+ if diff(filepath, new, encoding):
self.fail("Idempotency failed: %s" % filepath)
def test_extended_unpacking(self):
@@ -595,7 +312,6 @@ class TestParserIdempotency(support.TestCase):
driver.parse_string("(z, *y, w) = m\n")
driver.parse_string("for *z, m in d: pass\n")
-
class TestLiterals(GrammarTest):
def validate(self, s):
@@ -629,25 +345,14 @@ class TestLiterals(GrammarTest):
self.validate(s)
-class TestPickleableException(unittest.TestCase):
- def test_ParseError(self):
- err = ParseError('msg', 2, None, (1, 'context'))
- for proto in range(pickle.HIGHEST_PROTOCOL + 1):
- err2 = pickle.loads(pickle.dumps(err, protocol=proto))
- self.assertEqual(err.args, err2.args)
- self.assertEqual(err.msg, err2.msg)
- self.assertEqual(err.type, err2.type)
- self.assertEqual(err.value, err2.value)
- self.assertEqual(err.context, err2.context)
-
-
-def diff_texts(a, b, filename):
- a = a.splitlines()
- b = b.splitlines()
- return difflib.unified_diff(a, b, filename, filename,
- "(original)", "(reserialized)",
- lineterm="")
-
-
-if __name__ == '__main__':
- unittest.main()
+def diff(fn, result, encoding):
+ f = open("@", "w")
+ try:
+ f.write(result.encode(encoding))
+ finally:
+ f.close()
+ try:
+ fn = fn.replace('"', '\\"')
+ return os.system('diff -u "%s" @' % fn)
+ finally:
+ os.remove("@")
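Two support pieces above are worth a note: the new diff() helper shells out to diff -u instead of using difflib, and the MyLoader/MyModule stub fakes the one hook load_packaged_grammar relies on, a module __loader__ whose get_data() hands back a pickled grammar dict. A sketch of that loader mechanism; the stub names are illustrative, and like the test it assumes no Grammar.txt file exists in the working directory so the pickled-data path is taken:

    # What load_packaged_grammar needs from a module: a __loader__
    # whose get_data() returns a pickled grammar dict.
    import pickle
    import sys
    import types

    class StubLoader(object):
        def get_data(self, path):
            return pickle.dumps({'elephant': 19})

    mod = types.ModuleType('parsertest_stub')
    mod.__file__ = 'parsertest_stub'
    mod.__loader__ = StubLoader()
    sys.modules['parsertest_stub'] = mod

    from lib2to3.pgen2 import driver
    g = driver.load_packaged_grammar('parsertest_stub', 'Grammar.txt')
    assert g.elephant == 19
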
diff --git a/Lib/lib2to3/tests/test_pytree.py b/Lib/lib2to3/tests/test_pytree.py
index 177126d..ccddce6 100644
--- a/Lib/lib2to3/tests/test_pytree.py
+++ b/Lib/lib2to3/tests/test_pytree.py
@@ -9,6 +9,11 @@ more helpful than printing of (the first line of) the docstring,
especially when debugging a test.
"""
+from __future__ import with_statement
+
+import sys
+import warnings
+
# Testing imports
from . import support
@@ -26,6 +31,23 @@ class TestNodes(support.TestCase):
"""Unit tests for nodes (Base, Leaf, Node)."""
+ if sys.version_info >= (2,6):
+ # warnings.catch_warnings is new in 2.6.
+ def test_deprecated_prefix_methods(self):
+ l = pytree.Leaf(100, "foo")
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter("always", DeprecationWarning)
+ self.assertEqual(l.get_prefix(), "")
+ l.set_prefix("hi")
+ self.assertEqual(l.prefix, "hi")
+ self.assertEqual(len(w), 2)
+ for warning in w:
+ self.assertTrue(warning.category is DeprecationWarning)
+ self.assertEqual(str(w[0].message), "get_prefix() is deprecated; " \
+ "use the prefix property")
+ self.assertEqual(str(w[1].message), "set_prefix() is deprecated; " \
+ "use the prefix property")
+
def test_instantiate_base(self):
if __debug__:
# Test that instantiating Base() raises an AssertionError
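The test_deprecated_prefix_methods addition above uses the standard recipe for asserting that a DeprecationWarning actually fires: record warnings inside catch_warnings, force them with simplefilter("always"), then inspect what was caught. The same pattern in isolation; the warned-about API is illustrative:

    import warnings

    def old_api():
        # Illustrative deprecated function.
        warnings.warn("old_api() is deprecated; use new_api()",
                      DeprecationWarning, stacklevel=2)
        return 42

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always", DeprecationWarning)
        assert old_api() == 42
    assert len(caught) == 1
    assert caught[0].category is DeprecationWarning
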
diff --git a/Lib/lib2to3/tests/test_refactor.py b/Lib/lib2to3/tests/test_refactor.py
index 9e3b8fb..c737aa5 100644
--- a/Lib/lib2to3/tests/test_refactor.py
+++ b/Lib/lib2to3/tests/test_refactor.py
@@ -2,18 +2,24 @@
Unit tests for refactor.py.
"""
+from __future__ import with_statement
+
import sys
import os
import codecs
-import io
+import operator
import re
+import StringIO
import tempfile
import shutil
import unittest
+import warnings
from lib2to3 import refactor, pygram, fixer_base
from lib2to3.pgen2 import token
+from . import support
+
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
FIXER_DIR = os.path.join(TEST_DATA_DIR, "fixers")
@@ -127,7 +133,7 @@ from __future__ import print_function"""
self.assertEqual(top_fixes, [with_head, no_head])
name_fixes = d.pop(token.NAME)
self.assertEqual(name_fixes, [simple, no_head])
- for fixes in d.values():
+ for fixes in d.itervalues():
self.assertEqual(fixes, [no_head])
def test_fixer_loading(self):
@@ -167,7 +173,7 @@ from __future__ import print_function"""
results = []
rt = MyRT(_DEFAULT_FIXERS)
save = sys.stdin
- sys.stdin = io.StringIO("def parrot(): pass\n\n")
+ sys.stdin = StringIO.StringIO("def parrot(): pass\n\n")
try:
rt.refactor_stdin()
finally:
@@ -180,42 +186,32 @@ from __future__ import print_function"""
def check_file_refactoring(self, test_file, fixers=_2TO3_FIXERS,
options=None, mock_log_debug=None,
actually_write=True):
- test_file = self.init_test_file(test_file)
- old_contents = self.read_file(test_file)
+ tmpdir = tempfile.mkdtemp(prefix="2to3-test_refactor")
+ self.addCleanup(shutil.rmtree, tmpdir)
+ # make a copy of the tested file that we can write to
+ shutil.copy(test_file, tmpdir)
+ test_file = os.path.join(tmpdir, os.path.basename(test_file))
+ os.chmod(test_file, 0o644)
+
+ def read_file():
+ with open(test_file, "rb") as fp:
+ return fp.read()
+
+ old_contents = read_file()
rt = self.rt(fixers=fixers, options=options)
if mock_log_debug:
rt.log_debug = mock_log_debug
rt.refactor_file(test_file)
- self.assertEqual(old_contents, self.read_file(test_file))
+ self.assertEqual(old_contents, read_file())
if not actually_write:
return
rt.refactor_file(test_file, True)
- new_contents = self.read_file(test_file)
+ new_contents = read_file()
self.assertNotEqual(old_contents, new_contents)
return new_contents
- def init_test_file(self, test_file):
- tmpdir = tempfile.mkdtemp(prefix="2to3-test_refactor")
- self.addCleanup(shutil.rmtree, tmpdir)
- shutil.copy(test_file, tmpdir)
- test_file = os.path.join(tmpdir, os.path.basename(test_file))
- os.chmod(test_file, 0o644)
- return test_file
-
- def read_file(self, test_file):
- with open(test_file, "rb") as fp:
- return fp.read()
-
- def refactor_file(self, test_file, fixers=_2TO3_FIXERS):
- test_file = self.init_test_file(test_file)
- old_contents = self.read_file(test_file)
- rt = self.rt(fixers=fixers)
- rt.refactor_file(test_file, True)
- new_contents = self.read_file(test_file)
- return old_contents, new_contents
-
def test_refactor_file(self):
test_file = os.path.join(FIXER_DIR, "parrot_example.py")
self.check_file_refactoring(test_file, _DEFAULT_FIXERS)
@@ -235,7 +231,7 @@ from __future__ import print_function"""
re.escape(os.sep + os.path.basename(test_file))
for message in debug_messages:
if "Not writing changes" in message:
- self.assertRegex(message, message_regex)
+ self.assertRegexpMatches(message, message_regex)
break
else:
self.fail("%r not matched in %r" % (message_regex, debug_messages))
@@ -295,13 +291,6 @@ from __future__ import print_function"""
finally:
os.linesep = old_sep
- def test_crlf_unchanged(self):
- fn = os.path.join(TEST_DATA_DIR, "crlf.py")
- old, new = self.refactor_file(fn)
- self.assertIn(b"\r\n", old)
- self.assertIn(b"\r\n", new)
- self.assertNotIn(b"\r\r\n", new)
-
def test_refactor_docstring(self):
rt = self.rt()
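check_file_refactoring now inlines its fixture handling: copy the file under test into a fresh temporary directory, make the copy writable, and only then let the RefactoringTool touch it, so the checked-in fixture is never modified. The same discipline as a reusable helper; a sketch, with illustrative names:

    import os
    import shutil
    import tempfile
    from lib2to3 import refactor

    def refactor_copy(src, fixers):
        # Refactor a throwaway copy of *src* and return the new bytes.
        tmpdir = tempfile.mkdtemp(prefix="2to3-sketch")
        try:
            shutil.copy(src, tmpdir)
            work = os.path.join(tmpdir, os.path.basename(src))
            os.chmod(work, 0o644)
            rt = refactor.RefactoringTool(fixers)
            rt.refactor_file(work, write=True)
            with open(work, "rb") as fp:
                return fp.read()
        finally:
            shutil.rmtree(tmpdir)
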
diff --git a/Lib/lib2to3/tests/test_util.py b/Lib/lib2to3/tests/test_util.py
index c6c6139..2fab8b9 100644
--- a/Lib/lib2to3/tests/test_util.py
+++ b/Lib/lib2to3/tests/test_util.py
@@ -3,6 +3,9 @@
# Testing imports
from . import support
+# Python imports
+import os.path
+
# Local imports
from lib2to3.pytree import Node, Leaf
from lib2to3 import fixer_util
@@ -578,14 +581,14 @@ class Test_find_indentation(support.TestCase):
def test_nothing(self):
fi = fixer_util.find_indentation
node = parse("node()")
- self.assertEqual(fi(node), "")
+ self.assertEqual(fi(node), u"")
node = parse("")
- self.assertEqual(fi(node), "")
+ self.assertEqual(fi(node), u"")
def test_simple(self):
fi = fixer_util.find_indentation
node = parse("def f():\n x()")
- self.assertEqual(fi(node), "")
- self.assertEqual(fi(node.children[0].children[4].children[2]), " ")
+ self.assertEqual(fi(node), u"")
+ self.assertEqual(fi(node.children[0].children[4].children[2]), u" ")
node = parse("def f():\n x()\n y()")
- self.assertEqual(fi(node.children[0].children[4].children[4]), " ")
+ self.assertEqual(fi(node.children[0].children[4].children[4]), u" ")
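find_indentation, exercised above, climbs from a node to its enclosing suite and returns that suite's INDENT token value, or u"" when there is none. A usage sketch; it assumes the lib2to3 test helpers are importable, and the child indices follow the tests above:

    # Usage sketch for fixer_util.find_indentation; the source here is
    # indented with four spaces.
    from lib2to3 import fixer_util
    from lib2to3.tests.support import parse_string

    tree = parse_string("def f():\n    x()")
    func = tree.children[0]     # the funcdef node
    suite = func.children[4]    # 'def', 'f', params, ':', then the suite
    body = suite.children[2]    # NEWLINE, INDENT, then the body
    assert fixer_util.find_indentation(body) == u"    "
    assert fixer_util.find_indentation(tree) == u""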