summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--Lib/lib2to3/Grammar.txt12
-rw-r--r--Lib/lib2to3/fixes/fix_idioms.py25
-rw-r--r--Lib/lib2to3/fixes/fix_map.py18
-rw-r--r--Lib/lib2to3/fixes/fix_tuple_params.py2
-rw-r--r--Lib/lib2to3/pgen2/pgen.py2
-rw-r--r--Lib/lib2to3/pgen2/tokenize.py15
-rw-r--r--Lib/lib2to3/pytree.py2
-rw-r--r--Lib/lib2to3/tests/test_all_fixers.py3
-rwxr-xr-xLib/lib2to3/tests/test_fixers.py55
-rw-r--r--Lib/lib2to3/tests/test_parser.py5
10 files changed, 118 insertions, 21 deletions
diff --git a/Lib/lib2to3/Grammar.txt b/Lib/lib2to3/Grammar.txt
index 4cf4e32..be034e1 100644
--- a/Lib/lib2to3/Grammar.txt
+++ b/Lib/lib2to3/Grammar.txt
@@ -53,8 +53,9 @@ stmt: simple_stmt | compound_stmt
simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
small_stmt: (expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt |
import_stmt | global_stmt | exec_stmt | assert_stmt)
-expr_stmt: testlist (augassign (yield_expr|testlist) |
- ('=' (yield_expr|testlist))*)
+expr_stmt: testlist_star_expr (augassign (yield_expr|testlist) |
+ ('=' (yield_expr|testlist_star_expr))*)
+testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' |
'<<=' | '>>=' | '**=' | '//=')
# For normal assignments, additional restrictions enforced by the interpreter
@@ -112,6 +113,7 @@ and_test: not_test ('and' not_test)*
not_test: 'not' not_test | comparison
comparison: expr (comp_op expr)*
comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
+star_expr: '*' expr
expr: xor_expr ('|' xor_expr)*
xor_expr: and_expr ('^' and_expr)*
and_expr: shift_expr ('&' shift_expr)*
@@ -125,14 +127,14 @@ atom: ('(' [yield_expr|testlist_gexp] ')' |
'{' [dictsetmaker] '}' |
'`' testlist1 '`' |
NAME | NUMBER | STRING+ | '.' '.' '.')
-listmaker: test ( comp_for | (',' test)* [','] )
-testlist_gexp: test ( comp_for | (',' test)* [','] )
+listmaker: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
+testlist_gexp: test ( comp_for | (',' (test|star_expr))* [','] )
lambdef: 'lambda' [varargslist] ':' test
trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
subscriptlist: subscript (',' subscript)* [',']
subscript: test | [test] ':' [test] [sliceop]
sliceop: ':' [test]
-exprlist: expr (',' expr)* [',']
+exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
testlist: test (',' test)* [',']
dictsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) |
(test (comp_for | (',' test)* [','])) )
diff --git a/Lib/lib2to3/fixes/fix_idioms.py b/Lib/lib2to3/fixes/fix_idioms.py
index 1f68faf..9bee99b 100644
--- a/Lib/lib2to3/fixes/fix_idioms.py
+++ b/Lib/lib2to3/fixes/fix_idioms.py
@@ -29,7 +29,7 @@ into
# Local imports
from .. import fixer_base
-from ..fixer_util import Call, Comma, Name, Node, syms
+from ..fixer_util import Call, Comma, Name, Node, BlankLine, syms
CMP = "(n='!=' | '==' | 'is' | n=comp_op< 'is' 'not' >)"
TYPE = "power< 'type' trailer< '(' x=any ')' > >"
@@ -130,5 +130,24 @@ class FixIdioms(fixer_base.BaseFix):
else:
raise RuntimeError("should not have reached here")
sort_stmt.remove()
- if next_stmt:
- next_stmt[0].prefix = sort_stmt.prefix
+
+ btwn = sort_stmt.prefix
+ # Keep any prefix lines between the sort_stmt and the list_call and
+ # shove them right after the sorted() call.
+ if "\n" in btwn:
+ if next_stmt:
+ # The new prefix should be everything from the sort_stmt's
+ # prefix up to the last newline, then the old prefix after a new
+ # line.
+ prefix_lines = (btwn.rpartition("\n")[0], next_stmt[0].prefix)
+ next_stmt[0].prefix = "\n".join(prefix_lines)
+ else:
+ assert list_call.parent
+ assert list_call.next_sibling is None
+ # Put a blank line after list_call and set its prefix.
+ end_line = BlankLine()
+ list_call.parent.append_child(end_line)
+ assert list_call.next_sibling is end_line
+ # The new prefix should be everything up to the first new line
+ # of sort_stmt's prefix.
+ end_line.prefix = btwn.rpartition("\n")[0]
diff --git a/Lib/lib2to3/fixes/fix_map.py b/Lib/lib2to3/fixes/fix_map.py
index cf8f351..77d66c4 100644
--- a/Lib/lib2to3/fixes/fix_map.py
+++ b/Lib/lib2to3/fixes/fix_map.py
@@ -49,8 +49,7 @@ class FixMap(fixer_base.ConditionalFix):
>
|
power<
- 'map'
- args=trailer< '(' [any] ')' >
+ 'map' trailer< '(' [arglist=any] ')' >
>
"""
@@ -66,13 +65,22 @@ class FixMap(fixer_base.ConditionalFix):
new.prefix = ""
new = Call(Name("list"), [new])
elif "map_lambda" in results:
- new = ListComp(results.get("xp").clone(),
- results.get("fp").clone(),
- results.get("it").clone())
+ new = ListComp(results["xp"].clone(),
+ results["fp"].clone(),
+ results["it"].clone())
else:
if "map_none" in results:
new = results["arg"].clone()
else:
+ if "arglist" in results:
+ args = results["arglist"]
+ if args.type == syms.arglist and \
+ args.children[0].type == token.NAME and \
+ args.children[0].value == "None":
+ self.warning(node, "cannot convert map(None, ...) "
+ "with multiple arguments because map() "
+ "now truncates to the shortest sequence")
+ return
if in_special_context(node):
return None
new = node.clone()
diff --git a/Lib/lib2to3/fixes/fix_tuple_params.py b/Lib/lib2to3/fixes/fix_tuple_params.py
index 41b8dc2..fb47d01 100644
--- a/Lib/lib2to3/fixes/fix_tuple_params.py
+++ b/Lib/lib2to3/fixes/fix_tuple_params.py
@@ -96,6 +96,8 @@ class FixTupleParams(fixer_base.BaseFix):
new_lines[0].prefix = indent
after = start + 1
+ for line in new_lines:
+ line.parent = suite[0]
suite[0].children[after:after] = new_lines
for i in range(after+1, after+len(new_lines)+1):
suite[0].children[i].prefix = indent
diff --git a/Lib/lib2to3/pgen2/pgen.py b/Lib/lib2to3/pgen2/pgen.py
index 61f3d50..2c51eef 100644
--- a/Lib/lib2to3/pgen2/pgen.py
+++ b/Lib/lib2to3/pgen2/pgen.py
@@ -379,6 +379,8 @@ class DFAState(object):
return False
return True
+ __hash__ = None # For Py3 compatibility.
+
def generate_grammar(filename="Grammar.txt"):
p = ParserGenerator(filename)
return p.make_grammar()
diff --git a/Lib/lib2to3/pgen2/tokenize.py b/Lib/lib2to3/pgen2/tokenize.py
index 799566b..4585ca3 100644
--- a/Lib/lib2to3/pgen2/tokenize.py
+++ b/Lib/lib2to3/pgen2/tokenize.py
@@ -231,6 +231,17 @@ class Untokenizer:
cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
+def _get_normal_name(orig_enc):
+ """Imitates get_normal_name in tokenizer.c."""
+ # Only care about the first 12 characters.
+ enc = orig_enc[:12].lower().replace("_", "-")
+ if enc == "utf-8" or enc.startswith("utf-8-"):
+ return "utf-8"
+ if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
+ enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
+ return "iso-8859-1"
+ return orig_enc
+
def detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that should
@@ -265,7 +276,7 @@ def detect_encoding(readline):
matches = cookie_re.findall(line_string)
if not matches:
return None
- encoding = matches[0]
+ encoding = _get_normal_name(matches[0])
try:
codec = lookup(encoding)
except LookupError:
@@ -375,7 +386,7 @@ def generate_tokens(readline):
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ': column = column + 1
- elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize
+ elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize
elif line[pos] == '\f': column = 0
else: break
pos = pos + 1
diff --git a/Lib/lib2to3/pytree.py b/Lib/lib2to3/pytree.py
index f6df3a0..15b83f6 100644
--- a/Lib/lib2to3/pytree.py
+++ b/Lib/lib2to3/pytree.py
@@ -63,6 +63,8 @@ class Base(object):
return NotImplemented
return self._eq(other)
+ __hash__ = None # For Py3 compatibility.
+
def __ne__(self, other):
"""
Compare two nodes for inequality.
diff --git a/Lib/lib2to3/tests/test_all_fixers.py b/Lib/lib2to3/tests/test_all_fixers.py
index 8ef222d..1e0cb29 100644
--- a/Lib/lib2to3/tests/test_all_fixers.py
+++ b/Lib/lib2to3/tests/test_all_fixers.py
@@ -16,8 +16,7 @@ from . import support
class Test_all(support.TestCase):
def setUp(self):
- options = {"print_function" : False}
- self.refactor = support.get_refactorer(options=options)
+ self.refactor = support.get_refactorer()
def test_all_project_files(self):
for filepath in support.all_project_files():
diff --git a/Lib/lib2to3/tests/test_fixers.py b/Lib/lib2to3/tests/test_fixers.py
index d8218d3..2a39359 100755
--- a/Lib/lib2to3/tests/test_fixers.py
+++ b/Lib/lib2to3/tests/test_fixers.py
@@ -339,6 +339,12 @@ class Test_reduce(FixerTestCase):
a = "from functools import reduce\nreduce(a, b, c)"
self.check(b, a)
+ def test_bug_7253(self):
+ # fix_tuple_params was being bad and orphaning nodes in the tree.
+ b = "def x(arg): reduce(sum, [])"
+ a = "from functools import reduce\ndef x(arg): reduce(sum, [])"
+ self.check(b, a)
+
def test_call_with_lambda(self):
b = "reduce(lambda x, y: x + y, seq)"
a = "from functools import reduce\nreduce(lambda x, y: x + y, seq)"
@@ -2834,6 +2840,11 @@ class Test_map(FixerTestCase):
a = """x = list(map(f, 'abc')) # foo"""
self.check(b, a)
+ def test_None_with_multiple_arguments(self):
+ s = """x = map(None, a, b, c)"""
+ self.warns_unchanged(s, "cannot convert map(None, ...) with "
+ "multiple arguments")
+
def test_map_basic(self):
b = """x = map(f, 'abc')"""
a = """x = list(map(f, 'abc'))"""
@@ -2847,10 +2858,6 @@ class Test_map(FixerTestCase):
a = """x = list('abc')"""
self.check(b, a)
- b = """x = map(None, 'abc', 'def')"""
- a = """x = list(map(None, 'abc', 'def'))"""
- self.check(b, a)
-
b = """x = map(lambda x: x+1, range(4))"""
a = """x = [x+1 for x in range(4)]"""
self.check(b, a)
@@ -3238,6 +3245,46 @@ class Test_idioms(FixerTestCase):
"""
self.check(b, a)
+ b = r"""
+ try:
+ m = list(s)
+ m.sort()
+ except: pass
+ """
+
+ a = r"""
+ try:
+ m = sorted(s)
+ except: pass
+ """
+ self.check(b, a)
+
+ b = r"""
+ try:
+ m = list(s)
+ # foo
+ m.sort()
+ except: pass
+ """
+
+ a = r"""
+ try:
+ m = sorted(s)
+ # foo
+ except: pass
+ """
+ self.check(b, a)
+
+ b = r"""
+ m = list(s)
+ # more comments
+ m.sort()"""
+
+ a = r"""
+ m = sorted(s)
+ # more comments"""
+ self.check(b, a)
+
def test_sort_simple_expr(self):
b = """
v = t
diff --git a/Lib/lib2to3/tests/test_parser.py b/Lib/lib2to3/tests/test_parser.py
index 08e7bdc..a4599f4 100644
--- a/Lib/lib2to3/tests/test_parser.py
+++ b/Lib/lib2to3/tests/test_parser.py
@@ -161,6 +161,11 @@ class TestParserIdempotency(support.TestCase):
if diff(filepath, new):
self.fail("Idempotency failed: %s" % filepath)
+ def test_extended_unpacking(self):
+ driver.parse_string("a, *b, c = x\n")
+ driver.parse_string("[*a, b] = x\n")
+ driver.parse_string("(z, *y, w) = m\n")
+ driver.parse_string("for *z, m in d: pass\n")
class TestLiterals(GrammarTest):