author    Jelle Zijlstra <jelle.zijlstra@gmail.com>  2017-10-06 03:24:46 (GMT)
committer Yury Selivanov <yury@magic.io>             2017-10-06 03:24:46 (GMT)
commit    ac317700ce7439e38a8b420218d9a5035bba92ed (patch)
tree      ddeb7d90f2e90b73a37783b88ef77376d9d996f5 /Lib
parent    2084b30e540d88b9fc752c5bdcc2f24334af4f2b (diff)
bpo-30406: Make async and await proper keywords (#1669)
Per PEP 492, 'async' and 'await' should become proper keywords in 3.7.
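
With this change, using 'async' or 'await' as an identifier fails at compile time instead of emitting the DeprecationWarning it produced in 3.6. A minimal sketch of the new behavior (illustrative, Python 3.7+):

    # Both of these now raise SyntaxError at compile time.
    for src in ("async = 1", "def await(): pass"):
        try:
            compile(src, "<test>", "exec")
        except SyntaxError as exc:
            print("SyntaxError:", exc.msg, "in", repr(src))
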
Diffstat (limited to 'Lib')
-rwxr-xr-x  Lib/keyword.py                        2
-rw-r--r--  Lib/lib2to3/Grammar.txt              10
-rwxr-xr-x  Lib/lib2to3/pgen2/token.py            6
-rw-r--r--  Lib/lib2to3/pgen2/tokenize.py        76
-rw-r--r--  Lib/lib2to3/tests/test_parser.py     26
-rw-r--r--  Lib/pydoc.py                          4
-rwxr-xr-x  Lib/symbol.py                        11
-rw-r--r--  Lib/test/test_asyncio/test_tasks.py   6
-rw-r--r--  Lib/test/test_coroutines.py          24
-rw-r--r--  Lib/test/test_parser.py               6
-rw-r--r--  Lib/test/test_tokenize.py            28
-rw-r--r--  Lib/tokenize.py                      62
12 files changed, 59 insertions(+), 202 deletions(-)
diff --git a/Lib/keyword.py b/Lib/keyword.py
index 6e1e882..431991d 100755
--- a/Lib/keyword.py
+++ b/Lib/keyword.py
@@ -20,6 +20,8 @@ kwlist = [
'and',
'as',
'assert',
+ 'async',
+ 'await',
'break',
'class',
'continue',
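
The keyword module is the canonical list of reserved words, so tools that introspect it pick this change up automatically; a quick check (illustrative):

    import keyword

    # 'async' and 'await' now appear in keyword.kwlist.
    print(keyword.iskeyword("async"))   # True
    print(keyword.iskeyword("await"))   # True
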
diff --git a/Lib/lib2to3/Grammar.txt b/Lib/lib2to3/Grammar.txt
index ded0325..0bdfcaf 100644
--- a/Lib/lib2to3/Grammar.txt
+++ b/Lib/lib2to3/Grammar.txt
@@ -34,7 +34,7 @@ eval_input: testlist NEWLINE* ENDMARKER
decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
decorators: decorator+
decorated: decorators (classdef | funcdef | async_funcdef)
-async_funcdef: ASYNC funcdef
+async_funcdef: 'async' funcdef
funcdef: 'def' NAME parameters ['->' test] ':' suite
parameters: '(' [typedargslist] ')'
typedargslist: ((tfpdef ['=' test] ',')*
@@ -85,7 +85,7 @@ exec_stmt: 'exec' expr ['in' test [',' test]]
assert_stmt: 'assert' test [',' test]
compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt
-async_stmt: ASYNC (funcdef | with_stmt | for_stmt)
+async_stmt: 'async' (funcdef | with_stmt | for_stmt)
if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
while_stmt: 'while' test ':' suite ['else' ':' suite]
for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
@@ -124,7 +124,7 @@ shift_expr: arith_expr (('<<'|'>>') arith_expr)*
arith_expr: term (('+'|'-') term)*
term: factor (('*'|'@'|'/'|'%'|'//') factor)*
factor: ('+'|'-'|'~') factor | power
-power: [AWAIT] atom trailer* ['**' factor]
+power: ['await'] atom trailer* ['**' factor]
atom: ('(' [yield_expr|testlist_gexp] ')' |
'[' [listmaker] ']' |
'{' [dictsetmaker] '}' |
@@ -161,7 +161,7 @@ argument: ( test [comp_for] |
star_expr )
comp_iter: comp_for | comp_if
-comp_for: [ASYNC] 'for' exprlist 'in' or_test [comp_iter]
+comp_for: ['async'] 'for' exprlist 'in' or_test [comp_iter]
comp_if: 'if' old_test [comp_iter]
# As noted above, testlist_safe extends the syntax allowed in list
@@ -180,7 +180,7 @@ comp_if: 'if' old_test [comp_iter]
#
# See https://bugs.python.org/issue27494
old_comp_iter: old_comp_for | old_comp_if
-old_comp_for: [ASYNC] 'for' exprlist 'in' testlist_safe [old_comp_iter]
+old_comp_for: ['async'] 'for' exprlist 'in' testlist_safe [old_comp_iter]
old_comp_if: 'if' old_test [old_comp_iter]
testlist1: test (',' test)*
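
pgen treats quoted literals in the grammar as keyword matches against NAME tokens, so swapping the ASYNC/AWAIT terminals for 'async'/'await' literals lets the tokenizer stay keyword-agnostic. A sketch of parsing with the updated grammar, assuming lib2to3's stock driver API:

    from lib2to3 import pygram, pytree
    from lib2to3.pgen2 import driver

    # 'async'/'await' now match keyword literals in Grammar.txt
    # rather than dedicated token types.
    d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
    tree = d.parse_string("async def f():\n    await g()\n")
    print(tree)
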
diff --git a/Lib/lib2to3/pgen2/token.py b/Lib/lib2to3/pgen2/token.py
index 1a67955..7599396 100755
--- a/Lib/lib2to3/pgen2/token.py
+++ b/Lib/lib2to3/pgen2/token.py
@@ -62,10 +62,8 @@ OP = 52
COMMENT = 53
NL = 54
RARROW = 55
-AWAIT = 56
-ASYNC = 57
-ERRORTOKEN = 58
-N_TOKENS = 59
+ERRORTOKEN = 56
+N_TOKENS = 57
NT_OFFSET = 256
#--end constants--
diff --git a/Lib/lib2to3/pgen2/tokenize.py b/Lib/lib2to3/pgen2/tokenize.py
index 45afc5f..14560e4 100644
--- a/Lib/lib2to3/pgen2/tokenize.py
+++ b/Lib/lib2to3/pgen2/tokenize.py
@@ -234,7 +234,7 @@ class Untokenizer:
for tok in iterable:
toknum, tokval = tok[:2]
- if toknum in (NAME, NUMBER, ASYNC, AWAIT):
+ if toknum in (NAME, NUMBER):
tokval += ' '
if toknum == INDENT:
@@ -380,12 +380,6 @@ def generate_tokens(readline):
contline = None
indents = [0]
- # 'stashed' and 'async_*' are used for async/await parsing
- stashed = None
- async_def = False
- async_def_indent = 0
- async_def_nl = False
-
while 1: # loop over lines in stream
try:
line = readline()
@@ -426,10 +420,6 @@ def generate_tokens(readline):
pos = pos + 1
if pos == max: break
- if stashed:
- yield stashed
- stashed = None
-
if line[pos] in '#\r\n': # skip comments or blank lines
if line[pos] == '#':
comment_token = line[pos:].rstrip('\r\n')
@@ -453,18 +443,8 @@ def generate_tokens(readline):
("<tokenize>", lnum, pos, line))
indents = indents[:-1]
- if async_def and async_def_indent >= indents[-1]:
- async_def = False
- async_def_nl = False
- async_def_indent = 0
-
yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
- if async_def and async_def_nl and async_def_indent >= indents[-1]:
- async_def = False
- async_def_nl = False
- async_def_indent = 0
-
else: # continued statement
if not line:
raise TokenError("EOF in multi-line statement", (lnum, 0))
@@ -484,18 +464,10 @@ def generate_tokens(readline):
newline = NEWLINE
if parenlev > 0:
newline = NL
- elif async_def:
- async_def_nl = True
- if stashed:
- yield stashed
- stashed = None
yield (newline, token, spos, epos, line)
elif initial == '#':
assert not token.endswith("\n")
- if stashed:
- yield stashed
- stashed = None
yield (COMMENT, token, spos, epos, line)
elif token in triple_quoted:
endprog = endprogs[token]
@@ -503,9 +475,6 @@ def generate_tokens(readline):
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
- if stashed:
- yield stashed
- stashed = None
yield (STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
@@ -523,63 +492,22 @@ def generate_tokens(readline):
contline = line
break
else: # ordinary string
- if stashed:
- yield stashed
- stashed = None
yield (STRING, token, spos, epos, line)
elif initial in namechars: # ordinary name
- if token in ('async', 'await'):
- if async_def:
- yield (ASYNC if token == 'async' else AWAIT,
- token, spos, epos, line)
- continue
-
- tok = (NAME, token, spos, epos, line)
- if token == 'async' and not stashed:
- stashed = tok
- continue
-
- if token == 'def':
- if (stashed
- and stashed[0] == NAME
- and stashed[1] == 'async'):
-
- async_def = True
- async_def_indent = indents[-1]
-
- yield (ASYNC, stashed[1],
- stashed[2], stashed[3],
- stashed[4])
- stashed = None
-
- if stashed:
- yield stashed
- stashed = None
-
- yield tok
+ yield (NAME, token, spos, epos, line)
elif initial == '\\': # continued stmt
# This yield is new; needed for better idempotency:
- if stashed:
- yield stashed
- stashed = None
yield (NL, token, spos, (lnum, pos), line)
continued = 1
else:
if initial in '([{': parenlev = parenlev + 1
elif initial in ')]}': parenlev = parenlev - 1
- if stashed:
- yield stashed
- stashed = None
yield (OP, token, spos, epos, line)
else:
yield (ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos+1), line)
pos = pos + 1
- if stashed:
- yield stashed
- stashed = None
-
for indent in indents[1:]: # pop remaining indent levels
yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
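
With the stashing state machine removed, the lib2to3 tokenizer treats 'async' and 'await' as ordinary names in every context. A small check (illustrative, per this tree):

    import io
    from lib2to3.pgen2 import token, tokenize

    src = "async def f():\n    await g()\n"
    for tok in tokenize.generate_tokens(io.StringIO(src).readline):
        print(token.tok_name.get(tok[0], tok[0]), repr(tok[1]))
    # Both words now print as NAME; keyword recognition is the
    # parser's job, driven by the grammar change above.
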
diff --git a/Lib/lib2to3/tests/test_parser.py b/Lib/lib2to3/tests/test_parser.py
index 2efcb80..dc94a69 100644
--- a/Lib/lib2to3/tests/test_parser.py
+++ b/Lib/lib2to3/tests/test_parser.py
@@ -167,34 +167,34 @@ class TestAsyncAwait(GrammarTest):
async def foo(): await x
""")
- self.invalid_syntax("await x")
- self.invalid_syntax("""def foo():
- await x""")
+ self.validate("await x")
+ self.validate("""def foo():
+ await x""")
- self.invalid_syntax("""def foo():
+ self.validate("""def foo():
def foo(): pass
async def foo(): pass
await x
""")
def test_async_var(self):
- self.validate("""async = 1""")
- self.validate("""await = 1""")
- self.validate("""def async(): pass""")
+ self.invalid_syntax("""async = 1""")
+ self.invalid_syntax("""await = 1""")
+ self.invalid_syntax("""def async(): pass""")
def test_async_with(self):
self.validate("""async def foo():
async for a in b: pass""")
- self.invalid_syntax("""def foo():
- async for a in b: pass""")
+ self.validate("""def foo():
+ async for a in b: pass""")
def test_async_for(self):
self.validate("""async def foo():
async with a: pass""")
- self.invalid_syntax("""def foo():
- async with a: pass""")
+ self.validate("""def foo():
+ async with a: pass""")
class TestRaiseChanges(GrammarTest):
@@ -477,3 +477,7 @@ def diff(fn, result):
os.remove("@")
except OSError:
pass
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/Lib/pydoc.py b/Lib/pydoc.py
index 8dc3c0a..01f7a32 100644
--- a/Lib/pydoc.py
+++ b/Lib/pydoc.py
@@ -1703,7 +1703,7 @@ class Helper:
# in pydoc_data/topics.py.
#
# CAUTION: if you change one of these dictionaries, be sure to adapt the
- # list of needed labels in Doc/tools/pyspecific.py and
+ # list of needed labels in Doc/tools/extensions/pyspecific.py and
# regenerate the pydoc_data/topics.py file by running
# make pydoc-topics
# in Doc/ and copying the output file into the Lib/ directory.
@@ -1715,6 +1715,8 @@ class Helper:
'and': 'BOOLEAN',
'as': 'with',
'assert': ('assert', ''),
+ 'async': ('async', ''),
+ 'await': ('await', ''),
'break': ('break', 'while for'),
'class': ('class', 'CLASSES SPECIALMETHODS'),
'continue': ('continue', 'while for'),
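
The two new entries wire the keywords into interactive help; e.g. (illustrative, after regenerating pydoc_data/topics.py as the comment above describes):

    >>> help("async")    # renders the 'async' keyword topic
    >>> help("await")    # likewise for 'await'
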
diff --git a/Lib/symbol.py b/Lib/symbol.py
index d9f01e0..dc7dcba 100755
--- a/Lib/symbol.py
+++ b/Lib/symbol.py
@@ -91,11 +91,12 @@ classdef = 333
arglist = 334
argument = 335
comp_iter = 336
-comp_for = 337
-comp_if = 338
-encoding_decl = 339
-yield_expr = 340
-yield_arg = 341
+sync_comp_for = 337
+comp_for = 338
+comp_if = 339
+encoding_decl = 340
+yield_expr = 341
+yield_arg = 342
#--end constants--
sym_name = {}
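
Note the knock-on renumbering: inserting sync_comp_for at 337 shifts every later symbol up by one, which is why test_parser.py below updates its hard-coded encoding_decl trees from 339 to 340. A quick consistency check (per this tree):

    import symbol

    assert symbol.sym_name[337] == "sync_comp_for"
    assert symbol.sym_name[340] == "encoding_decl"
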
diff --git a/Lib/test/test_asyncio/test_tasks.py b/Lib/test/test_asyncio/test_tasks.py
index 7ff56b5..e23963a 100644
--- a/Lib/test/test_asyncio/test_tasks.py
+++ b/Lib/test/test_asyncio/test_tasks.py
@@ -231,12 +231,6 @@ class BaseTaskTests:
with self.assertRaises(TypeError):
asyncio.ensure_future('ok')
- def test_async_warning(self):
- f = self.new_future(self.loop)
- with self.assertWarnsRegex(DeprecationWarning,
- 'function is deprecated, use ensure_'):
- self.assertIs(f, asyncio.async(f))
-
def test_get_stack(self):
T = None
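
The deleted test covered the deprecated asyncio.async() alias, which can no longer even be spelled as an attribute access now that async is a keyword; asyncio.ensure_future() is the documented replacement (sketch):

    import asyncio

    async def work():
        return 42

    loop = asyncio.new_event_loop()
    # asyncio.async(work()) is now a SyntaxError; use ensure_future.
    task = asyncio.ensure_future(work(), loop=loop)
    print(loop.run_until_complete(task))   # 42
    loop.close()
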
diff --git a/Lib/test/test_coroutines.py b/Lib/test/test_coroutines.py
index 2b79a17..ebd880b 100644
--- a/Lib/test/test_coroutines.py
+++ b/Lib/test/test_coroutines.py
@@ -394,20 +394,14 @@ class AsyncBadSyntaxTest(unittest.TestCase):
]
for code in samples:
- with self.subTest(code=code), self.assertWarnsRegex(
- DeprecationWarning,
- "'await' will become reserved keywords"):
+ with self.subTest(code=code), self.assertRaises(SyntaxError):
compile(code, "<test>", "exec")
def test_badsyntax_3(self):
- with self.assertRaises(DeprecationWarning):
- with warnings.catch_warnings():
- warnings.simplefilter("error")
- compile("async = 1", "<test>", "exec")
-
- def test_goodsyntax_1(self):
- # Tests for issue 24619
+ with self.assertRaises(SyntaxError):
+ compile("async = 1", "<test>", "exec")
+ def test_badsyntax_4(self):
samples = [
'''def foo(await):
async def foo(): pass
@@ -454,14 +448,8 @@ class AsyncBadSyntaxTest(unittest.TestCase):
]
for code in samples:
- with self.subTest(code=code):
- loc = {}
-
- with warnings.catch_warnings():
- warnings.simplefilter("ignore")
- exec(code, loc, loc)
-
- self.assertEqual(loc['foo'](10), 11)
+ with self.subTest(code=code), self.assertRaises(SyntaxError):
+ compile(code, "<test>", "exec")
class TokenizerRegrTest(unittest.TestCase):
diff --git a/Lib/test/test_parser.py b/Lib/test/test_parser.py
index 70cabb2..647d391 100644
--- a/Lib/test/test_parser.py
+++ b/Lib/test/test_parser.py
@@ -679,16 +679,16 @@ class IllegalSyntaxTestCase(unittest.TestCase):
def test_illegal_encoding(self):
# Illegal encoding declaration
tree = \
- (339,
+ (340,
(257, (0, '')))
self.check_bad_tree(tree, "missed encoding")
tree = \
- (339,
+ (340,
(257, (0, '')),
b'iso-8859-1')
self.check_bad_tree(tree, "non-string encoding")
tree = \
- (339,
+ (340,
(257, (0, '')),
'\udcff')
with self.assertRaises(UnicodeEncodeError):
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 21eee6d..3520a67 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -759,7 +759,7 @@ def"', """\
""")
self.check_tokenize("async def foo(): pass", """\
- ASYNC 'async' (1, 0) (1, 5)
+ NAME 'async' (1, 0) (1, 5)
NAME 'def' (1, 6) (1, 9)
NAME 'foo' (1, 10) (1, 13)
OP '(' (1, 13) (1, 14)
@@ -776,7 +776,7 @@ async def foo():
await
async += 1
''', """\
- ASYNC 'async' (1, 0) (1, 5)
+ NAME 'async' (1, 0) (1, 5)
NAME 'def' (1, 6) (1, 9)
NAME 'foo' (1, 10) (1, 13)
OP '(' (1, 13) (1, 14)
@@ -787,12 +787,12 @@ async += 1
NAME 'def' (2, 2) (2, 5)
NAME 'foo' (2, 6) (2, 9)
OP '(' (2, 9) (2, 10)
- AWAIT 'await' (2, 10) (2, 15)
+ NAME 'await' (2, 10) (2, 15)
OP ')' (2, 15) (2, 16)
OP ':' (2, 16) (2, 17)
NEWLINE '\\n' (2, 17) (2, 18)
INDENT ' ' (3, 0) (3, 4)
- AWAIT 'await' (3, 4) (3, 9)
+ NAME 'await' (3, 4) (3, 9)
OP '=' (3, 10) (3, 11)
NUMBER '1' (3, 12) (3, 13)
NEWLINE '\\n' (3, 13) (3, 14)
@@ -802,7 +802,7 @@ async += 1
OP ':' (4, 6) (4, 7)
NEWLINE '\\n' (4, 7) (4, 8)
INDENT ' ' (5, 0) (5, 4)
- AWAIT 'await' (5, 4) (5, 9)
+ NAME 'await' (5, 4) (5, 9)
NEWLINE '\\n' (5, 9) (5, 10)
DEDENT '' (6, 0) (6, 0)
DEDENT '' (6, 0) (6, 0)
@@ -815,7 +815,7 @@ async += 1
self.check_tokenize('''\
async def foo():
async for i in 1: pass''', """\
- ASYNC 'async' (1, 0) (1, 5)
+ NAME 'async' (1, 0) (1, 5)
NAME 'def' (1, 6) (1, 9)
NAME 'foo' (1, 10) (1, 13)
OP '(' (1, 13) (1, 14)
@@ -823,7 +823,7 @@ async def foo():
OP ':' (1, 15) (1, 16)
NEWLINE '\\n' (1, 16) (1, 17)
INDENT ' ' (2, 0) (2, 2)
- ASYNC 'async' (2, 2) (2, 7)
+ NAME 'async' (2, 2) (2, 7)
NAME 'for' (2, 8) (2, 11)
NAME 'i' (2, 12) (2, 13)
NAME 'in' (2, 14) (2, 16)
@@ -834,14 +834,14 @@ async def foo():
""")
self.check_tokenize('''async def foo(async): await''', """\
- ASYNC 'async' (1, 0) (1, 5)
+ NAME 'async' (1, 0) (1, 5)
NAME 'def' (1, 6) (1, 9)
NAME 'foo' (1, 10) (1, 13)
OP '(' (1, 13) (1, 14)
- ASYNC 'async' (1, 14) (1, 19)
+ NAME 'async' (1, 14) (1, 19)
OP ')' (1, 19) (1, 20)
OP ':' (1, 20) (1, 21)
- AWAIT 'await' (1, 22) (1, 27)
+ NAME 'await' (1, 22) (1, 27)
""")
self.check_tokenize('''\
@@ -866,7 +866,7 @@ def f():
OP ':' (3, 11) (3, 12)
NAME 'pass' (3, 13) (3, 17)
NEWLINE '\\n' (3, 17) (3, 18)
- ASYNC 'async' (4, 2) (4, 7)
+ NAME 'async' (4, 2) (4, 7)
NAME 'def' (4, 8) (4, 11)
NAME 'bar' (4, 12) (4, 15)
OP '(' (4, 15) (4, 16)
@@ -888,7 +888,7 @@ async def f():
async def bar(): pass
await = 2''', """\
- ASYNC 'async' (1, 0) (1, 5)
+ NAME 'async' (1, 0) (1, 5)
NAME 'def' (1, 6) (1, 9)
NAME 'f' (1, 10) (1, 11)
OP '(' (1, 11) (1, 12)
@@ -904,7 +904,7 @@ async def f():
OP ':' (3, 11) (3, 12)
NAME 'pass' (3, 13) (3, 17)
NEWLINE '\\n' (3, 17) (3, 18)
- ASYNC 'async' (4, 2) (4, 7)
+ NAME 'async' (4, 2) (4, 7)
NAME 'def' (4, 8) (4, 11)
NAME 'bar' (4, 12) (4, 15)
OP '(' (4, 15) (4, 16)
@@ -913,7 +913,7 @@ async def f():
NAME 'pass' (4, 19) (4, 23)
NEWLINE '\\n' (4, 23) (4, 24)
NL '\\n' (5, 0) (5, 1)
- AWAIT 'await' (6, 2) (6, 7)
+ NAME 'await' (6, 2) (6, 7)
OP '=' (6, 8) (6, 9)
NUMBER '2' (6, 10) (6, 11)
DEDENT '' (7, 0) (7, 0)
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index 5fa4152..f5c6ac7 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -491,12 +491,6 @@ def _tokenize(readline, encoding):
contline = None
indents = [0]
- # 'stashed' and 'async_*' are used for async/await parsing
- stashed = None
- async_def = False
- async_def_indent = 0
- async_def_nl = False
-
if encoding is not None:
if encoding == "utf-8-sig":
# BOM will already have been stripped.
@@ -571,18 +565,8 @@ def _tokenize(readline, encoding):
("<tokenize>", lnum, pos, line))
indents = indents[:-1]
- if async_def and async_def_indent >= indents[-1]:
- async_def = False
- async_def_nl = False
- async_def_indent = 0
-
yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)
- if async_def and async_def_nl and async_def_indent >= indents[-1]:
- async_def = False
- async_def_nl = False
- async_def_indent = 0
-
else: # continued statement
if not line:
raise TokenError("EOF in multi-line statement", (lnum, 0))
@@ -601,21 +585,13 @@ def _tokenize(readline, encoding):
(initial == '.' and token != '.' and token != '...')):
yield TokenInfo(NUMBER, token, spos, epos, line)
elif initial in '\r\n':
- if stashed:
- yield stashed
- stashed = None
if parenlev > 0:
yield TokenInfo(NL, token, spos, epos, line)
else:
yield TokenInfo(NEWLINE, token, spos, epos, line)
- if async_def:
- async_def_nl = True
elif initial == '#':
assert not token.endswith("\n")
- if stashed:
- yield stashed
- stashed = None
yield TokenInfo(COMMENT, token, spos, epos, line)
elif token in triple_quoted:
@@ -662,36 +638,7 @@ def _tokenize(readline, encoding):
yield TokenInfo(STRING, token, spos, epos, line)
elif initial.isidentifier(): # ordinary name
- if token in ('async', 'await'):
- if async_def:
- yield TokenInfo(
- ASYNC if token == 'async' else AWAIT,
- token, spos, epos, line)
- continue
-
- tok = TokenInfo(NAME, token, spos, epos, line)
- if token == 'async' and not stashed:
- stashed = tok
- continue
-
- if token == 'def':
- if (stashed
- and stashed.type == NAME
- and stashed.string == 'async'):
-
- async_def = True
- async_def_indent = indents[-1]
-
- yield TokenInfo(ASYNC, stashed.string,
- stashed.start, stashed.end,
- stashed.line)
- stashed = None
-
- if stashed:
- yield stashed
- stashed = None
-
- yield tok
+ yield TokenInfo(NAME, token, spos, epos, line)
elif initial == '\\': # continued stmt
continued = 1
else:
@@ -699,19 +646,12 @@ def _tokenize(readline, encoding):
parenlev += 1
elif initial in ')]}':
parenlev -= 1
- if stashed:
- yield stashed
- stashed = None
yield TokenInfo(OP, token, spos, epos, line)
else:
yield TokenInfo(ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos+1), line)
pos += 1
- if stashed:
- yield stashed
- stashed = None
-
for indent in indents[1:]: # pop remaining indent levels
yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
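
Mirroring the lib2to3 change, the stdlib tokenizer now emits plain NAME tokens for async/await in every context; a minimal check against this tree's tokenize.py:

    import io
    import tokenize

    src = "async def f():\n    await g()\n"
    for tok in tokenize.generate_tokens(io.StringIO(src).readline):
        print(tokenize.tok_name[tok.type], repr(tok.string))
    # 'async' and 'await' both report as NAME, matching the
    # expectations updated in test_tokenize.py above.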