author     Guido van Rossum <guido@python.org>  2007-05-15 18:46:22 (GMT)
committer  Guido van Rossum <guido@python.org>  2007-05-15 18:46:22 (GMT)
commit     1bc535dc7854b6be009a6bf3413a3a470e3fe749 (patch)
tree       7a43646468849a9ae624bd4314ff26b7b0e30f21 /Parser
parent     360e4b8fb19f34360093bc15ef9aad13115a6069 (diff)
Merged revisions 55328-55341 via svnmerge from
svn+ssh://pythondev@svn.python.org/python/branches/p3yk

........
  r55329 | brett.cannon | 2007-05-14 16:36:56 -0700 (Mon, 14 May 2007) | 3 lines

  Implement the removal of tuple parameter unpacking (PEP 3113).
  Thanks, Tony Lownds for the patch.
........
  r55331 | neal.norwitz | 2007-05-14 16:40:30 -0700 (Mon, 14 May 2007) | 1 line

  Update to use Python 3.0
........
  r55332 | brett.cannon | 2007-05-14 16:47:18 -0700 (Mon, 14 May 2007) | 2 lines

  Mention PEP 3113. And thanks to Tony Lownds for the PEP 3113 patch.
........
  r55333 | neal.norwitz | 2007-05-14 16:57:06 -0700 (Mon, 14 May 2007) | 1 line

  Fix exception printing (no more exceptions module)
........
  r55334 | neal.norwitz | 2007-05-14 17:11:10 -0700 (Mon, 14 May 2007) | 1 line

  Remove popen* functions from os
........
  r55335 | neal.norwitz | 2007-05-14 18:03:38 -0700 (Mon, 14 May 2007) | 1 line

  Get rid of most of popen. There are still some uses I need to cleanup.
........
  r55336 | neal.norwitz | 2007-05-14 21:11:34 -0700 (Mon, 14 May 2007) | 1 line

  Remove a few more remnants of the compiler package
........
  r55337 | neal.norwitz | 2007-05-14 22:28:27 -0700 (Mon, 14 May 2007) | 1 line

  Get test_[cx]pickle working on 64-bit platforms (avoid overflow int/long)
........
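The headline change merged here is PEP 3113, which removes tuple parameter unpacking from Python 3.0: a parenthesized tuple is no longer a legal function parameter. As a minimal sketch of the rewrite this forces (the function name fxn is illustrative, not from this commit):

    # Python 2.x: the interpreter unpacked the tuple parameter automatically.
    # def fxn(a, (b, c)):
    #     return a + b + c

    # Python 3.0: accept a single parameter and unpack it explicitly.
    def fxn(a, bc):
        b, c = bc
        return a + b + c

Every hunk in the Parser diff below is a variation on this pattern.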
Diffstat (limited to 'Parser')
-rw-r--r--  Parser/Python.asdl   3
-rw-r--r--  Parser/asdl.py      79
-rw-r--r--  Parser/spark.py      8
3 files changed, 51 insertions, 39 deletions
diff --git a/Parser/Python.asdl b/Parser/Python.asdl
index 96d0022..c122089 100644
--- a/Parser/Python.asdl
+++ b/Parser/Python.asdl
@@ -110,8 +110,7 @@ module Python version "$Revision$"
arg* kwonlyargs, identifier? kwarg,
expr? kwargannotation, expr* defaults,
expr* kw_defaults)
- arg = SimpleArg(identifier arg, expr? annotation)
- | NestedArgs(arg* args)
+ arg = (identifier arg, expr? annotation)

-- keyword arguments supplied to call
keyword = (identifier arg, expr value)
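The ASDL change follows directly from PEP 3113: NestedArgs existed only to represent tuple parameters such as def f(a, (b, c)), so with that form gone, arg collapses from a sum of SimpleArg and NestedArgs into a plain product of a name and an optional annotation. One way to observe the resulting node shape from Python 3 itself (a hedged illustration; dump details vary by version):

    import ast

    # After this change, every parameter is a flat arg node:
    # an identifier plus an optional annotation expression.
    tree = ast.parse("def f(a, b: int): pass")
    for arg in tree.body[0].args.args:
        print(arg.arg, arg.annotation)  # 'a' with None, then 'b' with a Name node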
diff --git a/Parser/asdl.py b/Parser/asdl.py
index b1afd0f..a3701e6 100644
--- a/Parser/asdl.py
+++ b/Parser/asdl.py
@@ -115,49 +115,54 @@ class ASDLParser(spark.GenericParser, object):
def error(self, tok):
raise ASDLSyntaxError(tok.lineno, tok)
- def p_module_0(self, (module, name, version, _0, _1)):
+ def p_module_0(self, info):
" module ::= Id Id version { } "
+ module, name, version, _0, _1 = info
if module.value != "module":
raise ASDLSyntaxError(module.lineno,
msg="expected 'module', found %s" % module)
return Module(name, None, version)
- def p_module(self, (module, name, version, _0, definitions, _1)):
+ def p_module(self, info):
" module ::= Id Id version { definitions } "
+ module, name, version, _0, definitions, _1 = info
if module.value != "module":
raise ASDLSyntaxError(module.lineno,
msg="expected 'module', found %s" % module)
return Module(name, definitions, version)
- def p_version(self, (version, V)):
+ def p_version(self, info):
"version ::= Id String"
+ version, V = info
if version.value != "version":
raise ASDLSyntaxError(version.lineno,
msg="expected 'version', found %s" % version)
return V
- def p_definition_0(self, (definition,)):
+ def p_definition_0(self, definition):
" definitions ::= definition "
- return definition
+ return definition[0]
- def p_definition_1(self, (definitions, definition)):
+ def p_definition_1(self, definitions):
" definitions ::= definition definitions "
- return definitions + definition
+ return definitions[0] + definitions[1]
- def p_definition(self, (id, _, type)):
+ def p_definition(self, info):
" definition ::= Id = type "
+ id, _, type = info
return [Type(id, type)]
- def p_type_0(self, (product,)):
+ def p_type_0(self, product):
" type ::= product "
- return product
+ return product[0]
- def p_type_1(self, (sum,)):
+ def p_type_1(self, sum):
" type ::= sum "
- return Sum(sum)
+ return Sum(sum[0])
- def p_type_2(self, (sum, id, _0, attributes, _1)):
+ def p_type_2(self, info):
" type ::= sum Id ( fields ) "
+ sum, id, _0, attributes, _1 = info
if id.value != "attributes":
raise ASDLSyntaxError(id.lineno,
msg="expected attributes, found %s" % id)
@@ -165,65 +170,73 @@ class ASDLParser(spark.GenericParser, object):
attributes.reverse()
return Sum(sum, attributes)
- def p_product(self, (_0, fields, _1)):
+ def p_product(self, info):
" product ::= ( fields ) "
+ _0, fields, _1 = info
# XXX can't I just construct things in the right order?
fields.reverse()
return Product(fields)
- def p_sum_0(self, (constructor,)):
+ def p_sum_0(self, constructor):
" sum ::= constructor "
- return [constructor]
+ return [constructor[0]]
- def p_sum_1(self, (constructor, _, sum)):
+ def p_sum_1(self, info):
" sum ::= constructor | sum "
+ constructor, _, sum = info
return [constructor] + sum
- def p_sum_2(self, (constructor, _, sum)):
+ def p_sum_2(self, info):
" sum ::= constructor | sum "
+ constructor, _, sum = info
return [constructor] + sum
- def p_constructor_0(self, (id,)):
+ def p_constructor_0(self, id):
" constructor ::= Id "
- return Constructor(id)
+ return Constructor(id[0])
- def p_constructor_1(self, (id, _0, fields, _1)):
+ def p_constructor_1(self, info):
" constructor ::= Id ( fields ) "
+ id, _0, fields, _1 = info
# XXX can't I just construct things in the right order?
fields.reverse()
return Constructor(id, fields)
- def p_fields_0(self, (field,)):
+ def p_fields_0(self, field):
" fields ::= field "
- return [field]
+ return [field[0]]
- def p_fields_1(self, (field, _, fields)):
+ def p_fields_1(self, info):
" fields ::= field , fields "
+ field, _, fields = info
return fields + [field]
- def p_field_0(self, (type,)):
+ def p_field_0(self, type_):
" field ::= Id "
- return Field(type)
+ return Field(type_[0])
- def p_field_1(self, (type, name)):
+ def p_field_1(self, info):
" field ::= Id Id "
+ type, name = info
return Field(type, name)
- def p_field_2(self, (type, _, name)):
+ def p_field_2(self, info):
" field ::= Id * Id "
+ type, _, name = info
return Field(type, name, seq=1)
- def p_field_3(self, (type, _, name)):
+ def p_field_3(self, info):
" field ::= Id ? Id "
+ type, _, name = info
return Field(type, name, opt=1)
- def p_field_4(self, (type, _)):
+ def p_field_4(self, type_):
" field ::= Id * "
- return Field(type, seq=1)
+ return Field(type_[0], seq=1)
- def p_field_5(self, (type, _)):
+ def p_field_5(self, type_):
" field ::= Id ? "
- return Field(type, opt=1)
+ return Field(type_[0], opt=1)
builtin_types = ("identifier", "string", "int", "bool", "object")
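All of these edits follow one mechanical recipe, dictated by how spark drives the parser: GenericParser reads each grammar production from the p_* method's docstring and invokes the method with the matched right-hand-side symbols packed into a single sequence. Since Python 3 can no longer unpack that sequence in the signature, each method now unpacks it in the body. A minimal sketch, using a hypothetical rule that is not part of this grammar:

    # Python 2 only: a tuple parameter in the signature (a SyntaxError in 3.0).
    # def p_pair(self, (first, second)):
    #     " pair ::= Id Id "
    #     return (first, second)

    # Python 3 style, as applied throughout this file.
    def p_pair(self, info):
        " pair ::= Id Id "
        first, second = info
        return (first, second)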
diff --git a/Parser/spark.py b/Parser/spark.py
index 7035077..0fc6945 100644
--- a/Parser/spark.py
+++ b/Parser/spark.py
@@ -353,10 +353,10 @@ class GenericParser:
#
return self._NULLABLE == sym[0:len(self._NULLABLE)]
- def skip(self, (lhs, rhs), pos=0):
- n = len(rhs)
+ def skip(self, hs, pos=0):
+ n = len(hs[1])
while pos < n:
- if not self.isnullable(rhs[pos]):
+ if not self.isnullable(hs[1][pos]):
break
pos = pos + 1
return pos
@@ -671,7 +671,7 @@ class GenericParser:
sortlist.append((len(rhs), name))
name2index[name] = i
sortlist.sort()
- list = map(lambda (a,b): b, sortlist)
+ list = [b for a, b in sortlist]
return rules[name2index[self.resolve(list)]]
def resolve(self, list):
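PEP 3113 removes tuple parameters from lambda as well, so lambda (a,b): b has no direct Python 3 spelling; the list comprehension unpacks each (length, name) pair instead. A small demonstration with made-up data:

    sortlist = [(2, 'ab'), (1, 'c')]

    # Python 2 only: a lambda with a tuple parameter is now a SyntaxError.
    # names = map(lambda (a, b): b, sortlist)

    # Python 3 replacement used above: unpack each pair in a comprehension.
    names = [b for a, b in sortlist]
    assert names == ['ab', 'c']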