author     Benjamin Peterson <benjamin@python.org>  2009-05-09 19:42:23 (GMT)
committer  Benjamin Peterson <benjamin@python.org>  2009-05-09 19:42:23 (GMT)
commit     d481e3d7914d20238c62c76991255b3b2b5e4a17 (patch)
tree       fb9a3831c561486f09fde515d41410c3f8753007 /Lib/lib2to3/pgen2
parent     b0ba27dff1442fe6dc7b00ce7d8488afb159d9b8 (diff)
Merged revisions 72494 via svnmerge from
svn+ssh://pythondev@svn.python.org/python/trunk

................
  r72494 | benjamin.peterson | 2009-05-08 20:01:14 -0500 (Fri, 08 May 2009) | 21 lines

  Merged revisions 72491-72493 via svnmerge from
  svn+ssh://pythondev@svn.python.org/sandbox/trunk/2to3/lib2to3

  ........
    r72491 | benjamin.peterson | 2009-05-08 19:33:27 -0500 (Fri, 08 May 2009) | 7 lines

    make 2to3 use unicode internally on 2.x

    This started out as a fix for #2660, but became this large refactoring
    when I realized the dire state this was in. 2to3 now uses
    tokenize.detect_encoding to decode the files correctly into unicode.
  ........
    r72492 | benjamin.peterson | 2009-05-08 19:35:38 -0500 (Fri, 08 May 2009) | 1 line

    remove compat code
  ........
    r72493 | benjamin.peterson | 2009-05-08 19:54:15 -0500 (Fri, 08 May 2009) | 1 line

    add a test for \r\n newlines
  ........
................
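
(For context: a minimal sketch, not code from this commit, of how a caller
might combine the new detect_encoding() with codecs.open() to read a source
file as unicode; read_source is a hypothetical helper.)

    import codecs
    from lib2to3.pgen2 import tokenize

    def read_source(filename):
        # Detect the encoding from the raw bytes first; detect_encoding()
        # consumes at most the first two lines of the file.
        with open(filename, "rb") as f:
            encoding, _ = tokenize.detect_encoding(f.readline)
        # Then reopen the file with that encoding to get unicode text.
        with codecs.open(filename, "r", encoding) as f:
            return f.read()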
Diffstat (limited to 'Lib/lib2to3/pgen2')
-rw-r--r--  Lib/lib2to3/pgen2/driver.py     5
-rw-r--r--  Lib/lib2to3/pgen2/tokenize.py  70
2 files changed, 73 insertions(+), 2 deletions(-)
diff --git a/Lib/lib2to3/pgen2/driver.py b/Lib/lib2to3/pgen2/driver.py
index a025b37..ee77a13 100644
--- a/Lib/lib2to3/pgen2/driver.py
+++ b/Lib/lib2to3/pgen2/driver.py
@@ -16,6 +16,7 @@ __author__ = "Guido van Rossum <guido@python.org>"
 __all__ = ["Driver", "load_grammar"]
 
 # Python imports
+import codecs
 import os
 import logging
 import sys
@@ -90,9 +91,9 @@ class Driver(object):
         """Parse a stream and return the syntax tree."""
         return self.parse_stream_raw(stream, debug)
 
-    def parse_file(self, filename, debug=False):
+    def parse_file(self, filename, encoding=None, debug=False):
         """Parse a file and return the syntax tree."""
-        stream = open(filename)
+        stream = codecs.open(filename, "r", encoding)
         try:
             return self.parse_stream(stream, debug)
         finally:
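
(A minimal usage sketch of the changed API above, assuming a grammar file is
available; the file paths and the pattern of feeding detect_encoding's result
into parse_file are illustrative, not part of this commit.)

    from lib2to3.pgen2 import driver, tokenize

    d = driver.Driver(driver.load_grammar("Grammar.txt"))
    with open("example.py", "rb") as f:
        encoding, _ = tokenize.detect_encoding(f.readline)
    # parse_file now accepts an explicit encoding and opens via codecs.open.
    tree = d.parse_file("example.py", encoding=encoding)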
diff --git a/Lib/lib2to3/pgen2/tokenize.py b/Lib/lib2to3/pgen2/tokenize.py
index 33cfc33..799566b 100644
--- a/Lib/lib2to3/pgen2/tokenize.py
+++ b/Lib/lib2to3/pgen2/tokenize.py
@@ -30,6 +30,7 @@ __credits__ = \
     'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
 
 import string, re
+from codecs import BOM_UTF8, lookup
 from lib2to3.pgen2.token import *
 
 from . import token
@@ -228,6 +229,75 @@ class Untokenizer:
                 startline = False
             toks_append(tokval)
 
+cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
+
+def detect_encoding(readline):
+    """
+    The detect_encoding() function is used to detect the encoding that should
+    be used to decode a Python source file. It requires one argument, readline,
+    in the same way as the tokenize() generator.
+
+    It will call readline a maximum of twice, and return the encoding used
+    (as a string) and a list of any lines (left as bytes) it has read
+    in.
+
+    It detects the encoding from the presence of a utf-8 bom or an encoding
+    cookie as specified in pep-0263. If both a bom and a cookie are present,
+    but disagree, a SyntaxError will be raised. If the encoding cookie is an
+    invalid charset, raise a SyntaxError.
+
+    If no encoding is specified, then the default of 'utf-8' will be returned.
+    """
+    bom_found = False
+    encoding = None
+    def read_or_stop():
+        try:
+            return readline()
+        except StopIteration:
+            return b''
+
+    def find_cookie(line):
+        try:
+            line_string = line.decode('ascii')
+        except UnicodeDecodeError:
+            return None
+
+        matches = cookie_re.findall(line_string)
+        if not matches:
+            return None
+        encoding = matches[0]
+        try:
+            codec = lookup(encoding)
+        except LookupError:
+            # This behaviour mimics the Python interpreter
+            raise SyntaxError("unknown encoding: " + encoding)
+
+        if bom_found and codec.name != 'utf-8':
+            # This behaviour mimics the Python interpreter
+            raise SyntaxError('encoding problem: utf-8')
+        return encoding
+
+    first = read_or_stop()
+    if first.startswith(BOM_UTF8):
+        bom_found = True
+        first = first[3:]
+        if not first:
+            return 'utf-8', []
+
+    encoding = find_cookie(first)
+    if encoding:
+        return encoding, [first]
+
+    second = read_or_stop()
+    if not second:
+        return 'utf-8', [first]
+
+    encoding = find_cookie(second)
+    if encoding:
+        return encoding, [first, second]
+
+    return 'utf-8', [first, second]
+
 def untokenize(iterable):
     """Transform tokens back into Python source code.