author     Victor Stinner <victor.stinner@haypocalc.com>  2010-11-09 01:08:59 (GMT)
committer  Victor Stinner <victor.stinner@haypocalc.com>  2010-11-09 01:08:59 (GMT)
commit     58c0752a33253641c1423fac2d4ef3f623fbcb46 (patch)
tree       2e2ada02342f78d3cc58a4fe23082818c4025b1b /Lib
parent     ae4836df6d0ea92d778ef30bd37417d048fc37fc (diff)
Issue #10335: Add tokenize.open(): detect the file encoding using
tokenize.detect_encoding() and open the file in read-only mode.
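For orientation, a minimal usage sketch of the new helper (the filename
'example.py' is a placeholder, not part of this commit):

    import tokenize

    # Open a Python source file using the encoding declared by its coding
    # cookie or UTF-8 BOM; detect_encoding() falls back to UTF-8 otherwise.
    with tokenize.open('example.py') as fp:
        source = fp.read()
        print(fp.encoding)  # e.g. 'utf-8' or 'iso-8859-15'
        print(fp.mode)      # always 'r': the file is opened read-only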
Diffstat (limited to 'Lib')
-rw-r--r--  Lib/linecache.py           4
-rw-r--r--  Lib/py_compile.py          4
-rwxr-xr-x  Lib/tabnanny.py            5
-rw-r--r--  Lib/test/test_tokenize.py  23
-rw-r--r--  Lib/tokenize.py            15
-rw-r--r--  Lib/trace.py               5
6 files changed, 42 insertions, 14 deletions
diff --git a/Lib/linecache.py b/Lib/linecache.py
index 974b1d9..c3f2c3f 100644
--- a/Lib/linecache.py
+++ b/Lib/linecache.py
@@ -123,9 +123,7 @@ def updatecache(filename, module_globals=None):
         else:
             return []
     try:
-        with open(fullname, 'rb') as fp:
-            coding, line = tokenize.detect_encoding(fp.readline)
-        with open(fullname, 'r', encoding=coding) as fp:
+        with tokenize.open(fullname) as fp:
             lines = fp.readlines()
     except IOError:
         return []
diff --git a/Lib/py_compile.py b/Lib/py_compile.py
index 111893e..d241434 100644
--- a/Lib/py_compile.py
+++ b/Lib/py_compile.py
@@ -104,9 +104,7 @@ def compile(file, cfile=None, dfile=None, doraise=False):
     byte-compile all installed files (or all files in selected
     directories).
     """
-    with open(file, "rb") as f:
-        encoding = tokenize.detect_encoding(f.readline)[0]
-    with open(file, encoding=encoding) as f:
+    with tokenize.open(file) as f:
         try:
             timestamp = int(os.fstat(f.fileno()).st_mtime)
         except AttributeError:
diff --git a/Lib/tabnanny.py b/Lib/tabnanny.py
index 7053fd9..a4d4ef0 100755
--- a/Lib/tabnanny.py
+++ b/Lib/tabnanny.py
@@ -93,11 +93,8 @@ def check(file):
                 check(fullname)
         return
 
-    with open(file, 'rb') as f:
-        encoding, lines = tokenize.detect_encoding(f.readline)
-
     try:
-        f = open(file, encoding=encoding)
+        f = tokenize.open(file)
     except IOError as msg:
         errprint("%r: I/O Error: %s" % (file, msg))
         return
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 10e59b9..f98efcb 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -564,7 +564,8 @@ Non-ascii identifiers
 
 from test import support
 from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP,
-                      STRING, ENDMARKER, tok_name, detect_encoding)
+                      STRING, ENDMARKER, tok_name, detect_encoding,
+                      open as tokenize_open)
 from io import BytesIO
 from unittest import TestCase
 import os, sys, glob
@@ -857,6 +858,26 @@ class TestDetectEncoding(TestCase):
         readline = self.get_readline((b'# coding: bad\n',))
         self.assertRaises(SyntaxError, detect_encoding, readline)
 
+    def test_open(self):
+        filename = support.TESTFN + '.py'
+        self.addCleanup(support.unlink, filename)
+
+        # test coding cookie
+        for encoding in ('iso-8859-15', 'utf-8'):
+            with open(filename, 'w', encoding=encoding) as fp:
+                print("# coding: %s" % encoding, file=fp)
+                print("print('euro:\u20ac')", file=fp)
+            with tokenize_open(filename) as fp:
+                assert fp.encoding == encoding
+                assert fp.mode == 'r'
+
+        # test BOM (no coding cookie)
+        with open(filename, 'w', encoding='utf-8-sig') as fp:
+            print("print('euro:\u20ac')", file=fp)
+        with tokenize_open(filename) as fp:
+            assert fp.encoding == 'utf-8-sig'
+            assert fp.mode == 'r'
+
 class TestTokenize(TestCase):
 
     def test_tokenize(self):
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index eb58831..7745412 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -29,6 +29,7 @@ import sys
 from token import *
 from codecs import lookup, BOM_UTF8
 import collections
+from io import TextIOWrapper
 cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
 
 import token
@@ -335,6 +336,20 @@ def detect_encoding(readline):
         return default, [first, second]
 
 
+_builtin_open = open
+
+def open(filename):
+    """Open a file in read only mode using the encoding detected by
+    detect_encoding().
+    """
+    buffer = _builtin_open(filename, 'rb')
+    encoding, lines = detect_encoding(buffer.readline)
+    buffer.seek(0)
+    text = TextIOWrapper(buffer, encoding, line_buffering=True)
+    text.mode = 'r'
+    return text
+
+
 def tokenize(readline):
     """
     The tokenize() generator requires one argment, readline, which
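The new open() leans on the contract of detect_encoding(): it returns the
detected encoding together with the raw lines it consumed while searching
for a BOM or coding cookie. Rather than stitching those consumed lines back
onto the stream, open() simply rewinds the binary buffer with seek(0) and
wraps it in a TextIOWrapper. A small sketch of that contract (the byte
string is illustrative only):

    from io import BytesIO
    from tokenize import detect_encoding

    # detect_encoding() reads at most two lines looking for a coding cookie.
    source = BytesIO(b"# coding: iso-8859-15\nprint('hi')\n")
    encoding, consumed = detect_encoding(source.readline)
    print(encoding)  # 'iso-8859-15'
    print(consumed)  # [b'# coding: iso-8859-15\n'] -- the line already read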
diff --git a/Lib/trace.py b/Lib/trace.py
index 8ea4b89..b50aa02 100644
--- a/Lib/trace.py
+++ b/Lib/trace.py
@@ -432,10 +432,9 @@ def find_strings(filename, encoding=None):
 def find_executable_linenos(filename):
     """Return dict where keys are line numbers in the line number table."""
     try:
-        with io.FileIO(filename, 'r') as file:
-            encoding, lines = tokenize.detect_encoding(file.readline)
-        with open(filename, "r", encoding=encoding) as f:
+        with tokenize.open(filename) as f:
             prog = f.read()
+            encoding = f.encoding
     except IOError as err:
         print(("Not printing coverage data for %r: %s"
                % (filename, err)), file=sys.stderr)
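One subtlety in this last hunk: the new code still binds a local encoding
value, now taken from the file object's encoding attribute (set by
TextIOWrapper), because, judging by the hunk header above, code further down
in find_executable_linenos (not shown here) still passes it to
find_strings(filename, encoding).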