author    Victor Stinner <victor.stinner@haypocalc.com>  2010-11-09 01:08:59 (GMT)
committer Victor Stinner <victor.stinner@haypocalc.com>  2010-11-09 01:08:59 (GMT)
commit    58c0752a33253641c1423fac2d4ef3f623fbcb46 (patch)
tree      2e2ada02342f78d3cc58a4fe23082818c4025b1b /Lib/test/test_tokenize.py
parent    ae4836df6d0ea92d778ef30bd37417d048fc37fc (diff)
Issue #10335: Add tokenize.open(), which detects the file encoding using
tokenize.detect_encoding() and opens the file in read-only mode.
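
For context, tokenize.open() replaces the manual two-step pattern of calling
detect_encoding() on raw bytes and then reopening the file as text. A minimal
usage sketch (the file name 'example.py' is a placeholder, not part of the
commit):

    import tokenize

    # Open a Python source file in read-only text mode; the encoding is
    # detected from a BOM or a "# coding: ..." cookie, falling back to
    # UTF-8, as tokenize.detect_encoding() would report it.
    with tokenize.open('example.py') as fp:
        source = fp.read()
        print(fp.encoding)  # e.g. 'utf-8' or 'iso-8859-15'
        print(fp.mode)      # 'r'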
Diffstat (limited to 'Lib/test/test_tokenize.py')
-rw-r--r--  Lib/test/test_tokenize.py  23
1 file changed, 22 insertions(+), 1 deletion(-)
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 10e59b9..f98efcb 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -564,7 +564,8 @@ Non-ascii identifiers
 from test import support
 from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP,
-                      STRING, ENDMARKER, tok_name, detect_encoding)
+                      STRING, ENDMARKER, tok_name, detect_encoding,
+                      open as tokenize_open)
 from io import BytesIO
 from unittest import TestCase
 import os, sys, glob
@@ -857,6 +858,26 @@ class TestDetectEncoding(TestCase):
         readline = self.get_readline((b'# coding: bad\n',))
         self.assertRaises(SyntaxError, detect_encoding, readline)
+    def test_open(self):
+        filename = support.TESTFN + '.py'
+        self.addCleanup(support.unlink, filename)
+
+        # test coding cookie
+        for encoding in ('iso-8859-15', 'utf-8'):
+            with open(filename, 'w', encoding=encoding) as fp:
+                print("# coding: %s" % encoding, file=fp)
+                print("print('euro:\u20ac')", file=fp)
+            with tokenize_open(filename) as fp:
+                self.assertEqual(fp.encoding, encoding)
+                self.assertEqual(fp.mode, 'r')
+
+        # test BOM (no coding cookie)
+        with open(filename, 'w', encoding='utf-8-sig') as fp:
+            print("print('euro:\u20ac')", file=fp)
+        with tokenize_open(filename) as fp:
+            self.assertEqual(fp.encoding, 'utf-8-sig')
+            self.assertEqual(fp.mode, 'r')
+
 class TestTokenize(TestCase):
     def test_tokenize(self):
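
The change to Lib/tokenize.py itself is outside this diffstat's scope and not
shown above. For orientation, a rough sketch of the behavior the commit
message describes — not the verbatim CPython implementation — assuming only
detect_encoding() and io.TextIOWrapper:

    import builtins
    from io import TextIOWrapper
    from tokenize import detect_encoding

    def open_detected(filename):
        # Hypothetical helper mirroring what tokenize.open() is described
        # to do: read raw bytes, let detect_encoding() inspect the first
        # lines for a BOM or coding cookie, then rewind and wrap the byte
        # stream in a read-only text layer using that encoding.
        buffer = builtins.open(filename, 'rb')
        encoding, lines = detect_encoding(buffer.readline)
        buffer.seek(0)
        text = TextIOWrapper(buffer, encoding, line_buffering=True)
        text.mode = 'r'
        return text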