author    Serhiy Storchaka <storchaka@gmail.com>  2013-09-16 20:57:00 (GMT)
committer Serhiy Storchaka <storchaka@gmail.com>  2013-09-16 20:57:00 (GMT)
commit    935349406aeb9d43fecea447f0309ce63ed3a406 (patch)
tree      0c0cbc4c991840c4cb1e0cf835069fe571be7c03 /Lib/test
parent    3c41154331ed281514943a1d2c61fca0d89dc63c (diff)
parent    dafea851901fc1de278ad79727d3b44f46ba5a31 (diff)
Issue #18873: The tokenize module, IDLE, 2to3, and the findnocoding.py script
now detect Python source code encoding only in comment lines.
Diffstat (limited to 'Lib/test')
-rw-r--r--  Lib/test/test_importlib/source/test_source_encoding.py   6
-rw-r--r--  Lib/test/test_tokenize.py                                 7
2 files changed, 10 insertions, 3 deletions
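
The patch replaces the loosely anchored coding-cookie regex with one that only matches comment lines. A minimal sketch (not part of the commit) contrasting the two patterns, using the exact expressions from the diff below:

```python
import re

OLD_CODING_RE = re.compile(r'coding[:=]\s*([-\w.]+)')                           # pre-patch
NEW_CODING_RE = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)  # post-patch

comment_line = '# coding=koi8-r'          # a genuine coding comment
string_line = 'print("#coding=fake")'     # "coding" appears only inside a string literal

# Both patterns recognize the genuine comment line.
assert OLD_CODING_RE.search(comment_line).group(1) == 'koi8-r'
assert NEW_CODING_RE.match(comment_line).group(1) == 'koi8-r'

# Only the old pattern is fooled by ordinary source text containing "coding=".
assert OLD_CODING_RE.search(string_line).group(1) == 'fake'
assert NEW_CODING_RE.match(string_line) is None
```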
diff --git a/Lib/test/test_importlib/source/test_source_encoding.py b/Lib/test/test_importlib/source/test_source_encoding.py
index 0ca5195..ba02b44 100644
--- a/Lib/test/test_importlib/source/test_source_encoding.py
+++ b/Lib/test/test_importlib/source/test_source_encoding.py
@@ -10,7 +10,7 @@ import unicodedata
import unittest
-CODING_RE = re.compile(r'coding[:=]\s*([-\w.]+)')
+CODING_RE = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
class EncodingTest(unittest.TestCase):
@@ -41,7 +41,7 @@ class EncodingTest(unittest.TestCase):
def create_source(self, encoding):
encoding_line = "# coding={0}".format(encoding)
- assert CODING_RE.search(encoding_line)
+ assert CODING_RE.match(encoding_line)
source_lines = [encoding_line.encode('utf-8')]
source_lines.append(self.source_line.encode(encoding))
return b'\n'.join(source_lines)
@@ -50,7 +50,7 @@ class EncodingTest(unittest.TestCase):
# Make sure that an encoding that has never been a standard one for
# Python works.
encoding_line = "# coding=koi8-r"
- assert CODING_RE.search(encoding_line)
+ assert CODING_RE.match(encoding_line)
source = "{0}\na=42\n".format(encoding_line).encode("koi8-r")
self.run_test(source)
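
For context, a hedged usage sketch (not from the patch): tokenize.detect_encoding() still honors a declaration like the koi8-r one built by create_source() above, because that cookie does live in a comment line.

```python
import io
import tokenize

# The same kind of source the importlib test constructs: a coding comment
# followed by one line of code, encoded with the declared codec.
source = "# coding=koi8-r\na=42\n".encode("koi8-r")

encoding, consumed_lines = tokenize.detect_encoding(io.BytesIO(source).readline)
print(encoding)        # koi8-r
print(consumed_lines)  # [b'# coding=koi8-r\n']
```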
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index b4a58f0..1765085 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -946,6 +946,13 @@ class TestDetectEncoding(TestCase):
readline = self.get_readline((b'# coding: bad\n',))
self.assertRaises(SyntaxError, detect_encoding, readline)
+ def test_false_encoding(self):
+ # Issue 18873: "Encoding" detected in non-comment lines
+ readline = self.get_readline((b'print("#coding=fake")',))
+ encoding, consumed_lines = detect_encoding(readline)
+ self.assertEqual(encoding, 'utf-8')
+ self.assertEqual(consumed_lines, [b'print("#coding=fake")'])
+
def test_open(self):
filename = support.TESTFN + '.py'
self.addCleanup(support.unlink, filename)
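
The test_open hunk above exercises tokenize.open(), which applies the same cookie detection when opening a file. A small self-contained sketch of that API (the file name below is made up for illustration):

```python
import os
import tokenize

path = "example_koi8r.py"  # hypothetical scratch file
with open(path, "wb") as f:
    f.write("# coding: koi8-r\nprint('\u0442\u0435\u0441\u0442')\n".encode("koi8-r"))

# tokenize.open() reads the coding comment and returns an already-decoded
# text stream whose .encoding reflects the declaration.
with tokenize.open(path) as f:
    print(f.encoding)  # koi8-r
    print(f.read())

os.remove(path)
```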