path: root/Lib
author     Benjamin Peterson <benjamin@python.org>  2010-03-18 22:29:52 (GMT)
committer  Benjamin Peterson <benjamin@python.org>  2010-03-18 22:29:52 (GMT)
commit     689a55809818a846d2733241642572840d20570b (patch)
tree       3df23660fca4efa2d5833188fbc26ac6ee25bdc2 /Lib
parent     8c8042734aa4500db9072ef56548b544d881b5b1 (diff)
in tokenize.detect_encoding(), return utf-8-sig when a BOM is found
Diffstat (limited to 'Lib')
-rw-r--r--  Lib/test/test_tokenize.py  10
-rw-r--r--  Lib/tokenize.py            18
2 files changed, 17 insertions(+), 11 deletions(-)
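
For context, a minimal sketch of the behavior this commit introduces (a hypothetical usage example, not part of the commit itself): after the change, detect_encoding() reports 'utf-8-sig' rather than 'utf-8' whenever the source begins with a UTF-8 BOM.

import io
from tokenize import detect_encoding

# A source file beginning with the UTF-8 BOM (b'\xef\xbb\xbf').
source = b'\xef\xbb\xbfprint("hello")\n'
encoding, consumed_lines = detect_encoding(io.BytesIO(source).readline)
print(encoding)  # 'utf-8-sig' after this commit; 'utf-8' before it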
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 7b91ab2..1bfac40 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -726,7 +726,7 @@ class TestDetectEncoding(TestCase):
             b'do_something(else)\n'
         )
         encoding, consumed_lines = detect_encoding(self.get_readline(lines))
-        self.assertEquals(encoding, 'utf-8')
+        self.assertEquals(encoding, 'utf-8-sig')
         self.assertEquals(consumed_lines,
                           [b'# something\n', b'print(something)\n'])
@@ -747,7 +747,7 @@ class TestDetectEncoding(TestCase):
             b'do_something(else)\n'
         )
         encoding, consumed_lines = detect_encoding(self.get_readline(lines))
-        self.assertEquals(encoding, 'utf-8')
+        self.assertEquals(encoding, 'utf-8-sig')
         self.assertEquals(consumed_lines, [b'# coding=utf-8\n'])

     def test_mismatched_bom_and_cookie_first_line_raises_syntaxerror(self):
@@ -779,7 +779,7 @@ class TestDetectEncoding(TestCase):
             b'do_something(else)\n'
         )
         encoding, consumed_lines = detect_encoding(self.get_readline(lines))
-        self.assertEquals(encoding, 'utf-8')
+        self.assertEquals(encoding, 'utf-8-sig')
         self.assertEquals(consumed_lines,
                           [b'#! something\n', b'f# coding=utf-8\n'])
@@ -833,12 +833,12 @@ class TestDetectEncoding(TestCase):

         readline = self.get_readline((b'\xef\xbb\xbfprint(something)\n',))
         encoding, consumed_lines = detect_encoding(readline)
-        self.assertEquals(encoding, 'utf-8')
+        self.assertEquals(encoding, 'utf-8-sig')
         self.assertEquals(consumed_lines, [b'print(something)\n'])

         readline = self.get_readline((b'\xef\xbb\xbf',))
         encoding, consumed_lines = detect_encoding(readline)
-        self.assertEquals(encoding, 'utf-8')
+        self.assertEquals(encoding, 'utf-8-sig')
         self.assertEquals(consumed_lines, [])

         readline = self.get_readline((b'# coding: bad\n',))
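
The updated assertions above point at why 'utf-8-sig' is the more precise answer: decoding BOM-prefixed bytes with plain 'utf-8' leaves a stray U+FEFF character at the start of the text, whereas 'utf-8-sig' strips it. A minimal illustration (not part of the commit):

data = b'\xef\xbb\xbfprint(something)\n'
print(repr(data.decode('utf-8')))      # '\ufeffprint(something)\n' -- BOM survives as U+FEFF
print(repr(data.decode('utf-8-sig')))  # 'print(something)\n' -- BOM stripped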
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index f82922b..8972137 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -301,14 +301,16 @@ def detect_encoding(readline):
     in.

     It detects the encoding from the presence of a utf-8 bom or an encoding
-    cookie as specified in pep-0263. If both a bom and a cookie are present,
-    but disagree, a SyntaxError will be raised. If the encoding cookie is an
-    invalid charset, raise a SyntaxError.
+    cookie as specified in pep-0263. If both a bom and a cookie are present, but
+    disagree, a SyntaxError will be raised. If the encoding cookie is an invalid
+    charset, raise a SyntaxError. Note that if a utf-8 bom is found,
+    'utf-8-sig' is returned.

     If no encoding is specified, then the default of 'utf-8' will be returned.
     """
     bom_found = False
     encoding = None
+    default = 'utf-8'
     def read_or_stop():
         try:
             return readline()
@@ -340,8 +342,9 @@ def detect_encoding(readline):
     if first.startswith(BOM_UTF8):
         bom_found = True
         first = first[3:]
+        default = 'utf-8-sig'
     if not first:
-        return 'utf-8', []
+        return default, []

     encoding = find_cookie(first)
     if encoding:
@@ -349,13 +352,13 @@ def detect_encoding(readline):

     second = read_or_stop()
     if not second:
-        return 'utf-8', [first]
+        return default, [first]

     encoding = find_cookie(second)
     if encoding:
         return encoding, [first, second]

-    return 'utf-8', [first, second]
+    return default, [first, second]


 def tokenize(readline):
@@ -394,6 +397,9 @@ def _tokenize(readline, encoding):
     indents = [0]

     if encoding is not None:
+        if encoding == "utf-8-sig":
+            # BOM will already have been stripped.
+            encoding = "utf-8"
         yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
     while True:                                # loop over lines in stream
         try:
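
The _tokenize() hunk above maps 'utf-8-sig' back to plain 'utf-8' for the ENCODING token, since detect_encoding() has already consumed the BOM and the remaining byte stream is ordinary utf-8. A sketch of the resulting round-trip behavior (illustrative, not from the commit):

import io
import tokenize

source = b'\xef\xbb\xbfx = 1\n'
tokens = list(tokenize.tokenize(io.BytesIO(source).readline))
# First token is ENCODING; it reports 'utf-8' even though the source had a BOM.
print(tokens[0].string)  # 'utf-8'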