diff options
author | Benjamin Peterson <benjamin@python.org> | 2010-03-18 22:29:52 (GMT) |
---|---|---|
committer | Benjamin Peterson <benjamin@python.org> | 2010-03-18 22:29:52 (GMT) |
commit | 689a55809818a846d2733241642572840d20570b (patch) | |
tree | 3df23660fca4efa2d5833188fbc26ac6ee25bdc2 /Lib/tokenize.py | |
parent | 8c8042734aa4500db9072ef56548b544d881b5b1 (diff) | |
download | cpython-689a55809818a846d2733241642572840d20570b.zip cpython-689a55809818a846d2733241642572840d20570b.tar.gz cpython-689a55809818a846d2733241642572840d20570b.tar.bz2 |
in tokenize.detect_encoding(), return utf-8-sig when a BOM is found
Diffstat (limited to 'Lib/tokenize.py')
-rw-r--r-- | Lib/tokenize.py | 18 |
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index f82922b..8972137 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -301,14 +301,16 @@ def detect_encoding(readline):
     in.
 
     It detects the encoding from the presence of a utf-8 bom or an encoding
-    cookie as specified in pep-0263. If both a bom and a cookie are present,
-    but disagree, a SyntaxError will be raised. If the encoding cookie is an
-    invalid charset, raise a SyntaxError.
+    cookie as specified in pep-0263. If both a bom and a cookie are present, but
+    disagree, a SyntaxError will be raised. If the encoding cookie is an invalid
+    charset, raise a SyntaxError. Note that if a utf-8 bom is found,
+    'utf-8-sig' is returned.
 
     If no encoding is specified, then the default of 'utf-8' will be returned.
     """
     bom_found = False
     encoding = None
+    default = 'utf-8'
     def read_or_stop():
         try:
             return readline()
@@ -340,8 +342,9 @@ def detect_encoding(readline):
         if first.startswith(BOM_UTF8):
             bom_found = True
             first = first[3:]
+            default = 'utf-8-sig'
             if not first:
-                return 'utf-8', []
+                return default, []
 
         encoding = find_cookie(first)
         if encoding:
@@ -349,13 +352,13 @@ def detect_encoding(readline):
 
     second = read_or_stop()
     if not second:
-        return 'utf-8', [first]
+        return default, [first]
 
     encoding = find_cookie(second)
     if encoding:
         return encoding, [first, second]
 
-    return 'utf-8', [first, second]
+    return default, [first, second]
 
 
 def tokenize(readline):
@@ -394,6 +397,9 @@ def _tokenize(readline, encoding):
     indents = [0]
 
     if encoding is not None:
+        if encoding == "utf-8-sig":
+            # BOM will already have been stripped.
+            encoding = "utf-8"
         yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
     while True:             # loop over lines in stream
         try: