| author | Serhiy Storchaka <storchaka@gmail.com> | 2013-09-16 20:51:56 (GMT) |
|---|---|---|
| committer | Serhiy Storchaka <storchaka@gmail.com> | 2013-09-16 20:51:56 (GMT) |
| commit | dafea851901fc1de278ad79727d3b44f46ba5a31 (patch) | |
| tree | 5f8d95de4856502e61c78168e7918776b161e9b4 /Lib/tokenize.py | |
| parent | 975fce37883899a55bbcdaa6300c5c6ffe9d3db2 (diff) | |
Issue #18873: The tokenize module, IDLE, 2to3, and the findnocoding.py script
now detect Python source code encoding only in comment lines.
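
To illustrate the effect, here is a minimal sketch comparing the old and new `cookie_re` patterns (both copied from the diff below); the sample lines are hypothetical, not taken from the patch. The old pattern matched a coding cookie anywhere in a line, while the new one only matches when the line itself is a comment.

```python
import re

# Old pattern (written here as a raw string, which is equivalent):
# matched a coding cookie anywhere in the line.
old_cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")
# New pattern: the line must be a (possibly indented) comment.
new_cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)

comment_line = "# -*- coding: latin-1 -*-"
code_line = 'banner = "coding: latin-1"  # hypothetical non-comment line'

print(old_cookie_re.findall(comment_line))         # ['latin-1']
print(old_cookie_re.findall(code_line))            # ['latin-1'] -- false positive
print(new_cookie_re.match(comment_line).group(1))  # latin-1
print(new_cookie_re.match(code_line))              # None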
Diffstat (limited to 'Lib/tokenize.py')
| -rw-r--r-- | Lib/tokenize.py | 8 |
1 file changed, 4 insertions, 4 deletions
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index cbf91ef..f1e61d8 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -31,7 +31,7 @@ from token import *
 from codecs import lookup, BOM_UTF8
 import collections
 from io import TextIOWrapper
-cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
+cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
 
 import token
 __all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
@@ -372,10 +372,10 @@ def detect_encoding(readline):
                 msg = '{} for {!r}'.format(msg, filename)
             raise SyntaxError(msg)
 
-        matches = cookie_re.findall(line_string)
-        if not matches:
+        match = cookie_re.match(line_string)
+        if not match:
             return None
-        encoding = _get_normal_name(matches[0])
+        encoding = _get_normal_name(match.group(1))
         try:
             codec = lookup(encoding)
         except LookupError:
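
The second hunk changes `detect_encoding()`, the function other tools call to pick a source encoding. As a usage sketch (the `encoding_of` helper and the sample byte strings are invented for illustration), the behavioural difference looks like this:

```python
import io
import tokenize

def encoding_of(source: bytes) -> str:
    # detect_encoding() takes a readline callable and returns (encoding, lines).
    return tokenize.detect_encoding(io.BytesIO(source).readline)[0]

# A genuine declaration: the first line is a comment carrying the cookie.
print(encoding_of(b"# -*- coding: cp1252 -*-\nx = 1\n"))   # cp1252

# A cookie-like string inside ordinary code is no longer picked up, so the
# default encoding is reported; before this patch, findall() would have
# treated this file as cp1252.
print(encoding_of(b'banner = "coding: cp1252"\nx = 1\n'))  # utf-8
```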