author     Benjamin Peterson <benjamin@python.org>  2010-08-30 14:41:20 (GMT)
committer  Benjamin Peterson <benjamin@python.org>  2010-08-30 14:41:20 (GMT)
commit     33856de84d1115a18b699e0ca93c3b921bc6a1af (patch)
tree       d44532b9f9225284e4dc072f4a2ba657beefd54c /Lib/tokenize.py
parent     e01de8f2f3f8ec2ad58e1baddb7c2c657e331796 (diff)
handle names starting with non-ascii characters correctly #9712
Diffstat (limited to 'Lib/tokenize.py')
-rw-r--r--  Lib/tokenize.py | 15
1 file changed, 10 insertions(+), 5 deletions(-)
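
The core of the change is the Name pattern: the old r'[a-zA-Z_]\w*' only accepts an ASCII letter or underscore in the leading position, so a name beginning with a non-ASCII character never matches as a NAME. A minimal sketch of the difference (illustrative only, not part of the patch; variable names are made up):

    import re

    old_name = re.compile(r'[a-zA-Z_]\w*')      # pre-patch pattern
    new_name = re.compile(r'\w+', re.UNICODE)   # post-patch pattern (compiled with re.UNICODE, as _compile() does)

    print(old_name.match('ähnlich'))            # None: the leading 'ä' is rejected
    print(new_name.match('ähnlich').group())    # 'ähnlich'

On Python 3 str patterns \w already matches Unicode word characters by default; the re.UNICODE flag passed by the new _compile() helper makes that explicit.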
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index 519dfa5..51b49e4 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -92,7 +92,7 @@ def maybe(*choices): return group(*choices) + '?'
 Whitespace = r'[ \f\t]*'
 Comment = r'#[^\r\n]*'
 Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
-Name = r'[a-zA-Z_]\w*'
+Name = r'\w+'
 
 Hexnumber = r'0[xX][0-9a-fA-F]+'
 Binnumber = r'0[bB][01]+'
@@ -142,9 +142,12 @@ ContStr = group(r"[bB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
 PseudoExtras = group(r'\\\r?\n', Comment, Triple)
 PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
 
+def _compile(expr):
+    return re.compile(expr, re.UNICODE)
+
 tokenprog, pseudoprog, single3prog, double3prog = map(
-    re.compile, (Token, PseudoToken, Single3, Double3))
-endprogs = {"'": re.compile(Single), '"': re.compile(Double),
+    _compile, (Token, PseudoToken, Single3, Double3))
+endprogs = {"'": _compile(Single), '"': _compile(Double),
             "'''": single3prog, '"""': double3prog,
             "r'''": single3prog, 'r"""': double3prog,
             "b'''": single3prog, 'b"""': double3prog,
@@ -171,6 +174,8 @@ for t in ("'", '"',
           "bR'", 'bR"', "BR'", 'BR"' ):
     single_quoted[t] = t
 
+del _compile
+
 tabsize = 8
 
 class TokenError(Exception): pass
@@ -393,7 +398,7 @@ def tokenize(readline):
 
 def _tokenize(readline, encoding):
     lnum = parenlev = continued = 0
-    namechars, numchars = string.ascii_letters + '_', '0123456789'
+    numchars = '0123456789'
     contstr, needcont = '', 0
     contline = None
     indents = [0]
@@ -520,7 +525,7 @@ def _tokenize(readline, encoding):
                         break
                 else:                              # ordinary string
                     yield TokenInfo(STRING, token, spos, epos, line)
-            elif initial in namechars:             # ordinary name
+            elif initial.isidentifier():           # ordinary name
                 yield TokenInfo(NAME, token, spos, epos, line)
             elif initial == '\\':                  # continued stmt
                 continued = 1
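
A rough way to exercise the fix (illustrative snippet, not part of the patch): str.isidentifier(), which replaces the old "initial in namechars" membership test on the token's first character, is true for any valid identifier start character, ASCII or not, and the Unicode-aware patterns let _tokenize() emit a NAME token for such a name:

    import io
    from tokenize import tokenize, NAME

    print('ü'.isidentifier())       # True; the old namechars set held only ASCII letters and '_'

    source = 'über = 5\n'.encode('utf-8')
    for tok in tokenize(io.BytesIO(source).readline):
        if tok.type == NAME:
            print(tok.string)       # über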