author     Guido van Rossum <guido@python.org>    2007-11-12 17:40:10 (GMT)
committer  Guido van Rossum <guido@python.org>    2007-11-12 17:40:10 (GMT)
commit     4fe72f9b03f92f126bec69d79ca3e9e09018c988 (patch)
tree       7c12e90de06c635e220852dad053ae2848c1a746 /Lib/tokenize.py
parent     1607278c2612275d21354619c6ef1e2869178ba7 (diff)
Patch 1420 by Ron Adam.
This adds support for bytes literals (b'...') to tokenize.py, and removes support for unicode literals (u'...').
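A minimal sketch of the effect, not part of the patch, run through the module's generate_tokens() API (which takes a readline callable over str):

    import io
    import tokenize

    # Post-patch, a bytes literal in any of the b/r prefix spellings
    # enumerated below is matched as a single STRING token.
    source = "data = b'abc'\npattern = br'\\d+'\n"
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        print(tok)

    # With this tokenizer, u'abc' is no longer a single STRING token:
    # it comes out as NAME 'u' followed by the quoted string.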
Diffstat (limited to 'Lib/tokenize.py')
-rw-r--r--  Lib/tokenize.py | 36 ++++++++++++++++++------------------
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index a204856..0d9a3fb 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -69,10 +69,10 @@ Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
 Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
 # Tail end of """ string.
 Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
-Triple = group("[uU]?[rR]?'''", '[uU]?[rR]?"""')
+Triple = group("[bB]?[rR]?'''", '[bB]?[rR]?"""')
 # Single-line ' or " string.
-String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
-               r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
+String = group(r"[bB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
+               r'[bB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

 # Because of leftmost-then-longest match semantics, be sure to put the
 # longest operators first (e.g., if = came before ==, == would get
@@ -90,9 +90,9 @@ PlainToken = group(Number, Funny, String, Name)
 Token = Ignore + PlainToken

 # First (or only) line of ' or " string.
-ContStr = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
+ContStr = group(r"[bB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                 group("'", r'\\\r?\n'),
-                r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
+                r'[bB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                 group('"', r'\\\r?\n'))
 PseudoExtras = group(r'\\\r?\n', Comment, Triple)
 PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
@@ -102,28 +102,28 @@ tokenprog, pseudoprog, single3prog, double3prog = map(
 endprogs = {"'": re.compile(Single), '"': re.compile(Double),
             "'''": single3prog, '"""': double3prog,
             "r'''": single3prog, 'r"""': double3prog,
-            "u'''": single3prog, 'u"""': double3prog,
-            "ur'''": single3prog, 'ur"""': double3prog,
+            "b'''": single3prog, 'b"""': double3prog,
+            "br'''": single3prog, 'br"""': double3prog,
             "R'''": single3prog, 'R"""': double3prog,
-            "U'''": single3prog, 'U"""': double3prog,
-            "uR'''": single3prog, 'uR"""': double3prog,
-            "Ur'''": single3prog, 'Ur"""': double3prog,
-            "UR'''": single3prog, 'UR"""': double3prog,
-            'r': None, 'R': None, 'u': None, 'U': None}
+            "B'''": single3prog, 'B"""': double3prog,
+            "bR'''": single3prog, 'bR"""': double3prog,
+            "Br'''": single3prog, 'Br"""': double3prog,
+            "BR'''": single3prog, 'BR"""': double3prog,
+            'r': None, 'R': None, 'b': None, 'B': None}

 triple_quoted = {}
 for t in ("'''", '"""',
           "r'''", 'r"""', "R'''", 'R"""',
-          "u'''", 'u"""', "U'''", 'U"""',
-          "ur'''", 'ur"""', "Ur'''", 'Ur"""',
-          "uR'''", 'uR"""', "UR'''", 'UR"""'):
+          "b'''", 'b"""', "B'''", 'B"""',
+          "br'''", 'br"""', "Br'''", 'Br"""',
+          "bR'''", 'bR"""', "BR'''", 'BR"""'):
     triple_quoted[t] = t
 single_quoted = {}
 for t in ("'", '"',
           "r'", 'r"', "R'", 'R"',
-          "u'", 'u"', "U'", 'U"',
-          "ur'", 'ur"', "Ur'", 'Ur"',
-          "uR'", 'uR"', "UR'", 'UR"' ):
+          "b'", 'b"', "B'", 'B"',
+          "br'", 'br"', "Br'", 'Br"',
+          "bR'", 'bR"', "BR'", 'BR"' ):
     single_quoted[t] = t

 tabsize = 8
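The three tables rewritten above are keyed by the exact prefix-plus-quote text the tokenizer has just matched. A self-contained sketch (mine, not the module's code, which builds dicts in loops) of the same enumeration, showing why each quote table carries 18 entries:

    from itertools import product

    # '' | r | b | br, in every capitalization the patch lists; the b must
    # precede the r, so rb'...' is not a recognized prefix here.
    prefixes = [''] + list('rRbB') + [b + r for b, r in product('bB', 'rR')]

    single_quoted = {p + q for p in prefixes for q in ("'", '"')}
    triple_quoted = {p + q for p in prefixes for q in ("'''", '"""')}

    assert "br'''" in triple_quoted and "rb'''" not in triple_quoted
    assert len(single_quoted) == len(triple_quoted) == 18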