From 6ecf77b3f8c981a695d4116b010755676bcc61e2 Mon Sep 17 00:00:00 2001
From: Armin Ronacher
Date: Sun, 4 Mar 2012 12:04:06 +0000
Subject: Basic support for PEP 414 without docs or tests.

---
 Lib/tokenize.py    | 30 ++++++++++++++++++++++--------
 Parser/tokenizer.c | 10 +++++++---
 Python/ast.c       |  3 +++
 3 files changed, 32 insertions(+), 11 deletions(-)

diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index 4c42bbc..b7ae7d3 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -135,10 +135,10 @@ Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
 Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
 # Tail end of """ string.
 Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
-Triple = group("[bB]?[rR]?'''", '[bB]?[rR]?"""')
+Triple = group("[bBuU]?[rR]?'''", '[bBuU]?[rR]?"""')
 # Single-line ' or " string.
-String = group(r"[bB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
-               r'[bB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
+String = group(r"[bBuU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
+               r'[bBuU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
 
 # Because of leftmost-then-longest match semantics, be sure to put the
 # longest operators first (e.g., if = came before ==, == would get
@@ -156,9 +156,9 @@ PlainToken = group(Number, Funny, String, Name)
 Token = Ignore + PlainToken
 
 # First (or only) line of ' or " string.
-ContStr = group(r"[bB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
+ContStr = group(r"[bBuU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                 group("'", r'\\\r?\n'),
-                r'[bB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
+                r'[bBuU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                 group('"', r'\\\r?\n'))
 PseudoExtras = group(r'\\\r?\n', Comment, Triple)
 PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
@@ -176,21 +176,35 @@ endpats = {"'": Single, '"': Double,
            "bR'''": Single3, 'bR"""': Double3,
            "Br'''": Single3, 'Br"""': Double3,
            "BR'''": Single3, 'BR"""': Double3,
-           'r': None, 'R': None, 'b': None, 'B': None}
+           "u'''": Single3, 'u"""': Double3,
+           "ur'''": Single3, 'ur"""': Double3,
+           "R'''": Single3, 'R"""': Double3,
+           "U'''": Single3, 'U"""': Double3,
+           "uR'''": Single3, 'uR"""': Double3,
+           "Ur'''": Single3, 'Ur"""': Double3,
+           "UR'''": Single3, 'UR"""': Double3,
+           'r': None, 'R': None, 'b': None, 'B': None,
+           'u': None, 'U': None}
 
 triple_quoted = {}
 for t in ("'''", '"""',
           "r'''", 'r"""', "R'''", 'R"""',
           "b'''", 'b"""', "B'''", 'B"""',
           "br'''", 'br"""', "Br'''", 'Br"""',
-          "bR'''", 'bR"""', "BR'''", 'BR"""'):
+          "bR'''", 'bR"""', "BR'''", 'BR"""',
+          "u'''", 'u"""', "U'''", 'U"""',
+          "ur'''", 'ur"""', "Ur'''", 'Ur"""',
+          "uR'''", 'uR"""', "UR'''", 'UR"""'):
     triple_quoted[t] = t
 single_quoted = {}
 for t in ("'", '"',
           "r'", 'r"', "R'", 'R"',
           "b'", 'b"', "B'", 'B"',
           "br'", 'br"', "Br'", 'Br"',
-          "bR'", 'bR"', "BR'", 'BR"' ):
+          "bR'", 'bR"', "BR'", 'BR"' ,
+          "u'", 'u"', "U'", 'U"',
+          "ur'", 'ur"', "Ur'", 'Ur"',
+          "uR'", 'uR"', "UR'", 'UR"' ):
     single_quoted[t] = t
 
 tabsize = 8
diff --git a/Parser/tokenizer.c b/Parser/tokenizer.c
index 55f4313..36ca079 100644
--- a/Parser/tokenizer.c
+++ b/Parser/tokenizer.c
@@ -1412,11 +1412,15 @@ tok_get(register struct tok_state *tok, char **p_start, char **p_end)
     /* Identifier (most frequent token!) */
     nonascii = 0;
     if (is_potential_identifier_start(c)) {
-        /* Process b"", r"", br"" and rb"" */
-        int saw_b = 0, saw_r = 0;
+        /* Process b"", r"", u"", br"", rb"" and ur"" */
+        int saw_b = 0, saw_r = 0, saw_u = 0;
         while (1) {
-            if (!saw_b && (c == 'b' || c == 'B'))
+            if (!(saw_b || saw_u) && (c == 'b' || c == 'B'))
                 saw_b = 1;
+            /* Since this is a backwards compatibility support literal we don't
+               want to support it in arbitrary order like byte literals. */
+            else if (!(saw_b || saw_u || saw_r) && (c == 'u' || c == 'U'))
+                saw_u = 1;
             else if (!saw_r && (c == 'r' || c == 'R'))
                 saw_r = 1;
             else
diff --git a/Python/ast.c b/Python/ast.c
index c565642..0f93098 100644
--- a/Python/ast.c
+++ b/Python/ast.c
@@ -3796,6 +3796,9 @@ parsestr(struct compiling *c, const node *n, int *bytesmode)
             quote = *++s;
             *bytesmode = 1;
         }
+        else if (quote == 'u' || quote == 'U') {
+            quote = *++s;
+        }
         else if (quote == 'r' || quote == 'R') {
             quote = *++s;
             rawmode = 1;
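
In practice, the Lib/tokenize.py hunk means a u/U-prefixed literal now comes
back from the pure-Python tokenizer as a single STRING token; before this
change the prefix fell out as a separate NAME token. A minimal sketch of the
difference, assuming an interpreter that carries this patch (Python 3.3 or
later); the snippet is not part of the commit itself:

    import io
    import tokenize

    # With the patch, u'hello' is one STRING token; on an unpatched
    # Python 3 tokenizer it splits into NAME 'u' plus STRING 'hello'.
    source = "s = u'hello'\n"
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        print(tokenize.tok_name[tok.type], repr(tok.string))

On a patched interpreter this prints NAME 's', OP '=', STRING "u'hello'",
followed by NEWLINE and ENDMARKER.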
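
The saw_b/saw_u/saw_r flags in the Parser/tokenizer.c hunk encode an ordering
rule: u may only appear first, optionally followed by r, exactly as in 2.x, so
ur'' stays legal while ru'', ub'' and bu'' do not. The Lib/tokenize.py tables
agree, since only the u-before-r spellings are listed. A sketch of the
observable behaviour at the tokenizer level, under the same patched-interpreter
assumption (tokenize itself does not reject the NAME-plus-STRING sequences;
the compiler would):

    import io
    import tokenize
    from token import NAME, STRING

    def kinds(source):
        # Collect only the NAME/STRING tokens; skip NEWLINE and ENDMARKER.
        toks = tokenize.generate_tokens(io.StringIO(source).readline)
        return [t.type for t in toks if t.type in (NAME, STRING)]

    assert kinds("ur'x'\n") == [STRING]        # u before r: one literal
    assert kinds("ru'x'\n") == [NAME, STRING]  # reversed: 'ru' is a plain name
    assert kinds("ub'x'\n") == [NAME, STRING]  # u never combines with b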
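
The Python/ast.c hunk is the compiler half: parsestr() now steps past a
leading u or U without setting any mode flag, so the prefix changes nothing
about the resulting object; per PEP 414 it exists purely so 2.x code written
with u'' literals can run unmodified. A sketch under the same assumption:

    # parsestr() skips the u, so u'...' compiles to a plain str object.
    code = compile("u'hello'", "<pep414>", "eval")
    value = eval(code)
    assert value == 'hello'
    assert type(value) is str  # Python 3 has no separate unicode type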