diff options
author | Christian Heimes <c.heimes@cheimes.de> | 2012-06-20 09:17:58 (GMT) |
---|---|---|
committer | Christian Heimes <c.heimes@cheimes.de> | 2012-06-20 09:17:58 (GMT) |
commit | 0b3847de6dbe451d38d8de940717a5a1f186c2e9 (patch) | |
tree | 62f54dd84693e7b25deae94699b8c240d824b806 /Lib/test | |
parent | 10c8791978203be95af2c4c1d7ce33496fac880c (diff) | |
download | cpython-0b3847de6dbe451d38d8de940717a5a1f186c2e9.zip cpython-0b3847de6dbe451d38d8de940717a5a1f186c2e9.tar.gz cpython-0b3847de6dbe451d38d8de940717a5a1f186c2e9.tar.bz2 |
Issue #15096: Drop support for the ur string prefix
Diffstat (limited to 'Lib/test')
-rw-r--r-- | Lib/test/test_strlit.py | 9 | ||||
-rw-r--r-- | Lib/test/test_tokenize.py | 22 |
2 files changed, 11 insertions, 20 deletions
diff --git a/Lib/test/test_strlit.py b/Lib/test/test_strlit.py index 1f041c8..07bc488 100644 --- a/Lib/test/test_strlit.py +++ b/Lib/test/test_strlit.py @@ -123,6 +123,15 @@ class TestLiterals(unittest.TestCase): self.assertRaises(SyntaxError, eval, """ rrb'' """) self.assertRaises(SyntaxError, eval, """ rbb'' """) + def test_eval_str_u(self): + self.assertEqual(eval(""" u'x' """), 'x') + self.assertEqual(eval(""" U'\u00e4' """), 'ä') + self.assertEqual(eval(""" u'\N{LATIN SMALL LETTER A WITH DIAERESIS}' """), 'ä') + self.assertRaises(SyntaxError, eval, """ ur'' """) + self.assertRaises(SyntaxError, eval, """ ru'' """) + self.assertRaises(SyntaxError, eval, """ bu'' """) + self.assertRaises(SyntaxError, eval, """ ub'' """) + def check_encoding(self, encoding, extra=""): modname = "xx_" + encoding.replace("-", "_") fn = os.path.join(self.tmpdir, modname + ".py") diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py index 4c2e4e2..4e798d7 100644 --- a/Lib/test/test_tokenize.py +++ b/Lib/test/test_tokenize.py @@ -299,24 +299,6 @@ String literals STRING 'u"abc"' (1, 0) (1, 6) OP '+' (1, 7) (1, 8) STRING 'U"abc"' (1, 9) (1, 15) - >>> dump_tokens("ur'abc' + uR'abc' + Ur'abc' + UR'abc'") - ENCODING 'utf-8' (0, 0) (0, 0) - STRING "ur'abc'" (1, 0) (1, 7) - OP '+' (1, 8) (1, 9) - STRING "uR'abc'" (1, 10) (1, 17) - OP '+' (1, 18) (1, 19) - STRING "Ur'abc'" (1, 20) (1, 27) - OP '+' (1, 28) (1, 29) - STRING "UR'abc'" (1, 30) (1, 37) - >>> dump_tokens('ur"abc" + uR"abc" + Ur"abc" + UR"abc"') - ENCODING 'utf-8' (0, 0) (0, 0) - STRING 'ur"abc"' (1, 0) (1, 7) - OP '+' (1, 8) (1, 9) - STRING 'uR"abc"' (1, 10) (1, 17) - OP '+' (1, 18) (1, 19) - STRING 'Ur"abc"' (1, 20) (1, 27) - OP '+' (1, 28) (1, 29) - STRING 'UR"abc"' (1, 30) (1, 37) >>> dump_tokens("b'abc' + B'abc'") ENCODING 'utf-8' (0, 0) (0, 0) @@ -642,7 +624,7 @@ Non-ascii identifiers Legacy unicode literals: - >>> dump_tokens("Örter = u'places'\\ngrün = UR'green'") + >>> dump_tokens("Örter = u'places'\\ngrün = U'green'") ENCODING 'utf-8' (0, 0) (0, 0) NAME 'Örter' (1, 0) (1, 5) OP '=' (1, 6) (1, 7) @@ -650,7 +632,7 @@ Legacy unicode literals: NEWLINE '\\n' (1, 17) (1, 18) NAME 'grün' (2, 0) (2, 4) OP '=' (2, 5) (2, 6) - STRING "UR'green'" (2, 7) (2, 16) + STRING "U'green'" (2, 7) (2, 15) """ from test import support |