author     Miss Islington (bot) <31488909+miss-islington@users.noreply.github.com>  2023-06-09 16:58:14 (GMT)
committer  GitHub <noreply@github.com>  2023-06-09 16:58:14 (GMT)
commit     16b1cdc87c08c01294b66257a26574725b005c50 (patch)
tree       07f78346e497a5e8691556904c8cf3bddb574f63 /Lib/test
parent     97d846dc2b6a790298cbfbb5669d180281cfda89 (diff)
[3.12] gh-105564: Don't include artificial newlines in the line attribute of tokens (GH-105565) (#105579)
Co-authored-by: Pablo Galindo Salgado <Pablogsal@gmail.com>
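
For context, here is a minimal sketch of the behavior this commit pins down, written against the public tokenize API rather than the private _generate_tokens_from_c_tokenizer helper the tests use (the output noted in the comments assumes CPython 3.12 with this fix applied):

    import io
    import tokenize

    # Source with no trailing newline: the tokenizer appends one internally,
    # but after gh-105564 that artificial newline no longer leaks into the
    # `line` attribute of the emitted tokens.
    src = "1+1"
    for tok in tokenize.generate_tokens(io.StringIO(src).readline):
        print(tokenize.tok_name[tok.type], repr(tok.line))

    # Before the fix: NUMBER/OP/NUMBER/NEWLINE all carried line='1+1\n'.
    # After the fix:  they carry line='1+1', matching the real source text.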
Diffstat (limited to 'Lib/test')
-rw-r--r--  Lib/test/test_tokenize.py  16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 6747b0d..2c124f0 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -1229,7 +1229,7 @@ class Test_Tokenize(TestCase):
# skip the initial encoding token and the end tokens
tokens = list(_generate_tokens_from_c_tokenizer(readline().__next__, encoding='utf-8',
extra_tokens=True))[:-2]
- expected_tokens = [TokenInfo(3, '"ЉЊЈЁЂ"', (1, 0), (1, 7), '"ЉЊЈЁЂ"\n')]
+ expected_tokens = [TokenInfo(3, '"ЉЊЈЁЂ"', (1, 0), (1, 7), '"ЉЊЈЁЂ"')]
self.assertEqual(tokens, expected_tokens,
"bytes not decoded with encoding")
@@ -1638,8 +1638,8 @@ class TestTokenize(TestCase):
TokenInfo(type=token.NUMBER, string='1', start=(1, 4), end=(1, 5), line='b = 1\n'),
TokenInfo(type=token.NEWLINE, string='\n', start=(1, 5), end=(1, 6), line='b = 1\n'),
TokenInfo(type=token.NL, string='\n', start=(2, 0), end=(2, 1), line='\n'),
- TokenInfo(type=token.COMMENT, string='#test', start=(3, 0), end=(3, 5), line='#test\n'),
- TokenInfo(type=token.NL, string='', start=(3, 5), end=(3, 6), line='#test\n'),
+ TokenInfo(type=token.COMMENT, string='#test', start=(3, 0), end=(3, 5), line='#test'),
+ TokenInfo(type=token.NL, string='', start=(3, 5), end=(3, 6), line='#test'),
TokenInfo(type=token.ENDMARKER, string='', start=(4, 0), end=(4, 0), line='')
]
@@ -1653,7 +1653,7 @@ class TestTokenize(TestCase):
TokenInfo(token.ENCODING, string='utf-8', start=(0, 0), end=(0, 0), line=''),
TokenInfo(token.NAME, string='a', start=(1, 0), end=(1, 1), line='a\n'),
TokenInfo(token.NEWLINE, string='\n', start=(1, 1), end=(1, 2), line='a\n'),
- TokenInfo(token.NL, string='', start=(2, 1), end=(2, 2), line=' \n'),
+ TokenInfo(token.NL, string='', start=(2, 1), end=(2, 2), line=' '),
TokenInfo(token.ENDMARKER, string='', start=(3, 0), end=(3, 0), line='')
]
@@ -1889,10 +1889,10 @@ class CTokenizeTest(TestCase):
yield "1+1".encode(encoding)
expected = [
- TokenInfo(type=NUMBER, string='1', start=(1, 0), end=(1, 1), line='1+1\n'),
- TokenInfo(type=OP, string='+', start=(1, 1), end=(1, 2), line='1+1\n'),
- TokenInfo(type=NUMBER, string='1', start=(1, 2), end=(1, 3), line='1+1\n'),
- TokenInfo(type=NEWLINE, string='', start=(1, 3), end=(1, 4), line='1+1\n'),
+ TokenInfo(type=NUMBER, string='1', start=(1, 0), end=(1, 1), line='1+1'),
+ TokenInfo(type=OP, string='+', start=(1, 1), end=(1, 2), line='1+1'),
+ TokenInfo(type=NUMBER, string='1', start=(1, 2), end=(1, 3), line='1+1'),
+ TokenInfo(type=NEWLINE, string='', start=(1, 3), end=(1, 4), line='1+1'),
TokenInfo(type=ENDMARKER, string='', start=(2, 0), end=(2, 0), line='')
]
for encoding in ["utf-8", "latin-1", "utf-16"]:
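
The comment case from the second hunk can be checked the same way; again a hedged sketch using the public API:

    import io
    import tokenize

    # A comment on the final line, with no trailing newline in the source.
    src = "b = 1\n\n#test"
    for tok in tokenize.generate_tokens(io.StringIO(src).readline):
        if tok.type in (tokenize.COMMENT, tokenize.NL):
            print(tokenize.tok_name[tok.type], repr(tok.line))

    # With this change, the COMMENT and the trailing NL report line='#test'
    # instead of the artificial '#test\n'. The blank line's NL still reports
    # line='\n', since that newline really is in the source.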