diff options
author | Ezio Melotti <ezio.melotti@gmail.com> | 2010-11-20 19:04:17 (GMT) |
---|---|---|
committer | Ezio Melotti <ezio.melotti@gmail.com> | 2010-11-20 19:04:17 (GMT) |
commit | b3aedd48621ed9d33b5f42f946b256bce4a50673 (patch) | |
tree | 2297c8ebce1b09621e1d98096c1603896d9a0f0c /Lib/test/test_tokenize.py | |
parent | b8bc439b2093add9b313bcca2cc507a2d0e87764 (diff) | |
download | cpython-b3aedd48621ed9d33b5f42f946b256bce4a50673.zip cpython-b3aedd48621ed9d33b5f42f946b256bce4a50673.tar.gz cpython-b3aedd48621ed9d33b5f42f946b256bce4a50673.tar.bz2 |
#9424: Replace deprecated assert* methods in the Python test suite.
Diffstat (limited to 'Lib/test/test_tokenize.py')
-rw-r--r-- | Lib/test/test_tokenize.py | 58 |
1 file changed, 29 insertions(+), 29 deletions(-)
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py index da2138a..11e6fb4 100644 --- a/Lib/test/test_tokenize.py +++ b/Lib/test/test_tokenize.py @@ -689,8 +689,8 @@ class Test_Tokenize(TestCase): # skip the initial encoding token and the end token tokens = list(_tokenize(readline, encoding='utf-8'))[1:-1] expected_tokens = [(3, '"ЉЊЈЁЂ"', (1, 0), (1, 7), '"ЉЊЈЁЂ"')] - self.assertEquals(tokens, expected_tokens, - "bytes not decoded with encoding") + self.assertEqual(tokens, expected_tokens, + "bytes not decoded with encoding") def test__tokenize_does_not_decode_with_encoding_none(self): literal = '"ЉЊЈЁЂ"' @@ -706,8 +706,8 @@ class Test_Tokenize(TestCase): # skip the end token tokens = list(_tokenize(readline, encoding=None))[:-1] expected_tokens = [(3, '"ЉЊЈЁЂ"', (1, 0), (1, 7), '"ЉЊЈЁЂ"')] - self.assertEquals(tokens, expected_tokens, - "string not tokenized when encoding is None") + self.assertEqual(tokens, expected_tokens, + "string not tokenized when encoding is None") class TestDetectEncoding(TestCase): @@ -730,8 +730,8 @@ class TestDetectEncoding(TestCase): b'do_something(else)\n' ) encoding, consumed_lines = detect_encoding(self.get_readline(lines)) - self.assertEquals(encoding, 'utf-8') - self.assertEquals(consumed_lines, list(lines[:2])) + self.assertEqual(encoding, 'utf-8') + self.assertEqual(consumed_lines, list(lines[:2])) def test_bom_no_cookie(self): lines = ( @@ -740,9 +740,9 @@ class TestDetectEncoding(TestCase): b'do_something(else)\n' ) encoding, consumed_lines = detect_encoding(self.get_readline(lines)) - self.assertEquals(encoding, 'utf-8-sig') - self.assertEquals(consumed_lines, - [b'# something\n', b'print(something)\n']) + self.assertEqual(encoding, 'utf-8-sig') + self.assertEqual(consumed_lines, + [b'# something\n', b'print(something)\n']) def test_cookie_first_line_no_bom(self): lines = ( @@ -751,8 +751,8 @@ class TestDetectEncoding(TestCase): b'do_something(else)\n' ) encoding, consumed_lines = 
detect_encoding(self.get_readline(lines)) - self.assertEquals(encoding, 'iso-8859-1') - self.assertEquals(consumed_lines, [b'# -*- coding: latin-1 -*-\n']) + self.assertEqual(encoding, 'iso-8859-1') + self.assertEqual(consumed_lines, [b'# -*- coding: latin-1 -*-\n']) def test_matched_bom_and_cookie_first_line(self): lines = ( @@ -761,8 +761,8 @@ class TestDetectEncoding(TestCase): b'do_something(else)\n' ) encoding, consumed_lines = detect_encoding(self.get_readline(lines)) - self.assertEquals(encoding, 'utf-8-sig') - self.assertEquals(consumed_lines, [b'# coding=utf-8\n']) + self.assertEqual(encoding, 'utf-8-sig') + self.assertEqual(consumed_lines, [b'# coding=utf-8\n']) def test_mismatched_bom_and_cookie_first_line_raises_syntaxerror(self): lines = ( @@ -781,9 +781,9 @@ class TestDetectEncoding(TestCase): b'do_something(else)\n' ) encoding, consumed_lines = detect_encoding(self.get_readline(lines)) - self.assertEquals(encoding, 'ascii') + self.assertEqual(encoding, 'ascii') expected = [b'#! something\n', b'# vim: set fileencoding=ascii :\n'] - self.assertEquals(consumed_lines, expected) + self.assertEqual(consumed_lines, expected) def test_matched_bom_and_cookie_second_line(self): lines = ( @@ -793,9 +793,9 @@ class TestDetectEncoding(TestCase): b'do_something(else)\n' ) encoding, consumed_lines = detect_encoding(self.get_readline(lines)) - self.assertEquals(encoding, 'utf-8-sig') - self.assertEquals(consumed_lines, - [b'#! something\n', b'f# coding=utf-8\n']) + self.assertEqual(encoding, 'utf-8-sig') + self.assertEqual(consumed_lines, + [b'#! 
something\n', b'f# coding=utf-8\n']) def test_mismatched_bom_and_cookie_second_line_raises_syntaxerror(self): lines = ( @@ -820,7 +820,7 @@ class TestDetectEncoding(TestCase): b"do_something += 4\n") rl = self.get_readline(lines) found, consumed_lines = detect_encoding(rl) - self.assertEquals(found, "iso-8859-1") + self.assertEqual(found, "iso-8859-1") def test_utf8_normalization(self): # See get_normal_name() in tokenizer.c. @@ -833,27 +833,27 @@ class TestDetectEncoding(TestCase): b"1 + 3\n") rl = self.get_readline(lines) found, consumed_lines = detect_encoding(rl) - self.assertEquals(found, "utf-8") + self.assertEqual(found, "utf-8") def test_short_files(self): readline = self.get_readline((b'print(something)\n',)) encoding, consumed_lines = detect_encoding(readline) - self.assertEquals(encoding, 'utf-8') - self.assertEquals(consumed_lines, [b'print(something)\n']) + self.assertEqual(encoding, 'utf-8') + self.assertEqual(consumed_lines, [b'print(something)\n']) encoding, consumed_lines = detect_encoding(self.get_readline(())) - self.assertEquals(encoding, 'utf-8') - self.assertEquals(consumed_lines, []) + self.assertEqual(encoding, 'utf-8') + self.assertEqual(consumed_lines, []) readline = self.get_readline((b'\xef\xbb\xbfprint(something)\n',)) encoding, consumed_lines = detect_encoding(readline) - self.assertEquals(encoding, 'utf-8-sig') - self.assertEquals(consumed_lines, [b'print(something)\n']) + self.assertEqual(encoding, 'utf-8-sig') + self.assertEqual(consumed_lines, [b'print(something)\n']) readline = self.get_readline((b'\xef\xbb\xbf',)) encoding, consumed_lines = detect_encoding(readline) - self.assertEquals(encoding, 'utf-8-sig') - self.assertEquals(consumed_lines, []) + self.assertEqual(encoding, 'utf-8-sig') + self.assertEqual(consumed_lines, []) readline = self.get_readline((b'# coding: bad\n',)) self.assertRaises(SyntaxError, detect_encoding, readline) @@ -912,7 +912,7 @@ class TestTokenize(TestCase): tokenize_module._tokenize = mock__tokenize 
try: results = tokenize(mock_readline) - self.assertEquals(list(results), ['first', 'second', 1, 2, 3, 4]) + self.assertEqual(list(results), ['first', 'second', 1, 2, 3, 4]) finally: tokenize_module.detect_encoding = orig_detect_encoding tokenize_module._tokenize = orig__tokenize |