summaryrefslogtreecommitdiffstats
path: root/Lib/test
diff options
context:
space:
mode:
authorSerhiy Storchaka <storchaka@gmail.com>2015-03-20 14:48:02 (GMT)
committerSerhiy Storchaka <storchaka@gmail.com>2015-03-20 14:48:02 (GMT)
commitee4c0b9dcfb550094cca086a032d44393b5c3642 (patch)
treec04129c6917ab6ea583c417066b03afb81828552 /Lib/test
parent000391b7de634635b493b680f14b907d767d8583 (diff)
parent74a49ac3f5ac3c7a09c691db4888c981a0cb3232 (diff)
downloadcpython-ee4c0b9dcfb550094cca086a032d44393b5c3642.zip
cpython-ee4c0b9dcfb550094cca086a032d44393b5c3642.tar.gz
cpython-ee4c0b9dcfb550094cca086a032d44393b5c3642.tar.bz2
Issue #23681: Fixed Python 2 to 3 porting bugs.
Indexing bytes returns an integer, not bytes.
Diffstat (limited to 'Lib/test')
-rw-r--r--Lib/test/test_buffer.py8
-rw-r--r--Lib/test/test_tokenize.py7
2 files changed, 8 insertions, 7 deletions
diff --git a/Lib/test/test_buffer.py b/Lib/test/test_buffer.py
index 0d672dd..6803156 100644
--- a/Lib/test/test_buffer.py
+++ b/Lib/test/test_buffer.py
@@ -150,15 +150,15 @@ def randrange_fmt(mode, char, obj):
format character."""
x = randrange(*fmtdict[mode][char])
if char == 'c':
- x = bytes(chr(x), 'latin1')
+ x = bytes([x])
+ if obj == 'numpy' and x == b'\x00':
+ # http://projects.scipy.org/numpy/ticket/1925
+ x = b'\x01'
if char == '?':
x = bool(x)
if char == 'f' or char == 'd':
x = struct.pack(char, x)
x = struct.unpack(char, x)[0]
- if obj == 'numpy' and x == b'\x00':
- # http://projects.scipy.org/numpy/ticket/1925
- x = b'\x01'
return x
def gen_item(fmt, obj):
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 8f74a06..03f6148 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -1068,7 +1068,7 @@ class TestTokenize(TestCase):
encoding = object()
encoding_used = None
def mock_detect_encoding(readline):
- return encoding, ['first', 'second']
+ return encoding, [b'first', b'second']
def mock__tokenize(readline, encoding):
nonlocal encoding_used
@@ -1087,7 +1087,7 @@ class TestTokenize(TestCase):
counter += 1
if counter == 5:
return b''
- return counter
+ return str(counter).encode()
orig_detect_encoding = tokenize_module.detect_encoding
orig__tokenize = tokenize_module._tokenize
@@ -1095,7 +1095,8 @@ class TestTokenize(TestCase):
tokenize_module._tokenize = mock__tokenize
try:
results = tokenize(mock_readline)
- self.assertEqual(list(results), ['first', 'second', 1, 2, 3, 4])
+ self.assertEqual(list(results),
+ [b'first', b'second', b'1', b'2', b'3', b'4'])
finally:
tokenize_module.detect_encoding = orig_detect_encoding
tokenize_module._tokenize = orig__tokenize