author     Serhiy Storchaka <storchaka@gmail.com>   2015-03-20 14:48:02 (GMT)
committer  Serhiy Storchaka <storchaka@gmail.com>   2015-03-20 14:48:02 (GMT)
commit     ee4c0b9dcfb550094cca086a032d44393b5c3642 (patch)
tree       c04129c6917ab6ea583c417066b03afb81828552 /Lib
parent     000391b7de634635b493b680f14b907d767d8583 (diff)
parent     74a49ac3f5ac3c7a09c691db4888c981a0cb3232 (diff)
Issue #23681: Fixed Python 2 to 3 porting bugs.
Indexing bytes returns an integer, not bytes.
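
In Python 3, indexing a bytes object yields an int, while slicing yields a
length-1 bytes object, so a comparison such as line[0] == CR can never be
true when CR is b'\r'. A minimal sketch of the pitfall (illustrative only,
not part of the commit):

    data = b'.abc'
    data[0]               # 46, an int in Python 3 (b'.' in Python 2)
    data[0] == b'.'       # False: int compared with bytes
    data[:1] == b'.'      # True: slicing preserves the bytes type
    data[0] == ord(b'.')  # True: int vs. int, the approach the smtpd fix takes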
Diffstat (limited to 'Lib')
-rw-r--r--  Lib/poplib.py              2
-rwxr-xr-x  Lib/quopri.py              2
-rwxr-xr-x  Lib/smtpd.py               2
-rw-r--r--  Lib/sunau.py               7
-rw-r--r--  Lib/test/test_buffer.py    8
-rw-r--r--  Lib/test/test_tokenize.py  7
6 files changed, 13 insertions, 15 deletions
diff --git a/Lib/poplib.py b/Lib/poplib.py
index 8ad9cb7..1224eac 100644
--- a/Lib/poplib.py
+++ b/Lib/poplib.py
@@ -136,7 +136,7 @@ class POP3:
         # so only possibilities are ...LF, ...CRLF, CR...LF
         if line[-2:] == CRLF:
             return line[:-2], octets
-        if line[0] == CR:
+        if line[:1] == CR:
             return line[1:-1], octets
         return line[:-1], octets
 
diff --git a/Lib/quopri.py b/Lib/quopri.py
index 46c2a4c..cbd979a 100755
--- a/Lib/quopri.py
+++ b/Lib/quopri.py
@@ -145,7 +145,7 @@ def decode(input, output, header=False):
                 new = new + c; i = i+1
             elif i+1 == n and not partial:
                 partial = 1; break
-            elif i+1 < n and line[i+1] == ESCAPE:
+            elif i+1 < n and line[i+1:i+2] == ESCAPE:
                 new = new + ESCAPE; i = i+2
             elif i+2 < n and ishex(line[i+1:i+2]) and ishex(line[i+2:i+3]):
                 new = new + bytes((unhex(line[i+1:i+3]),)); i = i+3
diff --git a/Lib/smtpd.py b/Lib/smtpd.py
index 33653d4..0fae170 100755
--- a/Lib/smtpd.py
+++ b/Lib/smtpd.py
@@ -154,7 +154,7 @@ class SMTPChannel(asynchat.async_chat):
         else:
             self._emptystring = b''
             self._linesep = b'\r\n'
-            self._dotsep = b'.'
+            self._dotsep = ord(b'.')
             self._newline = b'\n'
         self._set_rset_state()
         self.seen_greeting = ''
diff --git a/Lib/sunau.py b/Lib/sunau.py
index 3c24492..3da41b7 100644
--- a/Lib/sunau.py
+++ b/Lib/sunau.py
@@ -210,12 +210,9 @@ class Au_read:
             self._framesize = self._framesize * self._nchannels
         if self._hdr_size > 24:
             self._info = file.read(self._hdr_size - 24)
-            for i in range(len(self._info)):
-                if self._info[i] == b'\0':
-                    self._info = self._info[:i]
-                    break
+            self._info, _, _ = self._info.partition(b'\0')
         else:
-            self._info = ''
+            self._info = b''
         try:
             self._data_pos = file.tell()
         except (AttributeError, OSError):
diff --git a/Lib/test/test_buffer.py b/Lib/test/test_buffer.py
index 0d672dd..6803156 100644
--- a/Lib/test/test_buffer.py
+++ b/Lib/test/test_buffer.py
@@ -150,15 +150,15 @@ def randrange_fmt(mode, char, obj):
        format character."""
     x = randrange(*fmtdict[mode][char])
     if char == 'c':
-        x = bytes(chr(x), 'latin1')
+        x = bytes([x])
+    if obj == 'numpy' and x == b'\x00':
+        # http://projects.scipy.org/numpy/ticket/1925
+        x = b'\x01'
     if char == '?':
         x = bool(x)
     if char == 'f' or char == 'd':
         x = struct.pack(char, x)
         x = struct.unpack(char, x)[0]
-    if obj == 'numpy' and x == b'\x00':
-        # http://projects.scipy.org/numpy/ticket/1925
-        x = b'\x01'
     return x
 
 def gen_item(fmt, obj):
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 8f74a06..03f6148 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -1068,7 +1068,7 @@ class TestTokenize(TestCase):
         encoding = object()
         encoding_used = None
         def mock_detect_encoding(readline):
-            return encoding, ['first', 'second']
+            return encoding, [b'first', b'second']
 
         def mock__tokenize(readline, encoding):
             nonlocal encoding_used
@@ -1087,7 +1087,7 @@ class TestTokenize(TestCase):
             counter += 1
             if counter == 5:
                 return b''
-            return counter
+            return str(counter).encode()
 
         orig_detect_encoding = tokenize_module.detect_encoding
         orig__tokenize = tokenize_module._tokenize
@@ -1095,7 +1095,8 @@ class TestTokenize(TestCase):
         tokenize_module._tokenize = mock__tokenize
         try:
             results = tokenize(mock_readline)
-            self.assertEqual(list(results), ['first', 'second', 1, 2, 3, 4])
+            self.assertEqual(list(results),
+                             [b'first', b'second', b'1', b'2', b'3', b'4'])
         finally:
             tokenize_module.detect_encoding = orig_detect_encoding
             tokenize_module._tokenize = orig__tokenize
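
A side note on the sunau change: bytes.partition truncates at the first NUL
in a single step, replacing the index-based loop. A quick sketch of the
equivalence (the sample header text is made up):

    info = b'created by sox\x00\x00\x00'
    info.partition(b'\0')[0]   # b'created by sox'
    # The old loop compared self._info[i] (an int) with b'\0' (bytes),
    # so the test never matched and the trailing NULs were kept.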