Diffstat (limited to 'Lib')
-rw-r--r--   Lib/codecs.py                    23
-rw-r--r--   Lib/encodings/aliases.py         36
-rw-r--r--   Lib/encodings/base64_codec.py    55
-rw-r--r--   Lib/encodings/bz2_codec.py       77
-rw-r--r--   Lib/encodings/hex_codec.py       55
-rw-r--r--   Lib/encodings/quopri_codec.py    56
-rwxr-xr-x   Lib/encodings/rot_13.py         113
-rw-r--r--   Lib/encodings/uu_codec.py        99
-rw-r--r--   Lib/encodings/zlib_codec.py      77
-rw-r--r--   Lib/test/test_bytes.py            5
-rw-r--r--   Lib/test/test_codecs.py          62
11 files changed, 630 insertions, 28 deletions
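
The patch re-enables the bytes-to-bytes codecs (base64, bz2, hex, quopri, rot_13, uu, zlib) as encodings modules and teaches codecs.StreamReader to buffer bytes as well as str. For orientation before the diff itself, here is a minimal sketch of the round trip that the new TransformCodecTest.test_basics performs through the generic codecs interface; the bytes.transform()/untransform() methods exercised in test_bytes.py only exist with this patch applied, so the sketch sticks to getencoder()/getdecoder():

    import codecs

    # Round-trip a byte string through one of the re-enabled bytes-to-bytes
    # codecs, as TransformCodecTest.test_basics does. Each call returns an
    # (output, length_consumed) tuple.
    payload = bytes(range(256))

    encoded, consumed = codecs.getencoder("base64_codec")(payload)
    assert consumed == len(payload)

    decoded, consumed = codecs.getdecoder("base64_codec")(encoded)
    assert consumed == len(encoded)
    assert decoded == payload
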
diff --git a/Lib/codecs.py b/Lib/codecs.py
index f6c2448..b150d64 100644
--- a/Lib/codecs.py
+++ b/Lib/codecs.py
@@ -396,6 +396,8 @@ class StreamWriter(Codec):
 
 class StreamReader(Codec):
 
+    charbuffertype = str
+
     def __init__(self, stream, errors='strict'):
 
         """ Creates a StreamReader instance.
@@ -417,9 +419,8 @@ class StreamReader(Codec):
         self.stream = stream
         self.errors = errors
         self.bytebuffer = b""
-        # For str->str decoding this will stay a str
-        # For str->unicode decoding the first read will promote it to unicode
-        self.charbuffer = ""
+        self._empty_charbuffer = self.charbuffertype()
+        self.charbuffer = self._empty_charbuffer
         self.linebuffer = None
 
     def decode(self, input, errors='strict'):
@@ -455,7 +456,7 @@ class StreamReader(Codec):
         """
         # If we have lines cached, first merge them back into characters
         if self.linebuffer:
-            self.charbuffer = "".join(self.linebuffer)
+            self.charbuffer = self._empty_charbuffer.join(self.linebuffer)
             self.linebuffer = None
 
         # read until we get the required number of characters (if available)
@@ -498,7 +499,7 @@ class StreamReader(Codec):
             if chars < 0:
                 # Return everything we've got
                 result = self.charbuffer
-                self.charbuffer = ""
+                self.charbuffer = self._empty_charbuffer
             else:
                 # Return the first chars characters
                 result = self.charbuffer[:chars]
@@ -529,7 +530,7 @@ class StreamReader(Codec):
            return line
 
         readsize = size or 72
-        line = ""
+        line = self._empty_charbuffer
         # If size is given, we call read() only once
         while True:
             data = self.read(readsize, firstline=True)
@@ -537,7 +538,8 @@ class StreamReader(Codec):
                 # If we're at a "\r" read one extra character (which might
                 # be a "\n") to get a proper line ending. If the stream is
                 # temporarily exhausted we return the wrong line ending.
-                if data.endswith("\r"):
+                if (isinstance(data, str) and data.endswith("\r")) or \
+                   (isinstance(data, bytes) and data.endswith(b"\r")):
                     data += self.read(size=1, chars=1)
 
             line += data
@@ -563,7 +565,8 @@ class StreamReader(Codec):
                     line0withoutend = lines[0].splitlines(False)[0]
                     if line0withend != line0withoutend: # We really have a line end
                         # Put the rest back together and keep it until the next call
-                        self.charbuffer = "".join(lines[1:]) + self.charbuffer
+                        self.charbuffer = self._empty_charbuffer.join(lines[1:]) + \
+                                          self.charbuffer
                         if keepends:
                             line = line0withend
                         else:
@@ -574,7 +577,7 @@ class StreamReader(Codec):
             if line and not keepends:
                 line = line.splitlines(False)[0]
                 break
-            if readsize<8000:
+            if readsize < 8000:
                 readsize *= 2
         return line
 
@@ -603,7 +606,7 @@ class StreamReader(Codec):
 
         """
         self.bytebuffer = b""
-        self.charbuffer = ""
+        self.charbuffer = self._empty_charbuffer
         self.linebuffer = None
 
     def seek(self, offset, whence=0):
diff --git a/Lib/encodings/aliases.py b/Lib/encodings/aliases.py
index 235deb5..331095b 100644
--- a/Lib/encodings/aliases.py
+++ b/Lib/encodings/aliases.py
@@ -33,9 +33,9 @@ aliases = {
     'us' : 'ascii',
     'us_ascii' : 'ascii',
 
-    ## base64_codec codec
-    #'base64' : 'base64_codec',
-    #'base_64' : 'base64_codec',
+    # base64_codec codec
+    'base64' : 'base64_codec',
+    'base_64' : 'base64_codec',
 
     # big5 codec
     'big5_tw' : 'big5',
@@ -45,8 +45,8 @@ aliases = {
     'big5_hkscs' : 'big5hkscs',
     'hkscs' : 'big5hkscs',
 
-    ## bz2_codec codec
-    #'bz2' : 'bz2_codec',
+    # bz2_codec codec
+    'bz2' : 'bz2_codec',
 
     # cp037 codec
     '037' : 'cp037',
@@ -248,8 +248,8 @@ aliases = {
     'cp936' : 'gbk',
     'ms936' : 'gbk',
 
-    ## hex_codec codec
-    #'hex' : 'hex_codec',
+    # hex_codec codec
+    'hex' : 'hex_codec',
 
     # hp_roman8 codec
     'roman8' : 'hp_roman8',
@@ -450,13 +450,13 @@ aliases = {
     'cp154' : 'ptcp154',
     'cyrillic_asian' : 'ptcp154',
 
-    ## quopri_codec codec
-    #'quopri' : 'quopri_codec',
-    #'quoted_printable' : 'quopri_codec',
-    #'quotedprintable' : 'quopri_codec',
+    # quopri_codec codec
+    'quopri' : 'quopri_codec',
+    'quoted_printable' : 'quopri_codec',
+    'quotedprintable' : 'quopri_codec',
 
-    ## rot_13 codec
-    #'rot13' : 'rot_13',
+    # rot_13 codec
+    'rot13' : 'rot_13',
 
     # shift_jis codec
     'csshiftjis' : 'shift_jis',
@@ -518,12 +518,12 @@ aliases = {
     'utf8_ucs2' : 'utf_8',
     'utf8_ucs4' : 'utf_8',
 
-    ## uu_codec codec
-    #'uu' : 'uu_codec',
+    # uu_codec codec
+    'uu' : 'uu_codec',
 
-    ## zlib_codec codec
-    #'zip' : 'zlib_codec',
-    #'zlib' : 'zlib_codec',
+    # zlib_codec codec
+    'zip' : 'zlib_codec',
+    'zlib' : 'zlib_codec',
 
     # temporary mac CJK aliases, will be replaced by proper codecs in 3.1
     'x_mac_japanese' : 'shift_jis',
diff --git a/Lib/encodings/base64_codec.py b/Lib/encodings/base64_codec.py
new file mode 100644
index 0000000..e8b19ee
--- /dev/null
+++ b/Lib/encodings/base64_codec.py
@@ -0,0 +1,55 @@
+"""Python 'base64_codec' Codec - base64 content transfer encoding.
+
+This codec de/encodes from bytes to bytes and is therefore usable with
+bytes.transform() and bytes.untransform().
+
+Written by Marc-Andre Lemburg (mal@lemburg.com).
+"""
+
+import codecs
+import base64
+
+### Codec APIs
+
+def base64_encode(input, errors='strict'):
+    assert errors == 'strict'
+    return (base64.encodestring(input), len(input))
+
+def base64_decode(input, errors='strict'):
+    assert errors == 'strict'
+    return (base64.decodestring(input), len(input))
+
+class Codec(codecs.Codec):
+    def encode(self, input, errors='strict'):
+        return base64_encode(input, errors)
+    def decode(self, input, errors='strict'):
+        return base64_decode(input, errors)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+    def encode(self, input, final=False):
+        assert self.errors == 'strict'
+        return base64.encodestring(input)
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+    def decode(self, input, final=False):
+        assert self.errors == 'strict'
+        return base64.decodestring(input)
+
+class StreamWriter(Codec, codecs.StreamWriter):
+    charbuffertype = bytes
+
+class StreamReader(Codec, codecs.StreamReader):
+    charbuffertype = bytes
+
+### encodings module API
+
+def getregentry():
+    return codecs.CodecInfo(
+        name='base64',
+        encode=base64_encode,
+        decode=base64_decode,
+        incrementalencoder=IncrementalEncoder,
+        incrementaldecoder=IncrementalDecoder,
+        streamwriter=StreamWriter,
+        streamreader=StreamReader,
+    )
diff --git a/Lib/encodings/bz2_codec.py b/Lib/encodings/bz2_codec.py
new file mode 100644
index 0000000..e65d226
--- /dev/null
+++ b/Lib/encodings/bz2_codec.py
@@ -0,0 +1,77 @@
+"""Python 'bz2_codec' Codec - bz2 compression encoding.
+
+This codec de/encodes from bytes to bytes and is therefore usable with
+bytes.transform() and bytes.untransform().
+
+Adapted by Raymond Hettinger from zlib_codec.py which was written
+by Marc-Andre Lemburg (mal@lemburg.com).
+"""
+
+import codecs
+import bz2 # this codec needs the optional bz2 module !
+
+### Codec APIs
+
+def bz2_encode(input, errors='strict'):
+    assert errors == 'strict'
+    return (bz2.compress(input), len(input))
+
+def bz2_decode(input, errors='strict'):
+    assert errors == 'strict'
+    return (bz2.decompress(input), len(input))
+
+class Codec(codecs.Codec):
+    def encode(self, input, errors='strict'):
+        return bz2_encode(input, errors)
+    def decode(self, input, errors='strict'):
+        return bz2_decode(input, errors)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+    def __init__(self, errors='strict'):
+        assert errors == 'strict'
+        self.errors = errors
+        self.compressobj = bz2.BZ2Compressor()
+
+    def encode(self, input, final=False):
+        if final:
+            c = self.compressobj.compress(input)
+            return c + self.compressobj.flush()
+        else:
+            return self.compressobj.compress(input)
+
+    def reset(self):
+        self.compressobj = bz2.BZ2Compressor()
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+    def __init__(self, errors='strict'):
+        assert errors == 'strict'
+        self.errors = errors
+        self.decompressobj = bz2.BZ2Decompressor()
+
+    def decode(self, input, final=False):
+        try:
+            return self.decompressobj.decompress(input)
+        except EOFError:
+            return ''
+
+    def reset(self):
+        self.decompressobj = bz2.BZ2Decompressor()
+
+class StreamWriter(Codec, codecs.StreamWriter):
+    charbuffertype = bytes
+
+class StreamReader(Codec, codecs.StreamReader):
+    charbuffertype = bytes
+
+### encodings module API
+
+def getregentry():
+    return codecs.CodecInfo(
+        name="bz2",
+        encode=bz2_encode,
+        decode=bz2_decode,
+        incrementalencoder=IncrementalEncoder,
+        incrementaldecoder=IncrementalDecoder,
+        streamwriter=StreamWriter,
+        streamreader=StreamReader,
+    )
diff --git a/Lib/encodings/hex_codec.py b/Lib/encodings/hex_codec.py
new file mode 100644
index 0000000..e003fc3
--- /dev/null
+++ b/Lib/encodings/hex_codec.py
@@ -0,0 +1,55 @@
+"""Python 'hex_codec' Codec - 2-digit hex content transfer encoding.
+
+This codec de/encodes from bytes to bytes and is therefore usable with
+bytes.transform() and bytes.untransform().
+
+Written by Marc-Andre Lemburg (mal@lemburg.com).
+"""
+
+import codecs
+import binascii
+
+### Codec APIs
+
+def hex_encode(input, errors='strict'):
+    assert errors == 'strict'
+    return (binascii.b2a_hex(input), len(input))
+
+def hex_decode(input, errors='strict'):
+    assert errors == 'strict'
+    return (binascii.a2b_hex(input), len(input))
+
+class Codec(codecs.Codec):
+    def encode(self, input, errors='strict'):
+        return hex_encode(input, errors)
+    def decode(self, input, errors='strict'):
+        return hex_decode(input, errors)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+    def encode(self, input, final=False):
+        assert self.errors == 'strict'
+        return binascii.b2a_hex(input)
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+    def decode(self, input, final=False):
+        assert self.errors == 'strict'
+        return binascii.a2b_hex(input)
+
+class StreamWriter(Codec, codecs.StreamWriter):
+    charbuffertype = bytes
+
+class StreamReader(Codec, codecs.StreamReader):
+    charbuffertype = bytes
+
+### encodings module API
+
+def getregentry():
+    return codecs.CodecInfo(
+        name='hex',
+        encode=hex_encode,
+        decode=hex_decode,
+        incrementalencoder=IncrementalEncoder,
+        incrementaldecoder=IncrementalDecoder,
+        streamwriter=StreamWriter,
+        streamreader=StreamReader,
+    )
diff --git a/Lib/encodings/quopri_codec.py b/Lib/encodings/quopri_codec.py
new file mode 100644
index 0000000..9243fc4
--- /dev/null
+++ b/Lib/encodings/quopri_codec.py
@@ -0,0 +1,56 @@
+"""Codec for quoted-printable encoding.
+
+This codec de/encodes from bytes to bytes and is therefore usable with
+bytes.transform() and bytes.untransform().
+"""
+
+import codecs
+import quopri
+from io import BytesIO
+
+def quopri_encode(input, errors='strict'):
+    assert errors == 'strict'
+    f = BytesIO(input)
+    g = BytesIO()
+    quopri.encode(f, g, 1)
+    return (g.getvalue(), len(input))
+
+def quopri_decode(input, errors='strict'):
+    assert errors == 'strict'
+    f = BytesIO(input)
+    g = BytesIO()
+    quopri.decode(f, g)
+    return (g.getvalue(), len(input))
+
+class Codec(codecs.Codec):
+    def encode(self, input, errors='strict'):
+        return quopri_encode(input, errors)
+    def decode(self, input, errors='strict'):
+        return quopri_decode(input, errors)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+    def encode(self, input, final=False):
+        return quopri_encode(input, self.errors)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+    def decode(self, input, final=False):
+        return quopri_decode(input, self.errors)[0]
+
+class StreamWriter(Codec, codecs.StreamWriter):
+    charbuffertype = bytes
+
+class StreamReader(Codec, codecs.StreamReader):
+    charbuffertype = bytes
+
+# encodings module API
+
+def getregentry():
+    return codecs.CodecInfo(
+        name='quopri',
+        encode=quopri_encode,
+        decode=quopri_decode,
+        incrementalencoder=IncrementalEncoder,
+        incrementaldecoder=IncrementalDecoder,
+        streamwriter=StreamWriter,
+        streamreader=StreamReader,
+    )
diff --git a/Lib/encodings/rot_13.py b/Lib/encodings/rot_13.py
new file mode 100755
index 0000000..3140c14
--- /dev/null
+++ b/Lib/encodings/rot_13.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python
+""" Python Character Mapping Codec for ROT13.
+
+This codec de/encodes from str to str and is therefore usable with
+str.transform() and str.untransform().
+
+Written by Marc-Andre Lemburg (mal@lemburg.com).
+"""
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+    def encode(self, input, errors='strict'):
+        return (input.translate(rot13_map), len(input))
+
+    def decode(self, input, errors='strict'):
+        return (input.translate(rot13_map), len(input))
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+    def encode(self, input, final=False):
+        return input.translate(rot13_map)
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+    def decode(self, input, final=False):
+        return input.translate(rot13_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+    return codecs.CodecInfo(
+        name='rot-13',
+        encode=Codec().encode,
+        decode=Codec().decode,
+        incrementalencoder=IncrementalEncoder,
+        incrementaldecoder=IncrementalDecoder,
+        streamwriter=StreamWriter,
+        streamreader=StreamReader,
+    )
+
+### Map
+
+rot13_map = codecs.make_identity_dict(range(256))
+rot13_map.update({
+    0x0041: 0x004e,
+    0x0042: 0x004f,
+    0x0043: 0x0050,
+    0x0044: 0x0051,
+    0x0045: 0x0052,
+    0x0046: 0x0053,
+    0x0047: 0x0054,
+    0x0048: 0x0055,
+    0x0049: 0x0056,
+    0x004a: 0x0057,
+    0x004b: 0x0058,
+    0x004c: 0x0059,
+    0x004d: 0x005a,
+    0x004e: 0x0041,
+    0x004f: 0x0042,
+    0x0050: 0x0043,
+    0x0051: 0x0044,
+    0x0052: 0x0045,
+    0x0053: 0x0046,
+    0x0054: 0x0047,
+    0x0055: 0x0048,
+    0x0056: 0x0049,
+    0x0057: 0x004a,
+    0x0058: 0x004b,
+    0x0059: 0x004c,
+    0x005a: 0x004d,
+    0x0061: 0x006e,
+    0x0062: 0x006f,
+    0x0063: 0x0070,
+    0x0064: 0x0071,
+    0x0065: 0x0072,
+    0x0066: 0x0073,
+    0x0067: 0x0074,
+    0x0068: 0x0075,
+    0x0069: 0x0076,
+    0x006a: 0x0077,
+    0x006b: 0x0078,
+    0x006c: 0x0079,
+    0x006d: 0x007a,
+    0x006e: 0x0061,
+    0x006f: 0x0062,
+    0x0070: 0x0063,
+    0x0071: 0x0064,
+    0x0072: 0x0065,
+    0x0073: 0x0066,
+    0x0074: 0x0067,
+    0x0075: 0x0068,
+    0x0076: 0x0069,
+    0x0077: 0x006a,
+    0x0078: 0x006b,
+    0x0079: 0x006c,
+    0x007a: 0x006d,
+})
+
+### Filter API
+
+def rot13(infile, outfile):
+    outfile.write(infile.read().encode('rot-13'))
+
+if __name__ == '__main__':
+    import sys
+    rot13(sys.stdin, sys.stdout)
diff --git a/Lib/encodings/uu_codec.py b/Lib/encodings/uu_codec.py
new file mode 100644
index 0000000..69c6f17
--- /dev/null
+++ b/Lib/encodings/uu_codec.py
@@ -0,0 +1,99 @@
+"""Python 'uu_codec' Codec - UU content transfer encoding.
+
+This codec de/encodes from bytes to bytes and is therefore usable with
+bytes.transform() and bytes.untransform().
+
+Written by Marc-Andre Lemburg (mal@lemburg.com). Some details were
+adapted from uu.py which was written by Lance Ellinghouse and
+modified by Jack Jansen and Fredrik Lundh.
+"""
+
+import codecs
+import binascii
+from io import BytesIO
+
+### Codec APIs
+
+def uu_encode(input, errors='strict', filename='<data>', mode=0o666):
+    assert errors == 'strict'
+    infile = BytesIO(input)
+    outfile = BytesIO()
+    read = infile.read
+    write = outfile.write
+
+    # Encode
+    write(('begin %o %s\n' % (mode & 0o777, filename)).encode('ascii'))
+    chunk = read(45)
+    while chunk:
+        write(binascii.b2a_uu(chunk))
+        chunk = read(45)
+    write(b' \nend\n')
+
+    return (outfile.getvalue(), len(input))
+
+def uu_decode(input, errors='strict'):
+    assert errors == 'strict'
+    infile = BytesIO(input)
+    outfile = BytesIO()
+    readline = infile.readline
+    write = outfile.write
+
+    # Find start of encoded data
+    while 1:
+        s = readline()
+        if not s:
+            raise ValueError('Missing "begin" line in input data')
+        if s[:5] == b'begin':
+            break
+
+    # Decode
+    while True:
+        s = readline()
+        if not s or s == b'end\n':
+            break
+        try:
+            data = binascii.a2b_uu(s)
+        except binascii.Error as v:
+            # Workaround for broken uuencoders by /Fredrik Lundh
+            nbytes = (((ord(s[0])-32) & 63) * 4 + 5) / 3
+            data = binascii.a2b_uu(s[:nbytes])
+            #sys.stderr.write("Warning: %s\n" % str(v))
+        write(data)
+    if not s:
+        raise ValueError('Truncated input data')
+
+    return (outfile.getvalue(), len(input))
+
+class Codec(codecs.Codec):
+    def encode(self, input, errors='strict'):
+        return uu_encode(input, errors)
+
+    def decode(self, input, errors='strict'):
+        return uu_decode(input, errors)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+    def encode(self, input, final=False):
+        return uu_encode(input, self.errors)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+    def decode(self, input, final=False):
+        return uu_decode(input, self.errors)[0]
+
+class StreamWriter(Codec, codecs.StreamWriter):
+    charbuffertype = bytes
+
+class StreamReader(Codec, codecs.StreamReader):
+    charbuffertype = bytes
+
+### encodings module API
+
+def getregentry():
+    return codecs.CodecInfo(
+        name='uu',
+        encode=uu_encode,
+        decode=uu_decode,
+        incrementalencoder=IncrementalEncoder,
+        incrementaldecoder=IncrementalDecoder,
+        streamreader=StreamReader,
+        streamwriter=StreamWriter,
+    )
diff --git a/Lib/encodings/zlib_codec.py b/Lib/encodings/zlib_codec.py
new file mode 100644
index 0000000..e0b9cda
--- /dev/null
+++ b/Lib/encodings/zlib_codec.py
@@ -0,0 +1,77 @@
+"""Python 'zlib_codec' Codec - zlib compression encoding.
+
+This codec de/encodes from bytes to bytes and is therefore usable with
+bytes.transform() and bytes.untransform().
+
+Written by Marc-Andre Lemburg (mal@lemburg.com).
+"""
+
+import codecs
+import zlib # this codec needs the optional zlib module !
+
+### Codec APIs
+
+def zlib_encode(input, errors='strict'):
+    assert errors == 'strict'
+    return (zlib.compress(input), len(input))
+
+def zlib_decode(input, errors='strict'):
+    assert errors == 'strict'
+    return (zlib.decompress(input), len(input))
+
+class Codec(codecs.Codec):
+    def encode(self, input, errors='strict'):
+        return zlib_encode(input, errors)
+    def decode(self, input, errors='strict'):
+        return zlib_decode(input, errors)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+    def __init__(self, errors='strict'):
+        assert errors == 'strict'
+        self.errors = errors
+        self.compressobj = zlib.compressobj()
+
+    def encode(self, input, final=False):
+        if final:
+            c = self.compressobj.compress(input)
+            return c + self.compressobj.flush()
+        else:
+            return self.compressobj.compress(input)
+
+    def reset(self):
+        self.compressobj = zlib.compressobj()
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+    def __init__(self, errors='strict'):
+        assert errors == 'strict'
+        self.errors = errors
+        self.decompressobj = zlib.decompressobj()
+
+    def decode(self, input, final=False):
+        if final:
+            c = self.decompressobj.decompress(input)
+            return c + self.decompressobj.flush()
+        else:
+            return self.decompressobj.decompress(input)
+
+    def reset(self):
+        self.decompressobj = zlib.decompressobj()
+
+class StreamWriter(Codec, codecs.StreamWriter):
+    charbuffertype = bytes
+
+class StreamReader(Codec, codecs.StreamReader):
+    charbuffertype = bytes
+
+### encodings module API
+
+def getregentry():
+    return codecs.CodecInfo(
+        name='zlib',
+        encode=zlib_encode,
+        decode=zlib_decode,
+        incrementalencoder=IncrementalEncoder,
+        incrementaldecoder=IncrementalDecoder,
+        streamreader=StreamReader,
+        streamwriter=StreamWriter,
+    )
diff --git a/Lib/test/test_bytes.py b/Lib/test/test_bytes.py
index 24ee487..49b50f2 100644
--- a/Lib/test/test_bytes.py
+++ b/Lib/test/test_bytes.py
@@ -207,6 +207,11 @@ class BaseBytesTest(unittest.TestCase):
         self.assertEqual(b.decode(errors="ignore", encoding="utf8"),
                          "Hello world\n")
 
+    def test_transform(self):
+        b1 = self.type2test(range(256))
+        b2 = b1.transform("base64").untransform("base64")
+        self.assertEqual(b2, b1)
+
     def test_from_int(self):
         b = self.type2test(0)
         self.assertEqual(b, self.type2test())
diff --git a/Lib/test/test_codecs.py b/Lib/test/test_codecs.py
index f989a55..bc29e06 100644
--- a/Lib/test/test_codecs.py
+++ b/Lib/test/test_codecs.py
@@ -1659,6 +1659,67 @@ class BomTest(unittest.TestCase):
             self.assertEqual(f.read(), data * 2)
 
 
+bytes_transform_encodings = [
+    "base64_codec",
+    "uu_codec",
+    "quopri_codec",
+    "hex_codec",
+]
+try:
+    import zlib
+except ImportError:
+    pass
+else:
+    bytes_transform_encodings.append("zlib_codec")
+try:
+    import bz2
+except ImportError:
+    pass
+else:
+    bytes_transform_encodings.append("bz2_codec")
+
+class TransformCodecTest(unittest.TestCase):
+    def test_basics(self):
+        binput = bytes(range(256))
+        ainput = bytearray(binput)
+        for encoding in bytes_transform_encodings:
+            # generic codecs interface
+            (o, size) = codecs.getencoder(encoding)(binput)
+            self.assertEqual(size, len(binput))
+            (i, size) = codecs.getdecoder(encoding)(o)
+            self.assertEqual(size, len(o))
+            self.assertEqual(i, binput)
+
+            # transform interface
+            boutput = binput.transform(encoding)
+            aoutput = ainput.transform(encoding)
+            self.assertEqual(boutput, aoutput)
+            self.assertIsInstance(boutput, bytes)
+            self.assertIsInstance(aoutput, bytearray)
+            bback = boutput.untransform(encoding)
+            aback = aoutput.untransform(encoding)
+            self.assertEqual(bback, aback)
+            self.assertEqual(bback, binput)
+            self.assertIsInstance(bback, bytes)
+            self.assertIsInstance(aback, bytearray)
+
+    def test_read(self):
+        for encoding in bytes_transform_encodings:
+            sin = b"\x80".transform(encoding)
+            reader = codecs.getreader(encoding)(io.BytesIO(sin))
+            sout = reader.read()
+            self.assertEqual(sout, b"\x80")
+
+    def test_readline(self):
+        for encoding in bytes_transform_encodings:
+            if encoding in ['uu_codec', 'zlib_codec']:
+                continue
+            sin = b"\x80".transform(encoding)
+            reader = codecs.getreader(encoding)(io.BytesIO(sin))
+            sout = reader.readline()
+            self.assertEqual(sout, b"\x80")
+
+
 def test_main():
     support.run_unittest(
         UTF32Test,
@@ -1686,6 +1747,7 @@ def test_main():
         TypesTest,
         SurrogateEscapeTest,
         BomTest,
+        TransformCodecTest,
     )
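
The change that makes the stream classes above work is the new charbuffertype class attribute on codecs.StreamReader: a reader whose codec produces bytes sets charbuffertype = bytes, and the internal character buffer, the linebuffer joins, and the readline() line-ending check then operate on bytes instead of str. A minimal sketch of that path from the caller's side, mirroring test_read above (hex_codec is one of the codecs registered by this patch):

    import codecs
    import io

    # Encode one byte to its two-digit hex form, then read it back through
    # the codec's StreamReader, which buffers bytes because the class sets
    # charbuffertype = bytes.
    encoded = codecs.getencoder("hex_codec")(b"\x80")[0]        # b"80"
    reader = codecs.getreader("hex_codec")(io.BytesIO(encoded))
    assert reader.read() == b"\x80"
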