author    Georg Brandl <georg@python.org>    2013-05-12 09:29:27 (GMT)
committer Georg Brandl <georg@python.org>    2013-05-12 09:29:27 (GMT)
commit    93b061bc3e1c9285ec1ce6405b85d3a1e072833f (patch)
tree      8af0398de702f21e033f4d7960dc7f8a5cade00a /Lib
parent    a9217a42e62dc8dcf9bb6a184bcf9d5de97d2d9e (diff)
download  cpython-93b061bc3e1c9285ec1ce6405b85d3a1e072833f.zip
          cpython-93b061bc3e1c9285ec1ce6405b85d3a1e072833f.tar.gz
          cpython-93b061bc3e1c9285ec1ce6405b85d3a1e072833f.tar.bz2
Issue #1159051: Back out a fix for handling corrupted gzip files that
broke backwards compatibility.
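
The fix being backed out had changed how GzipFile reports truncated input (raising EOFError with an explicit message when the stream ends before the end-of-stream marker). The snippet below is a minimal sketch for probing that behaviour, not part of this commit; the payload and the exact exception observed are assumptions for illustration and differ depending on which side of this back-out is running.

    import gzip, io

    payload = b"example data " * 100
    blob = gzip.compress(payload)
    truncated = blob[:-8]      # drop the 8-byte trailer (CRC32 + ISIZE)

    try:
        with gzip.GzipFile(fileobj=io.BytesIO(truncated)) as f:
            f.read()
    except Exception as exc:   # EOFError, struct.error, or IOError depending on the version
        print(type(exc).__name__, exc)
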
Diffstat (limited to 'Lib')
-rw-r--r--               Lib/gzip.py             73
-rw-r--r--               Lib/test/test_bz2.py    18
-rwxr-xr-x [-rw-r--r--]  Lib/test/test_gzip.py   13
3 files changed, 38 insertions, 66 deletions
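
The heart of the revert in Lib/gzip.py is visible in the first hunks below: the module-level read32() helper returns, while the _read_exact() method introduced by the original fix is removed. As a rough standalone sketch (the stream parameter is hypothetical, standing in for self.fileobj), the two read strategies differ as follows:

    import struct

    def read32(stream):
        # Restored helper: unpack whatever read(4) returns.  On a truncated
        # file a short read surfaces as struct.error, not a gzip-specific error.
        return struct.unpack("<I", stream.read(4))[0]

    def read_exact(stream, n):
        # Behaviour of the backed-out _read_exact(): keep reading until n bytes
        # arrive, and raise EOFError if the stream ends early.
        data = stream.read(n)
        while len(data) < n:
            b = stream.read(n - len(data))
            if not b:
                raise EOFError("Compressed file ended before the "
                               "end-of-stream marker was reached")
            data += b
        return data
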
diff --git a/Lib/gzip.py b/Lib/gzip.py
index 0adfd3f..6aacc9a 100644
--- a/Lib/gzip.py
+++ b/Lib/gzip.py
@@ -33,6 +33,9 @@ def write32u(output, value):
# or unsigned.
output.write(struct.pack("<L", value))
+def read32(input):
+ return struct.unpack("<I", input.read(4))[0]
+
def open(filename, mode="rb", compresslevel=9):
"""Shorthand for GzipFile(filename, mode, compresslevel).
@@ -256,32 +259,27 @@ class GzipFile(io.BufferedIOBase):
self.crc = zlib.crc32(b"") & 0xffffffff
self.size = 0
- def _read_exact(self, n):
- data = self.fileobj.read(n)
- while len(data) < n:
- b = self.fileobj.read(n - len(data))
- if not b:
- raise EOFError("Compressed file ended before the "
- "end-of-stream marker was reached")
- data += b
- return data
-
def _read_gzip_header(self):
magic = self.fileobj.read(2)
if magic == b'':
- return False
+ raise EOFError("Reached EOF")
if magic != b'\037\213':
raise IOError('Not a gzipped file')
-
- method, flag, self.mtime = struct.unpack("<BBIxx", self._read_exact(8))
+ method = ord( self.fileobj.read(1) )
if method != 8:
raise IOError('Unknown compression method')
+ flag = ord( self.fileobj.read(1) )
+ self.mtime = read32(self.fileobj)
+ # extraflag = self.fileobj.read(1)
+ # os = self.fileobj.read(1)
+ self.fileobj.read(2)
if flag & FEXTRA:
# Read & discard the extra field, if present
- extra_len, = struct.unpack("<H", self._read_exact(2))
- self._read_exact(extra_len)
+ xlen = ord(self.fileobj.read(1))
+ xlen = xlen + 256*ord(self.fileobj.read(1))
+ self.fileobj.read(xlen)
if flag & FNAME:
# Read and discard a null-terminated string containing the filename
while True:
@@ -295,13 +293,12 @@ class GzipFile(io.BufferedIOBase):
if not s or s==b'\000':
break
if flag & FHCRC:
- self._read_exact(2) # Read & discard the 16-bit header CRC
+ self.fileobj.read(2) # Read & discard the 16-bit header CRC
unused = self.fileobj.unused()
if unused:
uncompress = self.decompress.decompress(unused)
self._add_read_data(uncompress)
- return True
def write(self,data):
self._check_closed()
@@ -335,16 +332,20 @@ class GzipFile(io.BufferedIOBase):
readsize = 1024
if size < 0: # get the whole thing
- while self._read(readsize):
- readsize = min(self.max_read_chunk, readsize * 2)
- size = self.extrasize
+ try:
+ while True:
+ self._read(readsize)
+ readsize = min(self.max_read_chunk, readsize * 2)
+ except EOFError:
+ size = self.extrasize
else: # just get some more of it
- while size > self.extrasize:
- if not self._read(readsize):
- if size > self.extrasize:
- size = self.extrasize
- break
- readsize = min(self.max_read_chunk, readsize * 2)
+ try:
+ while size > self.extrasize:
+ self._read(readsize)
+ readsize = min(self.max_read_chunk, readsize * 2)
+ except EOFError:
+ if size > self.extrasize:
+ size = self.extrasize
offset = self.offset - self.extrastart
chunk = self.extrabuf[offset: offset + size]
@@ -365,9 +366,12 @@ class GzipFile(io.BufferedIOBase):
if self.extrasize == 0:
if self.fileobj is None:
return b''
- # Ensure that we don't return b"" if we haven't reached EOF.
- # 1024 is the same buffering heuristic used in read()
- while self.extrasize == 0 and self._read(max(n, 1024)):
+ try:
+ # Ensure that we don't return b"" if we haven't reached EOF.
+ while self.extrasize == 0:
+ # 1024 is the same buffering heuristic used in read()
+ self._read(max(n, 1024))
+ except EOFError:
pass
offset = self.offset - self.extrastart
remaining = self.extrasize
@@ -380,14 +384,13 @@ class GzipFile(io.BufferedIOBase):
def _read(self, size=1024):
if self.fileobj is None:
- return False
+ raise EOFError("Reached EOF")
if self._new_member:
# If the _new_member flag is set, we have to
# jump to the next member, if there is one.
self._init_read()
- if not self._read_gzip_header():
- return False
+ self._read_gzip_header()
self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
self._new_member = False
@@ -404,7 +407,7 @@ class GzipFile(io.BufferedIOBase):
self.fileobj.prepend(self.decompress.unused_data, True)
self._read_eof()
self._add_read_data( uncompress )
- return False
+ raise EOFError('Reached EOF')
uncompress = self.decompress.decompress(buf)
self._add_read_data( uncompress )
@@ -420,7 +423,6 @@ class GzipFile(io.BufferedIOBase):
# a new member on the next call
self._read_eof()
self._new_member = True
- return True
def _add_read_data(self, data):
self.crc = zlib.crc32(data, self.crc) & 0xffffffff
@@ -435,7 +437,8 @@ class GzipFile(io.BufferedIOBase):
# We check the that the computed CRC and size of the
# uncompressed data matches the stored values. Note that the size
# stored is the true file size mod 2**32.
- crc32, isize = struct.unpack("<II", self._read_exact(8))
+ crc32 = read32(self.fileobj)
+ isize = read32(self.fileobj) # may exceed 2GB
if crc32 != self.crc:
raise IOError("CRC check failed %s != %s" % (hex(crc32),
hex(self.crc)))
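
For context on the _read_eof() hunk above: per the gzip format, every member ends with an 8-byte trailer carrying the CRC-32 of the uncompressed data followed by its size modulo 2**32, both little-endian, which is what the two read32() calls pick apart. A small standalone sketch (assuming a complete, untruncated member) checks that trailer the same way:

    import gzip, struct, zlib

    data = b"hello trailer" * 10
    blob = gzip.compress(data)

    crc32, isize = struct.unpack("<II", blob[-8:])   # CRC32 then ISIZE
    assert crc32 == zlib.crc32(data) & 0xffffffff
    assert isize == len(data) % 2**32
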
diff --git a/Lib/test/test_bz2.py b/Lib/test/test_bz2.py
index bcf80dc..93f71fe 100644
--- a/Lib/test/test_bz2.py
+++ b/Lib/test/test_bz2.py
@@ -292,24 +292,6 @@ class BZ2FileTest(BaseTest):
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
- def test_read_truncated(self):
- # Drop the eos_magic field (6 bytes) and CRC (4 bytes).
- truncated = self.DATA[:-10]
- with open(self.filename, 'wb') as f:
- f.write(truncated)
- with BZ2File(self.filename) as f:
- self.assertRaises(EOFError, f.read)
- with BZ2File(self.filename) as f:
- self.assertEqual(f.read(len(self.TEXT)), self.TEXT)
- self.assertRaises(EOFError, f.read, 1)
- # Incomplete 4-byte file header, and block header of at least 146 bits.
- for i in range(22):
- with open(self.filename, 'wb') as f:
- f.write(truncated[:i])
- with BZ2File(self.filename) as f:
- self.assertRaises(EOFError, f.read, 1)
-
-
class BZ2CompressorTest(BaseTest):
def testCompress(self):
# "Test BZ2Compressor.compress()/flush()"
diff --git a/Lib/test/test_gzip.py b/Lib/test/test_gzip.py
index b912576..a4dfac4 100644..100755
--- a/Lib/test/test_gzip.py
+++ b/Lib/test/test_gzip.py
@@ -365,19 +365,6 @@ class TestGzip(unittest.TestCase):
datac = gzip.compress(data)
self.assertEqual(gzip.decompress(datac), data)
- def test_read_truncated(self):
- data = data1*50
- # Drop the CRC (4 bytes) and file size (4 bytes).
- truncated = gzip.compress(data)[:-8]
- with gzip.GzipFile(fileobj=io.BytesIO(truncated)) as f:
- self.assertRaises(EOFError, f.read)
- with gzip.GzipFile(fileobj=io.BytesIO(truncated)) as f:
- self.assertEqual(f.read(len(data)), data)
- self.assertRaises(EOFError, f.read, 1)
- # Incomplete 10-byte header.
- for i in range(2, 10):
- with gzip.GzipFile(fileobj=io.BytesIO(truncated[:i])) as f:
- self.assertRaises(EOFError, f.read, 1)
def test_read_with_extra(self):
# Gzip data with an extra field