Diffstat (limited to 'Lib/zipfile.py')
-rw-r--r-- | Lib/zipfile.py | 651
1 file changed, 399 insertions(+), 252 deletions(-)
diff --git a/Lib/zipfile.py b/Lib/zipfile.py
index bda6134..85bdaa9 100644
--- a/Lib/zipfile.py
+++ b/Lib/zipfile.py
@@ -13,6 +13,7 @@ import stat
 import shutil
 import struct
 import binascii
+import threading
 
 try:
@@ -355,6 +356,28 @@ class ZipInfo (object):
         # compress_size     Size of the compressed file
         # file_size         Size of the uncompressed file
 
+    def __repr__(self):
+        result = ['<%s filename=%r' % (self.__class__.__name__, self.filename)]
+        if self.compress_type != ZIP_STORED:
+            result.append(' compress_type=%s' %
+                          compressor_names.get(self.compress_type,
+                                               self.compress_type))
+        hi = self.external_attr >> 16
+        lo = self.external_attr & 0xFFFF
+        if hi:
+            result.append(' filemode=%r' % stat.filemode(hi))
+        if lo:
+            result.append(' external_attr=%#x' % lo)
+        isdir = self.filename[-1:] == '/'
+        if not isdir or self.file_size:
+            result.append(' file_size=%r' % self.file_size)
+        if ((not isdir or self.compress_size) and
+            (self.compress_type != ZIP_STORED or
+             self.file_size != self.compress_size)):
+            result.append(' compress_size=%r' % self.compress_size)
+        result.append('>')
+        return ''.join(result)
+
     def FileHeader(self, zip64=None):
         """Return the per-file header as a string."""
         dt = self.date_time
@@ -624,6 +647,47 @@ def _get_decompressor(compress_type):
         raise NotImplementedError("compression type %d" % (compress_type,))
 
 
+class _SharedFile:
+    def __init__(self, file, pos, close, lock):
+        self._file = file
+        self._pos = pos
+        self._close = close
+        self._lock = lock
+
+    def read(self, n=-1):
+        with self._lock:
+            self._file.seek(self._pos)
+            data = self._file.read(n)
+            self._pos = self._file.tell()
+            return data
+
+    def close(self):
+        if self._file is not None:
+            fileobj = self._file
+            self._file = None
+            self._close(fileobj)
+
+# Provide the tell method for unseekable stream
+class _Tellable:
+    def __init__(self, fp):
+        self.fp = fp
+        self.offset = 0
+
+    def write(self, data):
+        n = self.fp.write(data)
+        self.offset += n
+        return n
+
+    def tell(self):
+        return self.offset
+
+    def flush(self):
+        self.fp.flush()
+
+    def close(self):
+        self.fp.close()
+
+
 class ZipExtFile(io.BufferedIOBase):
     """File-like object for reading an archive member.
        Is returned by ZipFile.open().
@@ -671,6 +735,20 @@ class ZipExtFile(io.BufferedIOBase):
         else:
             self._expected_crc = None
 
+    def __repr__(self):
+        result = ['<%s.%s' % (self.__class__.__module__,
+                              self.__class__.__qualname__)]
+        if not self.closed:
+            result.append(' name=%r mode=%r' % (self.name, self.mode))
+            if self._compress_type != ZIP_STORED:
+                result.append(' compress_type=%s' %
+                              compressor_names.get(self._compress_type,
+                                                   self._compress_type))
+        else:
+            result.append(' [closed]')
+        result.append('>')
+        return ''.join(result)
+
     def readline(self, limit=-1):
         """Read and return a line from the stream.
 
@@ -884,7 +962,8 @@ class ZipFile:
 
     file: Either the path to the file, or a file-like object.
           If it is a path, the file will be opened and closed by ZipFile.
-    mode: The mode can be either read "r", write "w" or append "a".
+    mode: The mode can be either read 'r', write 'w', exclusive create 'x',
+          or append 'a'.
     compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib),
                  ZIP_BZIP2 (requires bz2) or ZIP_LZMA (requires lzma).
     allowZip64: if True ZipFile will create files with ZIP64 extensions when
@@ -897,9 +976,10 @@ class ZipFile:
     _windows_illegal_name_trans_table = None
 
     def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=True):
-        """Open the ZIP file with mode read "r", write "w" or append "a"."""
-        if mode not in ("r", "w", "a"):
-            raise RuntimeError('ZipFile() requires mode "r", "w", or "a"')
+        """Open the ZIP file with mode read 'r', write 'w', exclusive create 'x',
+        or append 'a'."""
+        if mode not in ('r', 'w', 'x', 'a'):
+            raise RuntimeError("ZipFile requires mode 'r', 'w', 'x', or 'a'")
 
         _check_compression(compression)
 
@@ -909,7 +989,7 @@ class ZipFile:
         self.NameToInfo = {}    # Find file info given name
         self.filelist = []      # List of ZipInfo instances for archive
         self.compression = compression  # Method of compression
-        self.mode = key = mode.replace('b', '')[0]
+        self.mode = mode
         self.pwd = None
         self._comment = b''
 
@@ -918,33 +998,51 @@ class ZipFile:
             # No, it's a filename
             self._filePassed = 0
             self.filename = file
-            modeDict = {'r' : 'rb', 'w': 'wb', 'a' : 'r+b'}
-            try:
-                self.fp = io.open(file, modeDict[mode])
-            except OSError:
-                if mode == 'a':
-                    mode = key = 'w'
-                    self.fp = io.open(file, modeDict[mode])
-                else:
+            modeDict = {'r' : 'rb', 'w': 'w+b', 'x': 'x+b', 'a' : 'r+b',
+                        'r+b': 'w+b', 'w+b': 'wb', 'x+b': 'xb'}
+            filemode = modeDict[mode]
+            while True:
+                try:
+                    self.fp = io.open(file, filemode)
+                except OSError:
+                    if filemode in modeDict:
+                        filemode = modeDict[filemode]
+                        continue
                     raise
+                break
         else:
             self._filePassed = 1
             self.fp = file
             self.filename = getattr(file, 'name', None)
+        self._fileRefCnt = 1
+        self._lock = threading.RLock()
+        self._seekable = True
 
         try:
-            if key == 'r':
+            if mode == 'r':
                 self._RealGetContents()
-            elif key == 'w':
+            elif mode in ('w', 'x'):
                 # set the modified flag so central directory gets written
                 # even if no files are added to the archive
                 self._didModify = True
-            elif key == 'a':
+                try:
+                    self.start_dir = self.fp.tell()
+                except (AttributeError, OSError):
+                    self.fp = _Tellable(self.fp)
+                    self.start_dir = 0
+                    self._seekable = False
+                else:
+                    # Some file-like objects can provide tell() but not seek()
+                    try:
+                        self.fp.seek(self.start_dir)
+                    except (AttributeError, OSError):
+                        self._seekable = False
+            elif mode == 'a':
                 try:
                     # See if file is a zip file
                     self._RealGetContents()
                     # seek to start of directory and overwrite
-                    self.fp.seek(self.start_dir, 0)
+                    self.fp.seek(self.start_dir)
                 except BadZipFile:
                     # file is not a zip file, just append
                     self.fp.seek(0, 2)
@@ -952,13 +1050,13 @@
                     # set the modified flag so central directory gets written
                     # even if no files are added to the archive
                     self._didModify = True
+                    self.start_dir = self.fp.tell()
             else:
-                raise RuntimeError('Mode must be "r", "w" or "a"')
+                raise RuntimeError("Mode must be 'r', 'w', 'x', or 'a'")
         except:
            fp = self.fp
            self.fp = None
-            if not self._filePassed:
-                fp.close()
+            self._fpclose(fp)
            raise
 
     def __enter__(self):
@@ -967,6 +1065,20 @@
     def __exit__(self, type, value, traceback):
         self.close()
 
+    def __repr__(self):
+        result = ['<%s.%s' % (self.__class__.__module__,
+                              self.__class__.__qualname__)]
+        if self.fp is not None:
+            if self._filePassed:
+                result.append(' file=%r' % self.fp)
+            elif self.filename is not None:
+                result.append(' filename=%r' % self.filename)
+            result.append(' mode=%r' % self.mode)
+        else:
+            result.append(' [closed]')
+        result.append('>')
+        return ''.join(result)
+
     def _RealGetContents(self):
         """Read in the table of contents for the ZIP file."""
         fp = self.fp
@@ -1131,23 +1243,17 @@ class ZipFile:
             raise RuntimeError(
                 "Attempt to read ZIP archive that was already closed")
 
-        # Only open a new file for instances where we were not
-        # given a file object in the constructor
-        if self._filePassed:
-            zef_file = self.fp
+        # Make sure we have an info object
+        if isinstance(name, ZipInfo):
+            # 'name' is already an info object
+            zinfo = name
         else:
-            zef_file = io.open(self.filename, 'rb')
+            # Get info object for name
+            zinfo = self.getinfo(name)
+        self._fileRefCnt += 1
+        zef_file = _SharedFile(self.fp, zinfo.header_offset, self._fpclose, self._lock)
         try:
-            # Make sure we have an info object
-            if isinstance(name, ZipInfo):
-                # 'name' is already an info object
-                zinfo = name
-            else:
-                # Get info object for name
-                zinfo = self.getinfo(name)
-
-            zef_file.seek(zinfo.header_offset, 0)
-
             # Skip the file header:
             fheader = zef_file.read(sizeFileHeader)
             if len(fheader) != sizeFileHeader:
@@ -1206,11 +1312,9 @@ class ZipFile:
             if h[11] != check_byte:
                 raise RuntimeError("Bad password for file", name)
 
-            return ZipExtFile(zef_file, mode, zinfo, zd,
-                              close_fileobj=not self._filePassed)
+            return ZipExtFile(zef_file, mode, zinfo, zd, True)
         except:
-            if not self._filePassed:
-                zef_file.close()
+            zef_file.close()
             raise
 
     def extract(self, member, path=None, pwd=None):
@@ -1298,8 +1402,8 @@ class ZipFile:
         if zinfo.filename in self.NameToInfo:
             import warnings
             warnings.warn('Duplicate name: %r' % zinfo.filename, stacklevel=3)
-        if self.mode not in ("w", "a"):
-            raise RuntimeError('write() requires mode "w" or "a"')
+        if self.mode not in ('w', 'x', 'a'):
+            raise RuntimeError("write() requires mode 'w', 'x', or 'a'")
         if not self.fp:
             raise RuntimeError(
                 "Attempt to write ZIP archive that was already closed")
@@ -1344,66 +1448,79 @@ class ZipFile:
             zinfo.file_size = st.st_size
         zinfo.flag_bits = 0x00
-        zinfo.header_offset = self.fp.tell()    # Start of header bytes
-        if zinfo.compress_type == ZIP_LZMA:
-            # Compressed data includes an end-of-stream (EOS) marker
-            zinfo.flag_bits |= 0x02
-
-        self._writecheck(zinfo)
-        self._didModify = True
-
-        if isdir:
-            zinfo.file_size = 0
-            zinfo.compress_size = 0
-            zinfo.CRC = 0
-            zinfo.external_attr |= 0x10  # MS-DOS directory flag
+        with self._lock:
+            if self._seekable:
+                self.fp.seek(self.start_dir)
+            zinfo.header_offset = self.fp.tell()    # Start of header bytes
+            if zinfo.compress_type == ZIP_LZMA:
+                # Compressed data includes an end-of-stream (EOS) marker
+                zinfo.flag_bits |= 0x02
+
+            self._writecheck(zinfo)
+            self._didModify = True
+
+            if isdir:
+                zinfo.file_size = 0
+                zinfo.compress_size = 0
+                zinfo.CRC = 0
+                zinfo.external_attr |= 0x10  # MS-DOS directory flag
+                self.filelist.append(zinfo)
+                self.NameToInfo[zinfo.filename] = zinfo
+                self.fp.write(zinfo.FileHeader(False))
+                self.start_dir = self.fp.tell()
+                return
+
+            cmpr = _get_compressor(zinfo.compress_type)
+            if not self._seekable:
+                zinfo.flag_bits |= 0x08
+            with open(filename, "rb") as fp:
+                # Must overwrite CRC and sizes with correct data later
+                zinfo.CRC = CRC = 0
+                zinfo.compress_size = compress_size = 0
+                # Compressed size can be larger than uncompressed size
+                zip64 = self._allowZip64 and \
+                    zinfo.file_size * 1.05 > ZIP64_LIMIT
+                self.fp.write(zinfo.FileHeader(zip64))
+                file_size = 0
+                while 1:
+                    buf = fp.read(1024 * 8)
+                    if not buf:
+                        break
+                    file_size = file_size + len(buf)
+                    CRC = crc32(buf, CRC) & 0xffffffff
+                    if cmpr:
+                        buf = cmpr.compress(buf)
+                        compress_size = compress_size + len(buf)
+                    self.fp.write(buf)
+            if cmpr:
+                buf = cmpr.flush()
+                compress_size = compress_size + len(buf)
+                self.fp.write(buf)
+                zinfo.compress_size = compress_size
+            else:
+                zinfo.compress_size = file_size
+            zinfo.CRC = CRC
+            zinfo.file_size = file_size
+            if zinfo.flag_bits & 0x08:
+                # Write CRC and file sizes after the file data
+                fmt = '<LQQ' if zip64 else '<LLL'
+                self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
+                                          zinfo.file_size))
+                self.start_dir = self.fp.tell()
+            else:
+                if not zip64 and self._allowZip64:
+                    if file_size > ZIP64_LIMIT:
+                        raise RuntimeError('File size has increased during compressing')
+                    if compress_size > ZIP64_LIMIT:
+                        raise RuntimeError('Compressed size larger than uncompressed size')
+                # Seek backwards and write file header (which will now include
+                # correct CRC and file sizes)
+                self.start_dir = self.fp.tell()       # Preserve current position in file
+                self.fp.seek(zinfo.header_offset)
+                self.fp.write(zinfo.FileHeader(zip64))
+                self.fp.seek(self.start_dir)
             self.filelist.append(zinfo)
             self.NameToInfo[zinfo.filename] = zinfo
-            self.fp.write(zinfo.FileHeader(False))
-            return
-
-        cmpr = _get_compressor(zinfo.compress_type)
-        with open(filename, "rb") as fp:
-            # Must overwrite CRC and sizes with correct data later
-            zinfo.CRC = CRC = 0
-            zinfo.compress_size = compress_size = 0
-            # Compressed size can be larger than uncompressed size
-            zip64 = self._allowZip64 and \
-                zinfo.file_size * 1.05 > ZIP64_LIMIT
-            self.fp.write(zinfo.FileHeader(zip64))
-            file_size = 0
-            while 1:
-                buf = fp.read(1024 * 8)
-                if not buf:
-                    break
-                file_size = file_size + len(buf)
-                CRC = crc32(buf, CRC) & 0xffffffff
-                if cmpr:
-                    buf = cmpr.compress(buf)
-                    compress_size = compress_size + len(buf)
-                self.fp.write(buf)
-        if cmpr:
-            buf = cmpr.flush()
-            compress_size = compress_size + len(buf)
-            self.fp.write(buf)
-            zinfo.compress_size = compress_size
-        else:
-            zinfo.compress_size = file_size
-        zinfo.CRC = CRC
-        zinfo.file_size = file_size
-        if not zip64 and self._allowZip64:
-            if file_size > ZIP64_LIMIT:
-                raise RuntimeError('File size has increased during compressing')
-            if compress_size > ZIP64_LIMIT:
-                raise RuntimeError('Compressed size larger than uncompressed size')
-        # Seek backwards and write file header (which will now include
-        # correct CRC and file sizes)
-        position = self.fp.tell()       # Preserve current position in file
-        self.fp.seek(zinfo.header_offset, 0)
-        self.fp.write(zinfo.FileHeader(zip64))
-        self.fp.seek(position, 0)
-        self.filelist.append(zinfo)
-        self.NameToInfo[zinfo.filename] = zinfo
 
     def writestr(self, zinfo_or_arcname, data, compress_type=None):
         """Write a file into the archive.  The contents is 'data', which
@@ -1430,154 +1547,171 @@ class ZipFile:
                 "Attempt to write to ZIP archive that was already closed")
 
         zinfo.file_size = len(data)            # Uncompressed size
-        zinfo.header_offset = self.fp.tell()    # Start of header data
-        if compress_type is not None:
-            zinfo.compress_type = compress_type
-        if zinfo.compress_type == ZIP_LZMA:
-            # Compressed data includes an end-of-stream (EOS) marker
-            zinfo.flag_bits |= 0x02
-
-        self._writecheck(zinfo)
-        self._didModify = True
-        zinfo.CRC = crc32(data) & 0xffffffff       # CRC-32 checksum
-        co = _get_compressor(zinfo.compress_type)
-        if co:
-            data = co.compress(data) + co.flush()
-            zinfo.compress_size = len(data)    # Compressed size
-        else:
-            zinfo.compress_size = zinfo.file_size
-        zip64 = zinfo.file_size > ZIP64_LIMIT or \
-            zinfo.compress_size > ZIP64_LIMIT
-        if zip64 and not self._allowZip64:
-            raise LargeZipFile("Filesize would require ZIP64 extensions")
-        self.fp.write(zinfo.FileHeader(zip64))
-        self.fp.write(data)
-        if zinfo.flag_bits & 0x08:
-            # Write CRC and file sizes after the file data
-            fmt = '<LQQ' if zip64 else '<LLL'
-            self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
-                                      zinfo.file_size))
-        self.fp.flush()
-        self.filelist.append(zinfo)
-        self.NameToInfo[zinfo.filename] = zinfo
+        with self._lock:
+            if self._seekable:
+                self.fp.seek(self.start_dir)
+            zinfo.header_offset = self.fp.tell()    # Start of header data
+            if compress_type is not None:
+                zinfo.compress_type = compress_type
+            if zinfo.compress_type == ZIP_LZMA:
+                # Compressed data includes an end-of-stream (EOS) marker
+                zinfo.flag_bits |= 0x02
+
+            self._writecheck(zinfo)
+            self._didModify = True
+            zinfo.CRC = crc32(data) & 0xffffffff       # CRC-32 checksum
+            co = _get_compressor(zinfo.compress_type)
+            if co:
+                data = co.compress(data) + co.flush()
+                zinfo.compress_size = len(data)    # Compressed size
+            else:
+                zinfo.compress_size = zinfo.file_size
+            zip64 = zinfo.file_size > ZIP64_LIMIT or \
+                zinfo.compress_size > ZIP64_LIMIT
+            if zip64 and not self._allowZip64:
+                raise LargeZipFile("Filesize would require ZIP64 extensions")
+            self.fp.write(zinfo.FileHeader(zip64))
+            self.fp.write(data)
+            if zinfo.flag_bits & 0x08:
+                # Write CRC and file sizes after the file data
+                fmt = '<LQQ' if zip64 else '<LLL'
+                self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
+                                          zinfo.file_size))
+            self.fp.flush()
+            self.start_dir = self.fp.tell()
+            self.filelist.append(zinfo)
+            self.NameToInfo[zinfo.filename] = zinfo
 
     def __del__(self):
         """Call the "close()" method in case the user forgot."""
         self.close()
 
     def close(self):
-        """Close the file, and for mode "w" and "a" write the ending
+        """Close the file, and for mode 'w', 'x' and 'a' write the ending
         records."""
         if self.fp is None:
             return
 
         try:
-            if self.mode in ("w", "a") and self._didModify: # write ending records
-                pos1 = self.fp.tell()
-                for zinfo in self.filelist:         # write central directory
-                    dt = zinfo.date_time
-                    dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
-                    dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
-                    extra = []
-                    if zinfo.file_size > ZIP64_LIMIT \
-                            or zinfo.compress_size > ZIP64_LIMIT:
-                        extra.append(zinfo.file_size)
-                        extra.append(zinfo.compress_size)
-                        file_size = 0xffffffff
-                        compress_size = 0xffffffff
-                    else:
-                        file_size = zinfo.file_size
-                        compress_size = zinfo.compress_size
+            if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records
+                with self._lock:
+                    if self._seekable:
+                        self.fp.seek(self.start_dir)
+                    self._write_end_record()
+        finally:
+            fp = self.fp
+            self.fp = None
+            self._fpclose(fp)
+
+    def _write_end_record(self):
+        for zinfo in self.filelist:         # write central directory
+            dt = zinfo.date_time
+            dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
+            dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
+            extra = []
+            if zinfo.file_size > ZIP64_LIMIT \
+               or zinfo.compress_size > ZIP64_LIMIT:
+                extra.append(zinfo.file_size)
+                extra.append(zinfo.compress_size)
+                file_size = 0xffffffff
+                compress_size = 0xffffffff
+            else:
+                file_size = zinfo.file_size
+                compress_size = zinfo.compress_size
 
-                    if zinfo.header_offset > ZIP64_LIMIT:
-                        extra.append(zinfo.header_offset)
-                        header_offset = 0xffffffff
-                    else:
-                        header_offset = zinfo.header_offset
+            if zinfo.header_offset > ZIP64_LIMIT:
+                extra.append(zinfo.header_offset)
+                header_offset = 0xffffffff
+            else:
+                header_offset = zinfo.header_offset
 
-                    extra_data = zinfo.extra
-                    min_version = 0
-                    if extra:
-                        # Append a ZIP64 field to the extra's
-                        extra_data = struct.pack(
-                                '<HH' + 'Q'*len(extra),
-                                1, 8*len(extra), *extra) + extra_data
-
-                        min_version = ZIP64_VERSION
+            extra_data = zinfo.extra
+            min_version = 0
+            if extra:
+                # Append a ZIP64 field to the extra's
+                extra_data = struct.pack(
+                    '<HH' + 'Q'*len(extra),
+                    1, 8*len(extra), *extra) + extra_data
+
+                min_version = ZIP64_VERSION
 
-                    if zinfo.compress_type == ZIP_BZIP2:
-                        min_version = max(BZIP2_VERSION, min_version)
-                    elif zinfo.compress_type == ZIP_LZMA:
-                        min_version = max(LZMA_VERSION, min_version)
+            if zinfo.compress_type == ZIP_BZIP2:
+                min_version = max(BZIP2_VERSION, min_version)
+            elif zinfo.compress_type == ZIP_LZMA:
+                min_version = max(LZMA_VERSION, min_version)
 
-                    extract_version = max(min_version, zinfo.extract_version)
-                    create_version = max(min_version, zinfo.create_version)
-                    try:
-                        filename, flag_bits = zinfo._encodeFilenameFlags()
-                        centdir = struct.pack(structCentralDir,
-                                              stringCentralDir, create_version,
-                                              zinfo.create_system, extract_version, zinfo.reserved,
-                                              flag_bits, zinfo.compress_type, dostime, dosdate,
-                                              zinfo.CRC, compress_size, file_size,
-                                              len(filename), len(extra_data), len(zinfo.comment),
-                                              0, zinfo.internal_attr, zinfo.external_attr,
-                                              header_offset)
-                    except DeprecationWarning:
-                        print((structCentralDir, stringCentralDir, create_version,
-                               zinfo.create_system, extract_version, zinfo.reserved,
-                               zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
-                               zinfo.CRC, compress_size, file_size,
-                               len(zinfo.filename), len(extra_data), len(zinfo.comment),
-                               0, zinfo.internal_attr, zinfo.external_attr,
-                               header_offset), file=sys.stderr)
-                        raise
-                    self.fp.write(centdir)
-                    self.fp.write(filename)
-                    self.fp.write(extra_data)
-                    self.fp.write(zinfo.comment)
-
-                pos2 = self.fp.tell()
-                # Write end-of-zip-archive record
-                centDirCount = len(self.filelist)
-                centDirSize = pos2 - pos1
-                centDirOffset = pos1
-                requires_zip64 = None
-                if centDirCount > ZIP_FILECOUNT_LIMIT:
-                    requires_zip64 = "Files count"
-                elif centDirOffset > ZIP64_LIMIT:
-                    requires_zip64 = "Central directory offset"
-                elif centDirSize > ZIP64_LIMIT:
-                    requires_zip64 = "Central directory size"
-                if requires_zip64:
-                    # Need to write the ZIP64 end-of-archive records
-                    if not self._allowZip64:
-                        raise LargeZipFile(requires_zip64 +
-                                           " would require ZIP64 extensions")
-                    zip64endrec = struct.pack(
-                            structEndArchive64, stringEndArchive64,
-                            44, 45, 45, 0, 0, centDirCount, centDirCount,
-                            centDirSize, centDirOffset)
-                    self.fp.write(zip64endrec)
-
-                    zip64locrec = struct.pack(
-                            structEndArchive64Locator,
-                            stringEndArchive64Locator, 0, pos2, 1)
-                    self.fp.write(zip64locrec)
-                centDirCount = min(centDirCount, 0xFFFF)
-                centDirSize = min(centDirSize, 0xFFFFFFFF)
-                centDirOffset = min(centDirOffset, 0xFFFFFFFF)
-
-                endrec = struct.pack(structEndArchive, stringEndArchive,
-                                     0, 0, centDirCount, centDirCount,
-                                     centDirSize, centDirOffset, len(self._comment))
-                self.fp.write(endrec)
-                self.fp.write(self._comment)
-                self.fp.flush()
-        finally:
-            fp = self.fp
-            self.fp = None
-            if not self._filePassed:
-                fp.close()
+            extract_version = max(min_version, zinfo.extract_version)
+            create_version = max(min_version, zinfo.create_version)
+            try:
+                filename, flag_bits = zinfo._encodeFilenameFlags()
+                centdir = struct.pack(structCentralDir,
+                                      stringCentralDir, create_version,
+                                      zinfo.create_system, extract_version, zinfo.reserved,
+                                      flag_bits, zinfo.compress_type, dostime, dosdate,
+                                      zinfo.CRC, compress_size, file_size,
+                                      len(filename), len(extra_data), len(zinfo.comment),
+                                      0, zinfo.internal_attr, zinfo.external_attr,
+                                      header_offset)
+            except DeprecationWarning:
+                print((structCentralDir, stringCentralDir, create_version,
+                       zinfo.create_system, extract_version, zinfo.reserved,
+                       zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
+                       zinfo.CRC, compress_size, file_size,
+                       len(zinfo.filename), len(extra_data), len(zinfo.comment),
+                       0, zinfo.internal_attr, zinfo.external_attr,
+                       header_offset), file=sys.stderr)
+                raise
+            self.fp.write(centdir)
+            self.fp.write(filename)
+            self.fp.write(extra_data)
+            self.fp.write(zinfo.comment)
+
+        pos2 = self.fp.tell()
+        # Write end-of-zip-archive record
+        centDirCount = len(self.filelist)
+        centDirSize = pos2 - self.start_dir
+        centDirOffset = self.start_dir
+        requires_zip64 = None
+        if centDirCount > ZIP_FILECOUNT_LIMIT:
+            requires_zip64 = "Files count"
+        elif centDirOffset > ZIP64_LIMIT:
+            requires_zip64 = "Central directory offset"
+        elif centDirSize > ZIP64_LIMIT:
+            requires_zip64 = "Central directory size"
+        if requires_zip64:
+            # Need to write the ZIP64 end-of-archive records
+            if not self._allowZip64:
+                raise LargeZipFile(requires_zip64 +
+                                   " would require ZIP64 extensions")
+            zip64endrec = struct.pack(
+                structEndArchive64, stringEndArchive64,
+                44, 45, 45, 0, 0, centDirCount, centDirCount,
+                centDirSize, centDirOffset)
+            self.fp.write(zip64endrec)
+
+            zip64locrec = struct.pack(
+                structEndArchive64Locator,
+                stringEndArchive64Locator, 0, pos2, 1)
+            self.fp.write(zip64locrec)
+        centDirCount = min(centDirCount, 0xFFFF)
+        centDirSize = min(centDirSize, 0xFFFFFFFF)
+        centDirOffset = min(centDirOffset, 0xFFFFFFFF)
+
+        endrec = struct.pack(structEndArchive, stringEndArchive,
+                             0, 0, centDirCount, centDirCount,
+                             centDirSize, centDirOffset, len(self._comment))
+        self.fp.write(endrec)
+        self.fp.write(self._comment)
+        self.fp.flush()
+
+    def _fpclose(self, fp):
+        assert self._fileRefCnt > 0
+        self._fileRefCnt -= 1
+        if not self._fileRefCnt and not self._filePassed:
+            fp.close()
 
 
 class PyZipFile(ZipFile):
@@ -1597,7 +1731,7 @@ class PyZipFile(ZipFile):
         the modules into the archive.  If pathname is a plain
         directory, listdir *.py and enter all modules.  Else, pathname
         must be a Python *.py file and the module will be put into the
-        archive.  Added modules are always module.pyo or module.pyc.
+        archive.  Added modules are always module.pyc.
         This method will compile the module.py into module.pyc if
         necessary.
         If filterfunc(pathname) is given, it is called with every argument.
@@ -1690,46 +1824,59 @@ class PyZipFile(ZipFile):
         file_py = pathname + ".py"
         file_pyc = pathname + ".pyc"
-        file_pyo = pathname + ".pyo"
-        pycache_pyc = importlib.util.cache_from_source(file_py, True)
-        pycache_pyo = importlib.util.cache_from_source(file_py, False)
+        pycache_opt0 = importlib.util.cache_from_source(file_py, optimization='')
+        pycache_opt1 = importlib.util.cache_from_source(file_py, optimization=1)
+        pycache_opt2 = importlib.util.cache_from_source(file_py, optimization=2)
         if self._optimize == -1:
             # legacy mode: use whatever file is present
-            if (os.path.isfile(file_pyo) and
-                os.stat(file_pyo).st_mtime >= os.stat(file_py).st_mtime):
-                # Use .pyo file.
-                arcname = fname = file_pyo
-            elif (os.path.isfile(file_pyc) and
+            if (os.path.isfile(file_pyc) and
                   os.stat(file_pyc).st_mtime >= os.stat(file_py).st_mtime):
                 # Use .pyc file.
                 arcname = fname = file_pyc
-            elif (os.path.isfile(pycache_pyc) and
-                  os.stat(pycache_pyc).st_mtime >= os.stat(file_py).st_mtime):
+            elif (os.path.isfile(pycache_opt0) and
+                  os.stat(pycache_opt0).st_mtime >= os.stat(file_py).st_mtime):
                 # Use the __pycache__/*.pyc file, but write it to the legacy pyc
                 # file name in the archive.
-                fname = pycache_pyc
+                fname = pycache_opt0
                 arcname = file_pyc
-            elif (os.path.isfile(pycache_pyo) and
-                  os.stat(pycache_pyo).st_mtime >= os.stat(file_py).st_mtime):
-                # Use the __pycache__/*.pyo file, but write it to the legacy pyo
+            elif (os.path.isfile(pycache_opt1) and
+                  os.stat(pycache_opt1).st_mtime >= os.stat(file_py).st_mtime):
+                # Use the __pycache__/*.pyc file, but write it to the legacy pyc
+                # file name in the archive.
+                fname = pycache_opt1
+                arcname = file_pyc
+            elif (os.path.isfile(pycache_opt2) and
+                  os.stat(pycache_opt2).st_mtime >= os.stat(file_py).st_mtime):
+                # Use the __pycache__/*.pyc file, but write it to the legacy pyc
                 # file name in the archive.
-                fname = pycache_pyo
-                arcname = file_pyo
+                fname = pycache_opt2
+                arcname = file_pyc
             else:
                 # Compile py into PEP 3147 pyc file.
                 if _compile(file_py):
-                    fname = (pycache_pyc if __debug__ else pycache_pyo)
-                    arcname = (file_pyc if __debug__ else file_pyo)
+                    if sys.flags.optimize == 0:
+                        fname = pycache_opt0
+                    elif sys.flags.optimize == 1:
+                        fname = pycache_opt1
+                    else:
+                        fname = pycache_opt2
+                    arcname = file_pyc
                 else:
                     fname = arcname = file_py
         else:
             # new mode: use given optimization level
             if self._optimize == 0:
-                fname = pycache_pyc
+                fname = pycache_opt0
                 arcname = file_pyc
             else:
-                fname = pycache_pyo
-                arcname = file_pyo
+                arcname = file_pyc
+                if self._optimize == 1:
+                    fname = pycache_opt1
+                elif self._optimize == 2:
+                    fname = pycache_opt2
+                else:
+                    msg = "invalid value for 'optimize': {!r}".format(self._optimize)
+                    raise ValueError(msg)
             if not (os.path.isfile(fname) and
                     os.stat(fname).st_mtime >= os.stat(file_py).st_mtime):
                 if not _compile(file_py, optimize=self._optimize):
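
The patch above adds an exclusive-create mode ('x') and routes member reads through _SharedFile, which shares the archive's file object behind an RLock, so several threads can read members from one open ZipFile. A minimal sketch of exercising that behaviour, assuming Python 3.5+ with this change; the archive name and member contents are made up for illustration:

import threading
import zipfile

# 'x' raises FileExistsError if demo.zip already exists.
with zipfile.ZipFile('demo.zip', mode='x') as zf:
    zf.writestr('a.txt', b'alpha')
    zf.writestr('b.txt', b'beta')

def dump(zf, name):
    # Each open() gets its own _SharedFile view of zf.fp, guarded by zf's lock.
    with zf.open(name) as member:
        print(name, member.read())

with zipfile.ZipFile('demo.zip') as zf:
    threads = [threading.Thread(target=dump, args=(zf, name))
               for name in zf.namelist()]
    for t in threads:
        t.start()
    for t in threads:
        t.join()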
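
Because _Tellable supplies tell() for write-only objects and the writer falls back to a data-descriptor record when it cannot seek, an archive can now be produced on a stream that only supports write(). A rough sketch under that assumption; the WriteOnlySink wrapper is invented here as a stand-in for a pipe or socket:

import io
import zipfile

class WriteOnlySink:
    # Hypothetical unseekable stream: write()/flush() only, no seek()/tell().
    def __init__(self, raw):
        self._raw = raw
    def write(self, data):
        return self._raw.write(data)
    def flush(self):
        self._raw.flush()

buf = io.BytesIO()
with zipfile.ZipFile(WriteOnlySink(buf), mode='w') as zf:
    zf.writestr('streamed.txt', b'written without seeking back')

# The bytes collected from the stream are still a readable archive.
with zipfile.ZipFile(io.BytesIO(buf.getvalue())) as zf:
    print(zf.namelist())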
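
The PyZipFile changes drop the old .pyo handling and select bytecode by PEP 488 optimization level instead, always storing it under the legacy module.pyc name in the archive. A sketch of the new-mode selection; 'mypkg' is a hypothetical package directory that would need to exist:

import zipfile

# optimize=2 picks __pycache__/*.opt-2.pyc files (compiling them if needed)
# and writes them into the archive as mypkg/... .pyc entries.
with zipfile.PyZipFile('mypkg.zip', mode='w', optimize=2) as pzf:
    pzf.writepy('mypkg')

print(zipfile.ZipFile('mypkg.zip').namelist())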