author     Tim Peters <tim.peters@gmail.com>    2001-01-15 03:34:38 (GMT)
committer  Tim Peters <tim.peters@gmail.com>    2001-01-15 03:34:38 (GMT)
commit     e119006e7dc0df0a5ff6b60764b2ce3cd9477688 (patch)
tree       b4c578be7bff079625b7cdc36abb1d4f0b46d8fa
parent     b90f89a496676ec714e111a747344600f3988496 (diff)
Whitespace normalization. Top level of Lib now fixed-point for reindent.py!
-rwxr-xr-x  Lib/UserString.py    28
-rw-r--r--  Lib/urllib.py        26
-rw-r--r--  Lib/urllib2.py       60
-rw-r--r--  Lib/urlparse.py     370
-rwxr-xr-x  Lib/uu.py            10
-rw-r--r--  Lib/warnings.py      46
-rw-r--r--  Lib/wave.py           2
-rw-r--r--  Lib/webbrowser.py     2
-rw-r--r--  Lib/whrandom.py     190
-rw-r--r--  Lib/xdrlib.py         8
-rw-r--r--  Lib/xmllib.py         4
-rw-r--r--  Lib/zipfile.py      118
12 files changed, 432 insertions, 432 deletions
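
For context, the commit message refers to Tools/scripts/reindent.py reaching a "fixed point" on the top level of Lib: re-running the tool over these files produces no further changes. The sketch below is not the actual reindent.py; it is a minimal, hypothetical illustration of the kind of whitespace normalization such a pass performs (strip trailing whitespace, expand tabs, end the file with exactly one newline), written for modern Python rather than the 2001-era codebase.

#!/usr/bin/env python3
"""Minimal sketch of a reindent-style whitespace normalization pass.

Hypothetical illustration only; the real Tools/scripts/reindent.py also
re-indents code to 4-space levels and has more careful handling.
"""
import sys

def normalize(text, tabsize=8):
    # Expand tabs and drop trailing whitespace on every line.
    lines = [line.expandtabs(tabsize).rstrip() for line in text.splitlines()]
    # Drop trailing blank lines and end with a single newline.
    while lines and not lines[-1]:
        lines.pop()
    return "\n".join(lines) + "\n"

if __name__ == "__main__":
    for path in sys.argv[1:]:
        with open(path) as f:
            original = f.read()
        fixed = normalize(original)
        if fixed != original:
            with open(path, "w") as f:
                f.write(fixed)
            print("normalized", path)

Because normalize() is idempotent, a second run over the same files reports nothing to do; that "no further changes" state is the fixed point the commit message celebrates for Lib.
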
diff --git a/Lib/UserString.py b/Lib/UserString.py
index d8eec15..2d02b9b 100755
--- a/Lib/UserString.py
+++ b/Lib/UserString.py
@@ -2,7 +2,7 @@
## vim:ts=4:et:nowrap
"""A user-defined wrapper around string objects
-Note: string objects have grown methods in Python 1.6
+Note: string objects have grown methods in Python 1.6
This module requires Python 1.6 or later.
"""
from types import StringType, UnicodeType
@@ -14,7 +14,7 @@ class UserString:
self.data = seq
elif isinstance(seq, UserString):
self.data = seq.data[:]
- else:
+ else:
self.data = str(seq)
def __str__(self): return str(self.data)
def __repr__(self): return repr(self.data)
@@ -76,15 +76,15 @@ class UserString:
return self.__class__(self.data.encode(encoding, errors))
else:
return self.__class__(self.data.encode(encoding))
- else:
+ else:
return self.__class__(self.data.encode())
def endswith(self, suffix, start=0, end=sys.maxint):
return self.data.endswith(suffix, start, end)
- def expandtabs(self, tabsize=8):
+ def expandtabs(self, tabsize=8):
return self.__class__(self.data.expandtabs(tabsize))
- def find(self, sub, start=0, end=sys.maxint):
+ def find(self, sub, start=0, end=sys.maxint):
return self.data.find(sub, start, end)
- def index(self, sub, start=0, end=sys.maxint):
+ def index(self, sub, start=0, end=sys.maxint):
return self.data.index(sub, start, end)
def isalpha(self): return self.data.isalpha()
def isalnum(self): return self.data.isalnum()
@@ -99,23 +99,23 @@ class UserString:
def ljust(self, width): return self.__class__(self.data.ljust(width))
def lower(self): return self.__class__(self.data.lower())
def lstrip(self): return self.__class__(self.data.lstrip())
- def replace(self, old, new, maxsplit=-1):
+ def replace(self, old, new, maxsplit=-1):
return self.__class__(self.data.replace(old, new, maxsplit))
- def rfind(self, sub, start=0, end=sys.maxint):
+ def rfind(self, sub, start=0, end=sys.maxint):
return self.data.rfind(sub, start, end)
- def rindex(self, sub, start=0, end=sys.maxint):
+ def rindex(self, sub, start=0, end=sys.maxint):
return self.data.rindex(sub, start, end)
def rjust(self, width): return self.__class__(self.data.rjust(width))
def rstrip(self): return self.__class__(self.data.rstrip())
- def split(self, sep=None, maxsplit=-1):
+ def split(self, sep=None, maxsplit=-1):
return self.data.split(sep, maxsplit)
def splitlines(self, keepends=0): return self.data.splitlines(keepends)
- def startswith(self, prefix, start=0, end=sys.maxint):
+ def startswith(self, prefix, start=0, end=sys.maxint):
return self.data.startswith(prefix, start, end)
def strip(self): return self.__class__(self.data.strip())
def swapcase(self): return self.__class__(self.data.swapcase())
def title(self): return self.__class__(self.data.title())
- def translate(self, *args):
+ def translate(self, *args):
return self.__class__(self.data.translate(*args))
def upper(self): return self.__class__(self.data.upper())
@@ -136,7 +136,7 @@ class MutableString(UserString):
A faster and better solution is to rewrite your program using lists."""
def __init__(self, string=""):
self.data = string
- def __hash__(self):
+ def __hash__(self):
raise TypeError, "unhashable type (it is mutable)"
def __setitem__(self, index, sub):
if index < 0 or index >= len(self.data): raise IndexError
@@ -157,7 +157,7 @@ class MutableString(UserString):
self.data = self.data[:start] + self.data[end:]
def immutable(self):
return UserString(self.data)
-
+
if __name__ == "__main__":
# execute the regression test to stdout, if called as a script:
import os
diff --git a/Lib/urllib.py b/Lib/urllib.py
index d0031f6..e79acf0 100644
--- a/Lib/urllib.py
+++ b/Lib/urllib.py
@@ -551,11 +551,11 @@ class FancyURLopener(URLopener):
if match:
scheme, realm = match.groups()
if scheme.lower() == 'basic':
- name = 'retry_' + self.type + '_basic_auth'
- if data is None:
- return getattr(self,name)(url, realm)
- else:
- return getattr(self,name)(url, realm, data)
+ name = 'retry_' + self.type + '_basic_auth'
+ if data is None:
+ return getattr(self,name)(url, realm)
+ else:
+ return getattr(self,name)(url, realm, data)
def retry_http_basic_auth(self, url, realm, data=None):
host, selector = splithost(url)
@@ -571,14 +571,14 @@ class FancyURLopener(URLopener):
return self.open(newurl, data)
def retry_https_basic_auth(self, url, realm, data=None):
- host, selector = splithost(url)
- i = host.find('@') + 1
- host = host[i:]
- user, passwd = self.get_user_passwd(host, realm, i)
- if not (user or passwd): return None
- host = user + ':' + passwd + '@' + host
- newurl = '//' + host + selector
- return self.open_https(newurl)
+ host, selector = splithost(url)
+ i = host.find('@') + 1
+ host = host[i:]
+ user, passwd = self.get_user_passwd(host, realm, i)
+ if not (user or passwd): return None
+ host = user + ':' + passwd + '@' + host
+ newurl = '//' + host + selector
+ return self.open_https(newurl)
def get_user_passwd(self, host, realm, clear_cache = 0):
key = realm + '@' + host.lower()
diff --git a/Lib/urllib2.py b/Lib/urllib2.py
index 3e1e588..cf94d2f 100644
--- a/Lib/urllib2.py
+++ b/Lib/urllib2.py
@@ -1,12 +1,12 @@
"""An extensible library for opening URLs using a variety of protocols
The simplest way to use this module is to call the urlopen function,
-which accepts a string containing a URL or a Request object (described
+which accepts a string containing a URL or a Request object (described
below). It opens the URL and returns the results as file-like
object; the returned object has some extra methods described below.
The OpenerDirectory manages a collection of Handler objects that do
-all the actual work. Each Handler implements a particular protocol or
+all the actual work. Each Handler implements a particular protocol or
option. The OpenerDirector is a composite object that invokes the
Handlers needed to open the requested URL. For example, the
HTTPHandler performs HTTP GET and POST requests and deals with
@@ -16,7 +16,7 @@ with digest authentication.
urlopen(url, data=None) -- basic usage is that same as original
urllib. pass the url and optionally data to post to an HTTP URL, and
-get a file-like object back. One difference is that you can also pass
+get a file-like object back. One difference is that you can also pass
a Request instance instead of URL. Raises a URLError (subclass of
IOError); for HTTP errors, raises an HTTPError, which can also be
treated as a valid response.
@@ -42,7 +42,7 @@ exceptions:
URLError-- a subclass of IOError, individual protocols have their own
specific subclass
-HTTPError-- also a valid HTTP response, so you can treat an HTTP error
+HTTPError-- also a valid HTTP response, so you can treat an HTTP error
as an exceptional event or valid response
internals:
@@ -57,7 +57,7 @@ import urllib2
authinfo = urllib2.HTTPBasicAuthHandler()
authinfo.add_password('realm', 'host', 'username', 'password')
-# build a new opener that adds authentication and caching FTP handlers
+# build a new opener that adds authentication and caching FTP handlers
opener = urllib2.build_opener(authinfo, urllib2.CacheFTPHandler)
# install it
@@ -73,7 +73,7 @@ f = urllib2.urlopen('http://www.python.org/')
# authentication for some reason but fails, how should the error be
# signalled? The client needs to know the HTTP error code. But if
# the handler knows that the problem was, e.g., that it didn't know
- # that hash algo that requested in the challenge, it would be good to
+ # that hash algo that requested in the challenge, it would be good to
# pass that information along to the client, too.
# XXX to do:
@@ -141,7 +141,7 @@ def install_opener(opener):
_opener = opener
# do these error classes make sense?
-# make sure all of the IOError stuff is overridden. we just want to be
+# make sure all of the IOError stuff is overridden. we just want to be
# subtypes.
class URLError(IOError):
@@ -165,7 +165,7 @@ class HTTPError(URLError, addinfourl):
self.fp = fp
# XXX
self.filename = url
-
+
def __str__(self):
return 'HTTP Error %s: %s' % (self.code, self.msg)
@@ -192,7 +192,7 @@ class Request:
def __getattr__(self, attr):
# XXX this is a fallback mechanism to guard against these
- # methods getting called in a non-standard order. this may be
+ # methods getting called in a non-standard order. this may be
# too complicated and/or unnecessary.
# XXX should the __r_XXX attributes be public?
if attr[:12] == '_Request__r_':
@@ -259,7 +259,7 @@ class OpenerDirector:
for meth in get_methods(handler):
if meth[-5:] == '_open':
protocol = meth[:-5]
- if self.handle_open.has_key(protocol):
+ if self.handle_open.has_key(protocol):
self.handle_open[protocol].append(handler)
else:
self.handle_open[protocol] = [handler]
@@ -285,7 +285,7 @@ class OpenerDirector:
if added:
self.handlers.append(handler)
handler.add_parent(self)
-
+
def __del__(self):
self.close()
@@ -314,9 +314,9 @@ class OpenerDirector:
if data is not None:
req.add_data(data)
assert isinstance(req, Request) # really only care about interface
-
+
result = self._call_chain(self.handle_open, 'default',
- 'default_open', req)
+ 'default_open', req)
if result:
return result
@@ -381,7 +381,7 @@ def get_methods(inst):
# XXX probably also want an abstract factory that knows things like
# the fact that a ProxyHandler needs to get inserted first.
# would also know when it makes sense to skip a superclass in favor of
- # a subclass and when it might make sense to include both
+ # a subclass and when it might make sense to include both
def build_opener(*handlers):
"""Create an opener object from a list of handlers.
@@ -393,7 +393,7 @@ def build_opener(*handlers):
If any of the handlers passed as arguments are subclasses of the
default handlers, the default handlers will not be used.
"""
-
+
opener = OpenerDirector()
default_classes = [ProxyHandler, UnknownHandler, HTTPHandler,
HTTPDefaultErrorHandler, HTTPRedirectHandler,
@@ -472,7 +472,7 @@ class ProxyHandler(BaseHandler):
assert hasattr(proxies, 'has_key'), "proxies must be a mapping"
self.proxies = proxies
for type, url in proxies.items():
- setattr(self, '%s_open' % type,
+ setattr(self, '%s_open' % type,
lambda r, proxy=url, type=type, meth=self.proxy_open: \
meth(r, proxy, type))
@@ -574,7 +574,7 @@ class HTTPPasswordMgr:
if len(common) == len(base[1]):
return 1
return 0
-
+
class HTTPBasicAuthHandler(BaseHandler):
rx = re.compile('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"')
@@ -590,8 +590,8 @@ class HTTPBasicAuthHandler(BaseHandler):
# if __current_realm is not None, then the server must have
# refused our name/password and is asking for authorization
# again. must be careful to set it to None on successful
- # return.
-
+ # return.
+
def http_error_401(self, req, fp, code, msg, headers):
# XXX could be mult. headers
authreq = headers.get('www-authenticate', None)
@@ -674,7 +674,7 @@ class HTTPDigestAuthHandler(BaseHandler):
return None
user, pw = self.passwd.find_user_password(realm,
- req.get_full_url())
+ req.get_full_url())
if user is None:
return None
@@ -724,8 +724,8 @@ def encode_digest(digest):
n = ord(c) & 0xf
hexrep.append(hex(n)[-1])
return string.join(hexrep, '')
-
-
+
+
class HTTPHandler(BaseHandler):
def http_open(self, req):
# XXX devise a new mechanism to specify user/password
@@ -745,7 +745,7 @@ class HTTPHandler(BaseHandler):
h.putrequest('GET', req.get_selector())
except socket.error, err:
raise URLError(err)
-
+
# XXX proxies would have different host here
h.putheader('Host', host)
for args in self.parent.addheaders:
@@ -813,7 +813,7 @@ def parse_http_list(s):
start = i
inquote = 0
else:
- i = i + q
+ i = i + q
else:
if c < q:
list.append(s[start:i+c])
@@ -838,7 +838,7 @@ class FileHandler(BaseHandler):
names = None
def get_names(self):
if FileHandler.names is None:
- FileHandler.names = (socket.gethostbyname('localhost'),
+ FileHandler.names = (socket.gethostbyname('localhost'),
socket.gethostbyname(socket.gethostname()))
return FileHandler.names
@@ -967,7 +967,7 @@ class GopherHandler(BaseHandler):
class OpenerFactory:
default_handlers = [UnknownHandler, HTTPHandler,
- HTTPDefaultErrorHandler, HTTPRedirectHandler,
+ HTTPDefaultErrorHandler, HTTPRedirectHandler,
FTPHandler, FileHandler]
proxy_handlers = [ProxyHandler]
handlers = []
@@ -990,7 +990,7 @@ class OpenerFactory:
opener.add_handler(ph)
if __name__ == "__main__":
- # XXX some of the test code depends on machine configurations that
+ # XXX some of the test code depends on machine configurations that
# are internal to CNRI. Need to set up a public server with the
# right authentication configuration for test purposes.
if socket.gethostname() == 'bitdiddle':
@@ -1030,11 +1030,11 @@ if __name__ == "__main__":
bauth = HTTPBasicAuthHandler()
bauth.add_password('basic_test_realm', localhost, 'jhylton',
- 'password')
+ 'password')
dauth = HTTPDigestAuthHandler()
- dauth.add_password('digest_test_realm', localhost, 'jhylton',
+ dauth.add_password('digest_test_realm', localhost, 'jhylton',
'password')
-
+
cfh = CacheFTPHandler()
cfh.setTimeout(1)
diff --git a/Lib/urlparse.py b/Lib/urlparse.py
index b747bc6..b9ecee1 100644
--- a/Lib/urlparse.py
+++ b/Lib/urlparse.py
@@ -6,25 +6,25 @@ UC Irvine, June 1995.
# A classification of schemes ('' means apply by default)
uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'wais', 'file',
- 'https', 'shttp',
- 'prospero', 'rtsp', 'rtspu', '']
+ 'https', 'shttp',
+ 'prospero', 'rtsp', 'rtspu', '']
uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet', 'wais',
- 'file',
- 'https', 'shttp', 'snews',
- 'prospero', 'rtsp', 'rtspu', '']
+ 'file',
+ 'https', 'shttp', 'snews',
+ 'prospero', 'rtsp', 'rtspu', '']
non_hierarchical = ['gopher', 'hdl', 'mailto', 'news', 'telnet', 'wais',
- 'snews', 'sip',
- ]
+ 'snews', 'sip',
+ ]
uses_params = ['ftp', 'hdl', 'prospero', 'http',
- 'https', 'shttp', 'rtsp', 'rtspu', 'sip',
- '']
+ 'https', 'shttp', 'rtsp', 'rtspu', 'sip',
+ '']
uses_query = ['http', 'wais',
- 'https', 'shttp',
- 'gopher', 'rtsp', 'rtspu', 'sip',
- '']
+ 'https', 'shttp',
+ 'gopher', 'rtsp', 'rtspu', 'sip',
+ '']
uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news', 'nntp', 'wais',
- 'https', 'shttp', 'snews',
- 'file', 'prospero', '']
+ 'https', 'shttp', 'snews',
+ 'file', 'prospero', '']
# Characters valid in scheme names
scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
@@ -36,158 +36,158 @@ MAX_CACHE_SIZE = 20
_parse_cache = {}
def clear_cache():
- """Clear the parse cache."""
- global _parse_cache
- _parse_cache = {}
+ """Clear the parse cache."""
+ global _parse_cache
+ _parse_cache = {}
def urlparse(url, scheme = '', allow_fragments = 1):
- """Parse a URL into 6 components:
- <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
- Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
- Note that we don't break the components up in smaller bits
- (e.g. netloc is a single string) and we don't expand % escapes."""
- key = url, scheme, allow_fragments
- cached = _parse_cache.get(key, None)
- if cached:
- return cached
- if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
- clear_cache()
- netloc = path = params = query = fragment = ''
- i = url.find(':')
- if i > 0:
- if url[:i] == 'http': # optimize the common case
- scheme = url[:i].lower()
- url = url[i+1:]
- if url[:2] == '//':
- i = url.find('/', 2)
- if i < 0:
- i = len(url)
- netloc = url[2:i]
- url = url[i:]
- if allow_fragments:
- i = url.rfind('#')
- if i >= 0:
- fragment = url[i+1:]
- url = url[:i]
- i = url.find('?')
- if i >= 0:
- query = url[i+1:]
- url = url[:i]
- i = url.find(';')
- if i >= 0:
- params = url[i+1:]
- url = url[:i]
- tuple = scheme, netloc, url, params, query, fragment
- _parse_cache[key] = tuple
- return tuple
- for c in url[:i]:
- if c not in scheme_chars:
- break
- else:
- scheme, url = url[:i].lower(), url[i+1:]
- if scheme in uses_netloc:
- if url[:2] == '//':
- i = url.find('/', 2)
- if i < 0:
- i = len(url)
- netloc, url = url[2:i], url[i:]
- if allow_fragments and scheme in uses_fragment:
- i = url.rfind('#')
- if i >= 0:
- url, fragment = url[:i], url[i+1:]
- if scheme in uses_query:
- i = url.find('?')
- if i >= 0:
- url, query = url[:i], url[i+1:]
- if scheme in uses_params:
- i = url.find(';')
- if i >= 0:
- url, params = url[:i], url[i+1:]
- tuple = scheme, netloc, url, params, query, fragment
- _parse_cache[key] = tuple
- return tuple
+ """Parse a URL into 6 components:
+ <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
+ Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
+ Note that we don't break the components up in smaller bits
+ (e.g. netloc is a single string) and we don't expand % escapes."""
+ key = url, scheme, allow_fragments
+ cached = _parse_cache.get(key, None)
+ if cached:
+ return cached
+ if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
+ clear_cache()
+ netloc = path = params = query = fragment = ''
+ i = url.find(':')
+ if i > 0:
+ if url[:i] == 'http': # optimize the common case
+ scheme = url[:i].lower()
+ url = url[i+1:]
+ if url[:2] == '//':
+ i = url.find('/', 2)
+ if i < 0:
+ i = len(url)
+ netloc = url[2:i]
+ url = url[i:]
+ if allow_fragments:
+ i = url.rfind('#')
+ if i >= 0:
+ fragment = url[i+1:]
+ url = url[:i]
+ i = url.find('?')
+ if i >= 0:
+ query = url[i+1:]
+ url = url[:i]
+ i = url.find(';')
+ if i >= 0:
+ params = url[i+1:]
+ url = url[:i]
+ tuple = scheme, netloc, url, params, query, fragment
+ _parse_cache[key] = tuple
+ return tuple
+ for c in url[:i]:
+ if c not in scheme_chars:
+ break
+ else:
+ scheme, url = url[:i].lower(), url[i+1:]
+ if scheme in uses_netloc:
+ if url[:2] == '//':
+ i = url.find('/', 2)
+ if i < 0:
+ i = len(url)
+ netloc, url = url[2:i], url[i:]
+ if allow_fragments and scheme in uses_fragment:
+ i = url.rfind('#')
+ if i >= 0:
+ url, fragment = url[:i], url[i+1:]
+ if scheme in uses_query:
+ i = url.find('?')
+ if i >= 0:
+ url, query = url[:i], url[i+1:]
+ if scheme in uses_params:
+ i = url.find(';')
+ if i >= 0:
+ url, params = url[:i], url[i+1:]
+ tuple = scheme, netloc, url, params, query, fragment
+ _parse_cache[key] = tuple
+ return tuple
def urlunparse((scheme, netloc, url, params, query, fragment)):
- """Put a parsed URL back together again. This may result in a
- slightly different, but equivalent URL, if the URL that was parsed
- originally had redundant delimiters, e.g. a ? with an empty query
- (the draft states that these are equivalent)."""
- if netloc or (scheme in uses_netloc and url[:2] == '//'):
- if url and url[:1] != '/': url = '/' + url
- url = '//' + (netloc or '') + url
- if scheme:
- url = scheme + ':' + url
- if params:
- url = url + ';' + params
- if query:
- url = url + '?' + query
- if fragment:
- url = url + '#' + fragment
- return url
+ """Put a parsed URL back together again. This may result in a
+ slightly different, but equivalent URL, if the URL that was parsed
+ originally had redundant delimiters, e.g. a ? with an empty query
+ (the draft states that these are equivalent)."""
+ if netloc or (scheme in uses_netloc and url[:2] == '//'):
+ if url and url[:1] != '/': url = '/' + url
+ url = '//' + (netloc or '') + url
+ if scheme:
+ url = scheme + ':' + url
+ if params:
+ url = url + ';' + params
+ if query:
+ url = url + '?' + query
+ if fragment:
+ url = url + '#' + fragment
+ return url
def urljoin(base, url, allow_fragments = 1):
- """Join a base URL and a possibly relative URL to form an absolute
- interpretation of the latter."""
- if not base:
- return url
- if not url:
- return base
- bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
- urlparse(base, '', allow_fragments)
- scheme, netloc, path, params, query, fragment = \
- urlparse(url, bscheme, allow_fragments)
- if scheme != bscheme or scheme not in uses_relative:
- return url
- if scheme in uses_netloc:
- if netloc:
- return urlunparse((scheme, netloc, path,
- params, query, fragment))
- netloc = bnetloc
- if path[:1] == '/':
- return urlunparse((scheme, netloc, path,
- params, query, fragment))
- if not path:
- if not params:
- params = bparams
- if not query:
- query = bquery
- return urlunparse((scheme, netloc, bpath,
- params, query, fragment))
- segments = bpath.split('/')[:-1] + path.split('/')
- # XXX The stuff below is bogus in various ways...
- if segments[-1] == '.':
- segments[-1] = ''
- while '.' in segments:
- segments.remove('.')
- while 1:
- i = 1
- n = len(segments) - 1
- while i < n:
- if (segments[i] == '..'
- and segments[i-1] not in ('', '..')):
- del segments[i-1:i+1]
- break
- i = i+1
- else:
- break
- if segments == ['', '..']:
- segments[-1] = ''
- elif len(segments) >= 2 and segments[-1] == '..':
- segments[-2:] = ['']
- return urlunparse((scheme, netloc, '/'.join(segments),
- params, query, fragment))
+ """Join a base URL and a possibly relative URL to form an absolute
+ interpretation of the latter."""
+ if not base:
+ return url
+ if not url:
+ return base
+ bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
+ urlparse(base, '', allow_fragments)
+ scheme, netloc, path, params, query, fragment = \
+ urlparse(url, bscheme, allow_fragments)
+ if scheme != bscheme or scheme not in uses_relative:
+ return url
+ if scheme in uses_netloc:
+ if netloc:
+ return urlunparse((scheme, netloc, path,
+ params, query, fragment))
+ netloc = bnetloc
+ if path[:1] == '/':
+ return urlunparse((scheme, netloc, path,
+ params, query, fragment))
+ if not path:
+ if not params:
+ params = bparams
+ if not query:
+ query = bquery
+ return urlunparse((scheme, netloc, bpath,
+ params, query, fragment))
+ segments = bpath.split('/')[:-1] + path.split('/')
+ # XXX The stuff below is bogus in various ways...
+ if segments[-1] == '.':
+ segments[-1] = ''
+ while '.' in segments:
+ segments.remove('.')
+ while 1:
+ i = 1
+ n = len(segments) - 1
+ while i < n:
+ if (segments[i] == '..'
+ and segments[i-1] not in ('', '..')):
+ del segments[i-1:i+1]
+ break
+ i = i+1
+ else:
+ break
+ if segments == ['', '..']:
+ segments[-1] = ''
+ elif len(segments) >= 2 and segments[-1] == '..':
+ segments[-2:] = ['']
+ return urlunparse((scheme, netloc, '/'.join(segments),
+ params, query, fragment))
def urldefrag(url):
- """Removes any existing fragment from URL.
+ """Removes any existing fragment from URL.
- Returns a tuple of the defragmented URL and the fragment. If
- the URL contained no fragments, the second element is the
- empty string.
- """
- s, n, p, a, q, frag = urlparse(url)
- defrag = urlunparse((s, n, p, a, q, ''))
- return defrag, frag
+ Returns a tuple of the defragmented URL and the fragment. If
+ the URL contained no fragments, the second element is the
+ empty string.
+ """
+ s, n, p, a, q, frag = urlparse(url)
+ defrag = urlunparse((s, n, p, a, q, ''))
+ return defrag, frag
test_input = """
@@ -226,34 +226,34 @@ test_input = """
# XXX The result for //g is actually http://g/; is this a problem?
def test():
- import sys
- base = ''
- if sys.argv[1:]:
- fn = sys.argv[1]
- if fn == '-':
- fp = sys.stdin
- else:
- fp = open(fn)
- else:
- import StringIO
- fp = StringIO.StringIO(test_input)
- while 1:
- line = fp.readline()
- if not line: break
- words = line.split()
- if not words:
- continue
- url = words[0]
- parts = urlparse(url)
- print '%-10s : %s' % (url, parts)
- abs = urljoin(base, url)
- if not base:
- base = abs
- wrapped = '<URL:%s>' % abs
- print '%-10s = %s' % (url, wrapped)
- if len(words) == 3 and words[1] == '=':
- if wrapped != words[2]:
- print 'EXPECTED', words[2], '!!!!!!!!!!'
+ import sys
+ base = ''
+ if sys.argv[1:]:
+ fn = sys.argv[1]
+ if fn == '-':
+ fp = sys.stdin
+ else:
+ fp = open(fn)
+ else:
+ import StringIO
+ fp = StringIO.StringIO(test_input)
+ while 1:
+ line = fp.readline()
+ if not line: break
+ words = line.split()
+ if not words:
+ continue
+ url = words[0]
+ parts = urlparse(url)
+ print '%-10s : %s' % (url, parts)
+ abs = urljoin(base, url)
+ if not base:
+ base = abs
+ wrapped = '<URL:%s>' % abs
+ print '%-10s = %s' % (url, wrapped)
+ if len(words) == 3 and words[1] == '=':
+ if wrapped != words[2]:
+ print 'EXPECTED', words[2], '!!!!!!!!!!'
if __name__ == '__main__':
- test()
+ test()
diff --git a/Lib/uu.py b/Lib/uu.py
index bf1f82a..37bdf6a 100755
--- a/Lib/uu.py
+++ b/Lib/uu.py
@@ -3,12 +3,12 @@
# Copyright 1994 by Lance Ellinghouse
# Cathedral City, California Republic, United States of America.
# All Rights Reserved
-# Permission to use, copy, modify, and distribute this software and its
-# documentation for any purpose and without fee is hereby granted,
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
-# both that copyright notice and this permission notice appear in
+# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Lance Ellinghouse
-# not be used in advertising or publicity pertaining to distribution
+# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# LANCE ELLINGHOUSE DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
@@ -154,7 +154,7 @@ def test():
print ' -d: Decode (in stead of encode)'
print ' -t: data is text, encoded format unix-compatible text'
sys.exit(1)
-
+
for o, a in optlist:
if o == '-d': dopt = 1
if o == '-t': topt = 1
diff --git a/Lib/warnings.py b/Lib/warnings.py
index 31d4ad1..9763dc6 100644
--- a/Lib/warnings.py
+++ b/Lib/warnings.py
@@ -131,29 +131,29 @@ def _processoptions(args):
# Helper for _processoptions()
def _setoption(arg):
- parts = arg.split(':')
- if len(parts) > 5:
- raise _OptionError("too many fields (max 5): %s" % `arg`)
- while len(parts) < 5:
- parts.append('')
- action, message, category, module, lineno = [s.strip()
- for s in parts]
- action = _getaction(action)
- message = re.escape(message)
- category = _getcategory(category)
- module = re.escape(module)
- if module:
- module = module + '$'
- if lineno:
- try:
- lineno = int(lineno)
- if lineno < 0:
- raise ValueError
- except (ValueError, OverflowError):
- raise _OptionError("invalid lineno %s" % `lineno`)
- else:
- lineno = 0
- filterwarnings(action, message, category, module, lineno)
+ parts = arg.split(':')
+ if len(parts) > 5:
+ raise _OptionError("too many fields (max 5): %s" % `arg`)
+ while len(parts) < 5:
+ parts.append('')
+ action, message, category, module, lineno = [s.strip()
+ for s in parts]
+ action = _getaction(action)
+ message = re.escape(message)
+ category = _getcategory(category)
+ module = re.escape(module)
+ if module:
+ module = module + '$'
+ if lineno:
+ try:
+ lineno = int(lineno)
+ if lineno < 0:
+ raise ValueError
+ except (ValueError, OverflowError):
+ raise _OptionError("invalid lineno %s" % `lineno`)
+ else:
+ lineno = 0
+ filterwarnings(action, message, category, module, lineno)
# Helper for _setoption()
def _getaction(action):
diff --git a/Lib/wave.py b/Lib/wave.py
index e9a1629..ad4f29a 100644
--- a/Lib/wave.py
+++ b/Lib/wave.py
@@ -395,7 +395,7 @@ class Wave_write:
def getmarkers(self):
return None
-
+
def tell(self):
return self._nframeswritten
diff --git a/Lib/webbrowser.py b/Lib/webbrowser.py
index 759f470..2fa1148 100644
--- a/Lib/webbrowser.py
+++ b/Lib/webbrowser.py
@@ -122,7 +122,7 @@ class Konqueror:
return not rc
def open(self, url, new=1):
- # XXX currently I know no way to prevent KFM from opening a new win.
+ # XXX currently I know no way to prevent KFM from opening a new win.
self.open_new(url)
def open_new(self, url):
diff --git a/Lib/whrandom.py b/Lib/whrandom.py
index f43b2f9..a3a9bf7 100644
--- a/Lib/whrandom.py
+++ b/Lib/whrandom.py
@@ -1,21 +1,21 @@
"""Wichman-Hill random number generator.
Wichmann, B. A. & Hill, I. D. (1982)
-Algorithm AS 183:
+Algorithm AS 183:
An efficient and portable pseudo-random number generator
Applied Statistics 31 (1982) 188-190
-see also:
+see also:
Correction to Algorithm AS 183
- Applied Statistics 33 (1984) 123
+ Applied Statistics 33 (1984) 123
McLeod, A. I. (1985)
- A remark on Algorithm AS 183
+ A remark on Algorithm AS 183
Applied Statistics 34 (1985),198-200
USE:
-whrandom.random() yields double precision random numbers
+whrandom.random() yields double precision random numbers
uniformly distributed between 0 and 1.
whrandom.seed(x, y, z) must be called before whrandom.random()
@@ -38,96 +38,96 @@ down in the serial case by using a lock here.)
class whrandom:
- def __init__(self, x = 0, y = 0, z = 0):
- """Initialize an instance.
- Without arguments, initialize from current time.
- With arguments (x, y, z), initialize from them."""
- self.seed(x, y, z)
-
- def seed(self, x = 0, y = 0, z = 0):
- """Set the seed from (x, y, z).
- These must be integers in the range [0, 256)."""
- if not type(x) == type(y) == type(z) == type(0):
- raise TypeError, 'seeds must be integers'
- if not (0 <= x < 256 and 0 <= y < 256 and 0 <= z < 256):
- raise ValueError, 'seeds must be in range(0, 256)'
- if 0 == x == y == z:
- # Initialize from current time
- import time
- t = long(time.time() * 256)
- t = int((t&0xffffff) ^ (t>>24))
- t, x = divmod(t, 256)
- t, y = divmod(t, 256)
- t, z = divmod(t, 256)
- # Zero is a poor seed, so substitute 1
- self._seed = (x or 1, y or 1, z or 1)
-
- def random(self):
- """Get the next random number in the range [0.0, 1.0)."""
- # This part is thread-unsafe:
- # BEGIN CRITICAL SECTION
- x, y, z = self._seed
- #
- x = (171 * x) % 30269
- y = (172 * y) % 30307
- z = (170 * z) % 30323
- #
- self._seed = x, y, z
- # END CRITICAL SECTION
- #
- return (x/30269.0 + y/30307.0 + z/30323.0) % 1.0
-
- def uniform(self, a, b):
- """Get a random number in the range [a, b)."""
- return a + (b-a) * self.random()
-
- def randint(self, a, b):
- """Get a random integer in the range [a, b] including
- both end points.
-
- (Deprecated; use randrange below.)"""
- return self.randrange(a, b+1)
-
- def choice(self, seq):
- """Choose a random element from a non-empty sequence."""
- return seq[int(self.random() * len(seq))]
-
- def randrange(self, start, stop=None, step=1, int=int, default=None):
- """Choose a random item from range(start, stop[, step]).
-
- This fixes the problem with randint() which includes the
- endpoint; in Python this is usually not what you want.
- Do not supply the 'int' and 'default' arguments."""
- # This code is a bit messy to make it fast for the
- # common case while still doing adequate error checking
- istart = int(start)
- if istart != start:
- raise ValueError, "non-integer arg 1 for randrange()"
- if stop is default:
- if istart > 0:
- return int(self.random() * istart)
- raise ValueError, "empty range for randrange()"
- istop = int(stop)
- if istop != stop:
- raise ValueError, "non-integer stop for randrange()"
- if step == 1:
- if istart < istop:
- return istart + int(self.random() *
- (istop - istart))
- raise ValueError, "empty range for randrange()"
- istep = int(step)
- if istep != step:
- raise ValueError, "non-integer step for randrange()"
- if istep > 0:
- n = (istop - istart + istep - 1) / istep
- elif istep < 0:
- n = (istop - istart + istep + 1) / istep
- else:
- raise ValueError, "zero step for randrange()"
-
- if n <= 0:
- raise ValueError, "empty range for randrange()"
- return istart + istep*int(self.random() * n)
+ def __init__(self, x = 0, y = 0, z = 0):
+ """Initialize an instance.
+ Without arguments, initialize from current time.
+ With arguments (x, y, z), initialize from them."""
+ self.seed(x, y, z)
+
+ def seed(self, x = 0, y = 0, z = 0):
+ """Set the seed from (x, y, z).
+ These must be integers in the range [0, 256)."""
+ if not type(x) == type(y) == type(z) == type(0):
+ raise TypeError, 'seeds must be integers'
+ if not (0 <= x < 256 and 0 <= y < 256 and 0 <= z < 256):
+ raise ValueError, 'seeds must be in range(0, 256)'
+ if 0 == x == y == z:
+ # Initialize from current time
+ import time
+ t = long(time.time() * 256)
+ t = int((t&0xffffff) ^ (t>>24))
+ t, x = divmod(t, 256)
+ t, y = divmod(t, 256)
+ t, z = divmod(t, 256)
+ # Zero is a poor seed, so substitute 1
+ self._seed = (x or 1, y or 1, z or 1)
+
+ def random(self):
+ """Get the next random number in the range [0.0, 1.0)."""
+ # This part is thread-unsafe:
+ # BEGIN CRITICAL SECTION
+ x, y, z = self._seed
+ #
+ x = (171 * x) % 30269
+ y = (172 * y) % 30307
+ z = (170 * z) % 30323
+ #
+ self._seed = x, y, z
+ # END CRITICAL SECTION
+ #
+ return (x/30269.0 + y/30307.0 + z/30323.0) % 1.0
+
+ def uniform(self, a, b):
+ """Get a random number in the range [a, b)."""
+ return a + (b-a) * self.random()
+
+ def randint(self, a, b):
+ """Get a random integer in the range [a, b] including
+ both end points.
+
+ (Deprecated; use randrange below.)"""
+ return self.randrange(a, b+1)
+
+ def choice(self, seq):
+ """Choose a random element from a non-empty sequence."""
+ return seq[int(self.random() * len(seq))]
+
+ def randrange(self, start, stop=None, step=1, int=int, default=None):
+ """Choose a random item from range(start, stop[, step]).
+
+ This fixes the problem with randint() which includes the
+ endpoint; in Python this is usually not what you want.
+ Do not supply the 'int' and 'default' arguments."""
+ # This code is a bit messy to make it fast for the
+ # common case while still doing adequate error checking
+ istart = int(start)
+ if istart != start:
+ raise ValueError, "non-integer arg 1 for randrange()"
+ if stop is default:
+ if istart > 0:
+ return int(self.random() * istart)
+ raise ValueError, "empty range for randrange()"
+ istop = int(stop)
+ if istop != stop:
+ raise ValueError, "non-integer stop for randrange()"
+ if step == 1:
+ if istart < istop:
+ return istart + int(self.random() *
+ (istop - istart))
+ raise ValueError, "empty range for randrange()"
+ istep = int(step)
+ if istep != step:
+ raise ValueError, "non-integer step for randrange()"
+ if istep > 0:
+ n = (istop - istart + istep - 1) / istep
+ elif istep < 0:
+ n = (istop - istart + istep + 1) / istep
+ else:
+ raise ValueError, "zero step for randrange()"
+
+ if n <= 0:
+ raise ValueError, "empty range for randrange()"
+ return istart + istep*int(self.random() * n)
# Initialize from the current time
diff --git a/Lib/xdrlib.py b/Lib/xdrlib.py
index c97975d..621f295 100644
--- a/Lib/xdrlib.py
+++ b/Lib/xdrlib.py
@@ -29,7 +29,7 @@ class ConversionError(Error):
pass
-
+
class Packer:
"""Pack various data representations into a buffer."""
@@ -106,7 +106,7 @@ class Packer:
self.pack_farray(n, list, pack_item)
-
+
class Unpacker:
"""Unpacks various data representations from the given buffer."""
@@ -220,7 +220,7 @@ class Unpacker:
n = self.unpack_uint()
return self.unpack_farray(n, unpack_item)
-
+
# test suite
def _test():
p = Packer()
@@ -274,6 +274,6 @@ def _test():
print 'ConversionError:', var.msg
count = count + 1
-
+
if __name__ == '__main__':
_test()
diff --git a/Lib/xmllib.py b/Lib/xmllib.py
index 8bca0fc..f09ba90 100644
--- a/Lib/xmllib.py
+++ b/Lib/xmllib.py
@@ -250,9 +250,9 @@ class XMLParser:
break
res = interesting.search(rawdata, i)
if res:
- j = res.start(0)
+ j = res.start(0)
else:
- j = n
+ j = n
if i < j:
data = rawdata[i:j]
if self.__at_start and space.match(data) is None:
diff --git a/Lib/zipfile.py b/Lib/zipfile.py
index 3a40dcb..1c2b0de 100644
--- a/Lib/zipfile.py
+++ b/Lib/zipfile.py
@@ -6,13 +6,13 @@ import struct, os, time
import binascii
try:
- import zlib # We may need its compression method
+ import zlib # We may need its compression method
except:
zlib = None
class BadZipfile(Exception):
pass
-error = BadZipfile # The exception raised by this module
+error = BadZipfile # The exception raised by this module
# constants for Zip file compression methods
ZIP_STORED = 0
@@ -35,11 +35,11 @@ def is_zipfile(filename):
"""
try:
fpin = open(filename, "rb")
- fpin.seek(-22, 2) # Seek to end-of-file record
+ fpin.seek(-22, 2) # Seek to end-of-file record
endrec = fpin.read()
fpin.close()
if endrec[0:4] == "PK\005\006" and endrec[-2:] == "\000\000":
- return 1 # file has correct magic number
+ return 1 # file has correct magic number
except:
pass
@@ -48,26 +48,26 @@ class ZipInfo:
"""Class with attributes describing each file in the ZIP archive."""
def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
- self.filename = filename # Name of the file in the archive
- self.date_time = date_time # year, month, day, hour, min, sec
+ self.filename = filename # Name of the file in the archive
+ self.date_time = date_time # year, month, day, hour, min, sec
# Standard values:
- self.compress_type = ZIP_STORED # Type of compression for the file
- self.comment = "" # Comment for each file
- self.extra = "" # ZIP extra data
- self.create_system = 0 # System which created ZIP archive
- self.create_version = 20 # Version which created ZIP archive
- self.extract_version = 20 # Version needed to extract archive
- self.reserved = 0 # Must be zero
- self.flag_bits = 0 # ZIP flag bits
- self.volume = 0 # Volume number of file header
- self.internal_attr = 0 # Internal attributes
- self.external_attr = 0 # External file attributes
+ self.compress_type = ZIP_STORED # Type of compression for the file
+ self.comment = "" # Comment for each file
+ self.extra = "" # ZIP extra data
+ self.create_system = 0 # System which created ZIP archive
+ self.create_version = 20 # Version which created ZIP archive
+ self.extract_version = 20 # Version needed to extract archive
+ self.reserved = 0 # Must be zero
+ self.flag_bits = 0 # ZIP flag bits
+ self.volume = 0 # Volume number of file header
+ self.internal_attr = 0 # Internal attributes
+ self.external_attr = 0 # External file attributes
# Other attributes are set by class ZipFile:
- # header_offset Byte offset to the file header
- # file_offset Byte offset to the start of the file data
- # CRC CRC-32 of the uncompressed file
- # compress_size Size of the compressed file
- # file_size Size of the uncompressed file
+ # header_offset Byte offset to the file header
+ # file_offset Byte offset to the start of the file data
+ # CRC CRC-32 of the uncompressed file
+ # compress_size Size of the compressed file
+ # file_size Size of the uncompressed file
def FileHeader(self):
"""Return the per-file header as a string."""
@@ -75,12 +75,12 @@ class ZipInfo:
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | dt[5] / 2
if self.flag_bits & 0x08:
- # Set these to zero because we write them after the file data
- CRC = compress_size = file_size = 0
+ # Set these to zero because we write them after the file data
+ CRC = compress_size = file_size = 0
else:
- CRC = self.CRC
- compress_size = self.compress_size
- file_size = self.file_size
+ CRC = self.CRC
+ compress_size = self.compress_size
+ file_size = self.file_size
header = struct.pack(structFileHeader, stringFileHeader,
self.extract_version, self.reserved, self.flag_bits,
self.compress_type, dostime, dosdate, CRC,
@@ -102,10 +102,10 @@ class ZipFile:
"Compression requires the (missing) zlib module"
else:
raise RuntimeError, "That compression method is not supported"
- self.debug = 0 # Level of printing: 0 through 3
- self.NameToInfo = {} # Find file info given name
- self.filelist = [] # List of ZipInfo instances for archive
- self.compression = compression # Method of compression
+ self.debug = 0 # Level of printing: 0 through 3
+ self.NameToInfo = {} # Find file info given name
+ self.filelist = [] # List of ZipInfo instances for archive
+ self.compression = compression # Method of compression
self.filename = filename
self.mode = key = mode[0]
if key == 'r':
@@ -115,14 +115,14 @@ class ZipFile:
self.fp = open(filename, "wb")
elif key == 'a':
fp = self.fp = open(filename, "r+b")
- fp.seek(-22, 2) # Seek to end-of-file record
+ fp.seek(-22, 2) # Seek to end-of-file record
endrec = fp.read()
if endrec[0:4] == stringEndArchive and \
endrec[-2:] == "\000\000":
- self._GetContents() # file is a zip file
+ self._GetContents() # file is a zip file
# seek to start of directory and overwrite
fp.seek(self.start_dir, 0)
- else: # file is not a zip file, just append
+ else: # file is not a zip file, just append
fp.seek(0, 2)
else:
raise RuntimeError, 'Mode must be "r", "w" or "a"'
@@ -130,16 +130,16 @@ class ZipFile:
def _GetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
- fp.seek(-22, 2) # Start of end-of-archive record
- filesize = fp.tell() + 22 # Get file size
- endrec = fp.read(22) # Archive must not end with a comment!
+ fp.seek(-22, 2) # Start of end-of-archive record
+ filesize = fp.tell() + 22 # Get file size
+ endrec = fp.read(22) # Archive must not end with a comment!
if endrec[0:4] != stringEndArchive or endrec[-2:] != "\000\000":
raise BadZipfile, "File is not a zip file, or ends with a comment"
endrec = struct.unpack(structEndArchive, endrec)
if self.debug > 1:
print endrec
- size_cd = endrec[5] # bytes in central directory
- offset_cd = endrec[6] # offset of central directory
+ size_cd = endrec[5] # bytes in central directory
+ offset_cd = endrec[6] # offset of central directory
x = filesize - 22 - size_cd
# "concat" is zero, unless zip was concatenated to another file
concat = x - offset_cd
@@ -211,7 +211,7 @@ class ZipFile:
"""Read all the files and check the CRC."""
for zinfo in self.filelist:
try:
- self.read(zinfo.filename) # Check CRC-32
+ self.read(zinfo.filename) # Check CRC-32
except:
return zinfo.filename
@@ -256,7 +256,7 @@ class ZipFile:
def _writecheck(self, zinfo):
"""Check for errors before writing a file to the archive."""
if self.NameToInfo.has_key(zinfo.filename):
- if self.debug: # Warning for duplicate names
+ if self.debug: # Warning for duplicate names
print "Duplicate name:", zinfo.filename
if self.mode not in ("w", "a"):
raise RuntimeError, 'write() requires mode "w" or "a"'
@@ -278,20 +278,20 @@ class ZipFile:
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
- zinfo = ZipInfo(filename, date_time)
+ zinfo = ZipInfo(filename, date_time)
else:
- zinfo = ZipInfo(arcname, date_time)
- zinfo.external_attr = st[0] << 16 # Unix attributes
+ zinfo = ZipInfo(arcname, date_time)
+ zinfo.external_attr = st[0] << 16 # Unix attributes
if compress_type is None:
- zinfo.compress_type = self.compression
+ zinfo.compress_type = self.compression
else:
- zinfo.compress_type = compress_type
+ zinfo.compress_type = compress_type
self._writecheck(zinfo)
fp = open(filename, "rb")
zinfo.flag_bits = 0x08
- zinfo.header_offset = self.fp.tell() # Start of header bytes
+ zinfo.header_offset = self.fp.tell() # Start of header bytes
self.fp.write(zinfo.FileHeader())
- zinfo.file_offset = self.fp.tell() # Start of file bytes
+ zinfo.file_offset = self.fp.tell() # Start of file bytes
CRC = 0
compress_size = 0
file_size = 0
@@ -330,23 +330,23 @@ class ZipFile:
"""Write a file into the archive. The contents is the string
'bytes'."""
self._writecheck(zinfo)
- zinfo.file_size = len(bytes) # Uncompressed size
- zinfo.CRC = binascii.crc32(bytes) # CRC-32 checksum
+ zinfo.file_size = len(bytes) # Uncompressed size
+ zinfo.CRC = binascii.crc32(bytes) # CRC-32 checksum
if zinfo.compress_type == ZIP_DEFLATED:
co = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -15)
bytes = co.compress(bytes) + co.flush()
- zinfo.compress_size = len(bytes) # Compressed size
+ zinfo.compress_size = len(bytes) # Compressed size
else:
zinfo.compress_size = zinfo.file_size
- zinfo.header_offset = self.fp.tell() # Start of header bytes
+ zinfo.header_offset = self.fp.tell() # Start of header bytes
self.fp.write(zinfo.FileHeader())
- zinfo.file_offset = self.fp.tell() # Start of file bytes
+ zinfo.file_offset = self.fp.tell() # Start of file bytes
self.fp.write(bytes)
if zinfo.flag_bits & 0x08:
- # Write CRC and file sizes after the file data
- self.fp.write(struct.pack("<lll", zinfo.CRC, zinfo.compress_size,
- zinfo.file_size))
+ # Write CRC and file sizes after the file data
+ self.fp.write(struct.pack("<lll", zinfo.CRC, zinfo.compress_size,
+ zinfo.file_size))
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
@@ -359,10 +359,10 @@ class ZipFile:
def close(self):
"""Close the file, and for mode "w" and "a" write the ending
records."""
- if self.mode in ("w", "a"): # write ending records
+ if self.mode in ("w", "a"): # write ending records
count = 0
pos1 = self.fp.tell()
- for zinfo in self.filelist: # write central directory
+ for zinfo in self.filelist: # write central directory
count = count + 1
dt = zinfo.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
@@ -468,7 +468,7 @@ class PyZipFile(ZipFile):
file_pyo = pathname + ".pyo"
if os.path.isfile(file_pyo) and \
os.stat(file_pyo)[8] >= os.stat(file_py)[8]:
- fname = file_pyo # Use .pyo file
+ fname = file_pyo # Use .pyo file
elif not os.path.isfile(file_pyc) or \
os.stat(file_pyc)[8] < os.stat(file_py)[8]:
import py_compile