From e7b146fb3bdca62a0d5ecc06dbf3348e5a4fe757 Mon Sep 17 00:00:00 2001 From: Guido van Rossum Date: Fri, 4 Feb 2000 15:28:42 +0000 Subject: The third and final doc-string sweep by Ka-Ping Yee. The attached patches update the standard library so that all modules have docstrings beginning with one-line summaries. A new docstring was added to formatter. The docstring for os.py was updated to mention nt, os2, ce in addition to posix, dos, mac. --- Lib/quopri.py | 16 +- Lib/random.py | 41 +-- Lib/regex_syntax.py | 10 +- Lib/regsub.py | 16 +- Lib/repr.py | 2 +- Lib/sgmllib.py | 2 +- Lib/shlex.py | 2 + Lib/shutil.py | 2 +- Lib/stat.py | 12 +- Lib/statcache.py | 32 +- Lib/statvfs.py | 6 +- Lib/string.py | 12 +- Lib/sunau.py | 207 ++++++------ Lib/sunaudio.py | 11 +- Lib/symbol.py | 6 +- Lib/tempfile.py | 14 +- Lib/threading.py | 3 +- Lib/toaiff.py | 16 +- Lib/token.py | 6 +- Lib/traceback.py | 75 ++++- Lib/tty.py | 7 +- Lib/types.py | 6 +- Lib/tzparse.py | 16 +- Lib/urllib.py | 187 ++++++----- Lib/urllib2.py | 2 +- Lib/urlparse.py | 29 +- Lib/uu.py | 9 +- Lib/wave.py | 899 ++++++++++++++++++++++++++-------------------------- Lib/whrandom.py | 125 ++++---- 29 files changed, 942 insertions(+), 829 deletions(-) diff --git a/Lib/quopri.py b/Lib/quopri.py index 6b45597..cd2f5eb 100755 --- a/Lib/quopri.py +++ b/Lib/quopri.py @@ -1,6 +1,7 @@ #! /usr/bin/env python -# Conversions to/from quoted-printable transport encoding as per RFC-1521 +"""Conversions to/from quoted-printable transport encoding as per RFC-1521.""" + # (Dec 1991 version). ESCAPE = '=' @@ -8,11 +9,15 @@ MAXLINESIZE = 76 HEX = '0123456789ABCDEF' def needsquoting(c, quotetabs): + """Decide whether a particular character needs to be quoted. + + The 'quotetabs' flag indicates whether tabs should be quoted.""" if c == '\t': return not quotetabs return c == ESCAPE or not(' ' <= c <= '~') def quote(c): + """Quote a single character.""" if c == ESCAPE: return ESCAPE * 2 else: @@ -20,6 +25,10 @@ def quote(c): return ESCAPE + HEX[i/16] + HEX[i%16] def encode(input, output, quotetabs): + """Read 'input', apply quoted-printable encoding, and write to 'output'. + + 'input' and 'output' are files with readline() and write() methods. + The 'quotetabs' flag indicates whether tabs should be quoted.""" while 1: line = input.readline() if not line: break @@ -42,6 +51,9 @@ def encode(input, output, quotetabs): output.write(new + '\n') def decode(input, output): + """Read 'input', apply quoted-printable decoding, and write to 'output'. + + 'input' and 'output' are files with readline() and write() methods.""" new = '' while 1: line = input.readline() @@ -73,9 +85,11 @@ def decode(input, output): output.write(new) def ishex(c): + """Return true if the character 'c' is a hexadecimal digit.""" return '0' <= c <= '9' or 'a' <= c <= 'f' or 'A' <= c <= 'F' def unhex(s): + """Get the integer value of a hexadecimal number.""" bits = 0 for c in s: if '0' <= c <= '9': diff --git a/Lib/random.py b/Lib/random.py index 9e899ad..21cff89 100644 --- a/Lib/random.py +++ b/Lib/random.py @@ -1,23 +1,24 @@ -# R A N D O M V A R I A B L E G E N E R A T O R S -# -# distributions on the real line: -# ------------------------------ -# normal (Gaussian) -# lognormal -# negative exponential -# gamma -# beta -# -# distributions on the circle (angles 0 to 2pi) -# --------------------------------------------- -# circular uniform -# von Mises - -# Translated from anonymously contributed C/C++ source. 
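# A minimal usage sketch of the quopri interface whose docstrings appear
# earlier in this patch; the sample text and the StringIO buffers are
# assumptions for illustration only, not part of the patch itself.
import quopri, StringIO

infile = StringIO.StringIO('tab:\there = equals\n')
outfile = StringIO.StringIO()
quopri.encode(infile, outfile, 1)      # quotetabs=1: tabs are escaped too
print outfile.getvalue()               # tab and '=' come out as =09 and =3D

back = StringIO.StringIO()
quopri.decode(StringIO.StringIO(outfile.getvalue()), back)
print back.getvalue()                  # decoding round-trips the original text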
- -# Multi-threading note: the random number generator used here is not -# thread-safe; it is possible that two calls return the same random -# value. See whrandom.py for more info. +"""Random variable generators. + + distributions on the real line: + ------------------------------ + normal (Gaussian) + lognormal + negative exponential + gamma + beta + + distributions on the circle (angles 0 to 2pi) + --------------------------------------------- + circular uniform + von Mises + +Translated from anonymously contributed C/C++ source. + +Multi-threading note: the random number generator used here is not +thread-safe; it is possible that two calls return the same random +value. See whrandom.py for more info. +""" import whrandom from whrandom import random, uniform, randint, choice, randrange # For export! diff --git a/Lib/regex_syntax.py b/Lib/regex_syntax.py index 8631f42..aab7e7a 100644 --- a/Lib/regex_syntax.py +++ b/Lib/regex_syntax.py @@ -1,5 +1,11 @@ -# These bits are passed to regex.set_syntax() to choose among -# alternative regexp syntaxes. +"""Constants for selecting regexp syntaxes for the obsolete regex module. + +This module is only for backward compatibility. "regex" has now +been replaced by the new regular expression module, "re". + +These bits are passed to regex.set_syntax() to choose among +alternative regexp syntaxes. +""" # 1 means plain parentheses serve as grouping, and backslash # parentheses are needed for literal searching. diff --git a/Lib/regsub.py b/Lib/regsub.py index 8fb3306..8e341bb 100644 --- a/Lib/regsub.py +++ b/Lib/regsub.py @@ -1,10 +1,14 @@ -# Regular expression subroutines: -# sub(pat, repl, str): replace first occurrence of pattern in string -# gsub(pat, repl, str): replace all occurrences of pattern in string -# split(str, pat, maxsplit): split string using pattern as delimiter -# splitx(str, pat, maxsplit): split string using pattern as delimiter plus -# return delimiters +"""Regexp-based split and replace using the obsolete regex module. +This module is only for backward compatibility. These operations +are now provided by the new regular expression module, "re". + +sub(pat, repl, str): replace first occurrence of pattern in string +gsub(pat, repl, str): replace all occurrences of pattern in string +split(str, pat, maxsplit): split string using pattern as delimiter +splitx(str, pat, maxsplit): split string using pattern as delimiter plus + return delimiters +""" import regex diff --git a/Lib/repr.py b/Lib/repr.py index 6376a14..0827428 100644 --- a/Lib/repr.py +++ b/Lib/repr.py @@ -1,4 +1,4 @@ -# Redo the `...` (representation) but with limits on most sizes. +"""Redo the `...` (representation) but with limits on most sizes.""" import string diff --git a/Lib/sgmllib.py b/Lib/sgmllib.py index 8d50a88..a620a7d 100644 --- a/Lib/sgmllib.py +++ b/Lib/sgmllib.py @@ -1,4 +1,4 @@ -# A parser for SGML, using the derived class as static DTD. +"""A parser for SGML, using the derived class as a static DTD.""" # XXX This only supports those SGML features used by HTML. diff --git a/Lib/shlex.py b/Lib/shlex.py index 6eba4c8..c1ee02d 100644 --- a/Lib/shlex.py +++ b/Lib/shlex.py @@ -1,3 +1,5 @@ +"""A lexical analyzer class for simple shell-like syntaxes.""" + # Module and documentation by Eric S. Raymond, 21 Dec 1998 import sys diff --git a/Lib/shutil.py b/Lib/shutil.py index 162ff47..698543f 100644 --- a/Lib/shutil.py +++ b/Lib/shutil.py @@ -1,4 +1,4 @@ -"""Utility functions for copying files. +"""Utility functions for copying files and directory trees. 
XXX The functions here don't copy the resource fork or other metadata on Mac. diff --git a/Lib/stat.py b/Lib/stat.py index 5f6f522..9280ce6 100644 --- a/Lib/stat.py +++ b/Lib/stat.py @@ -1,10 +1,8 @@ -# Module 'stat' -# -# Defines constants and functions for interpreting stat/lstat struct -# as returned by os.stat() and os.lstat() (if it exists). -# -# Suggested usage: from stat import * -# +"""Constants/functions for interpreting results of os.stat() and os.lstat(). + +Suggested usage: from stat import * +""" + # XXX Strictly spoken, this module may have to be adapted for each POSIX # implementation; in practice, however, the numeric constants used by # stat() are almost universal (even for stat() emulations on non-UNIX diff --git a/Lib/statcache.py b/Lib/statcache.py index 770aef0..0d88a9a 100644 --- a/Lib/statcache.py +++ b/Lib/statcache.py @@ -1,7 +1,6 @@ -# Module 'statcache' -# -# Maintain a cache of file stats. -# There are functions to reset the cache or to selectively remove items. +"""Maintain a cache of file stats. +There are functions to reset the cache or to selectively remove items. +""" import os from stat import * @@ -12,42 +11,37 @@ from stat import * cache = {} -# Stat a file, possibly out of the cache. -# def stat(path): + """Stat a file, possibly out of the cache.""" if cache.has_key(path): return cache[path] cache[path] = ret = os.stat(path) return ret -# Reset the cache completely. -# def reset(): + """Reset the cache completely.""" global cache cache = {} -# Remove a given item from the cache, if it exists. -# def forget(path): + """Remove a given item from the cache, if it exists.""" if cache.has_key(path): del cache[path] -# Remove all pathnames with a given prefix. -# def forget_prefix(prefix): + """Remove all pathnames with a given prefix.""" n = len(prefix) for path in cache.keys(): if path[:n] == prefix: del cache[path] -# Forget about a directory and all entries in it, but not about -# entries in subdirectories. -# def forget_dir(prefix): + """Forget about a directory and all entries in it, but not about + entries in subdirectories.""" if prefix[-1:] == '/' and prefix <> '/': prefix = prefix[:-1] forget(prefix) @@ -62,19 +56,17 @@ def forget_dir(prefix): del cache[path] -# Remove all pathnames except with a given prefix. -# Normally used with prefix = '/' after a chdir(). -# def forget_except_prefix(prefix): + """Remove all pathnames except with a given prefix. + Normally used with prefix = '/' after a chdir().""" n = len(prefix) for path in cache.keys(): if path[:n] <> prefix: del cache[path] -# Check for directory. -# def isdir(path): + """Check for directory.""" try: st = stat(path) except os.error: diff --git a/Lib/statvfs.py b/Lib/statvfs.py index 49dd962..082f20a 100644 --- a/Lib/statvfs.py +++ b/Lib/statvfs.py @@ -1,8 +1,4 @@ -# Module 'statvfs' -# -# Defines constants for interpreting statvfs struct as returned -# by os.statvfs() and os.fstatvfs() (if they exist). -# +"""Constants for interpreting the results of os.statvfs() and os.fstatvfs().""" # Indices for statvfs struct members in the tuple returned by # os.statvfs() and os.fstatvfs(). diff --git a/Lib/string.py b/Lib/string.py index a43da32..d7969dd 100644 --- a/Lib/string.py +++ b/Lib/string.py @@ -1,11 +1,9 @@ -# module 'string' -- A collection of string operations +"""A collection of string operations (most are no longer used in Python 1.6). -# Warning: most of the code you see here isn't normally used nowadays. 
With -# Python 1.6, many of these functions are implemented as methods on the -# standard string object. They used to be implemented by a built-in module -# called strop, but strop is now obsolete itself. - -"""Common string manipulations. +Warning: most of the code you see here isn't normally used nowadays. With +Python 1.6, many of these functions are implemented as methods on the +standard string object. They used to be implemented by a built-in module +called strop, but strop is now obsolete itself. Public module variables: diff --git a/Lib/sunau.py b/Lib/sunau.py index 0ba7dc9..5e8ac32 100644 --- a/Lib/sunau.py +++ b/Lib/sunau.py @@ -1,106 +1,107 @@ -# Stuff to parse Sun and NeXT audio files. -# -# An audio consists of a header followed by the data. The structure -# of the header is as follows. -# -# +---------------+ -# | magic word | -# +---------------+ -# | header size | -# +---------------+ -# | data size | -# +---------------+ -# | encoding | -# +---------------+ -# | sample rate | -# +---------------+ -# | # of channels | -# +---------------+ -# | info | -# | | -# +---------------+ -# -# The magic word consists of the 4 characters '.snd'. Apart from the -# info field, all header fields are 4 bytes in size. They are all -# 32-bit unsigned integers encoded in big-endian byte order. -# -# The header size really gives the start of the data. -# The data size is the physical size of the data. From the other -# parameter the number of frames can be calculated. -# The encoding gives the way in which audio samples are encoded. -# Possible values are listed below. -# The info field currently consists of an ASCII string giving a -# human-readable description of the audio file. The info field is -# padded with NUL bytes to the header size. -# -# Usage. -# -# Reading audio files: -# f = sunau.open(file, 'r') -# where file is either the name of a file or an open file pointer. -# The open file pointer must have methods read(), seek(), and close(). -# When the setpos() and rewind() methods are not used, the seek() -# method is not necessary. -# -# This returns an instance of a class with the following public methods: -# getnchannels() -- returns number of audio channels (1 for -# mono, 2 for stereo) -# getsampwidth() -- returns sample width in bytes -# getframerate() -- returns sampling frequency -# getnframes() -- returns number of audio frames -# getcomptype() -- returns compression type ('NONE' or 'ULAW') -# getcompname() -- returns human-readable version of -# compression type ('not compressed' matches 'NONE') -# getparams() -- returns a tuple consisting of all of the -# above in the above order -# getmarkers() -- returns None (for compatibility with the -# aifc module) -# getmark(id) -- raises an error since the mark does not -# exist (for compatibility with the aifc module) -# readframes(n) -- returns at most n frames of audio -# rewind() -- rewind to the beginning of the audio stream -# setpos(pos) -- seek to the specified position -# tell() -- return the current position -# close() -- close the instance (make it unusable) -# The position returned by tell() and the position given to setpos() -# are compatible and have nothing to do with the actual postion in the -# file. -# The close() method is called automatically when the class instance -# is destroyed. -# -# Writing audio files: -# f = sunau.open(file, 'w') -# where file is either the name of a file or an open file pointer. -# The open file pointer must have methods write(), tell(), seek(), and -# close(). 
-# -# This returns an instance of a class with the following public methods: -# setnchannels(n) -- set the number of channels -# setsampwidth(n) -- set the sample width -# setframerate(n) -- set the frame rate -# setnframes(n) -- set the number of frames -# setcomptype(type, name) -# -- set the compression type and the -# human-readable compression type -# setparams(tuple)-- set all parameters at once -# tell() -- return current position in output file -# writeframesraw(data) -# -- write audio frames without pathing up the -# file header -# writeframes(data) -# -- write audio frames and patch up the file header -# close() -- patch up the file header and close the -# output file -# You should set the parameters before the first writeframesraw or -# writeframes. The total number of frames does not need to be set, -# but when it is set to the correct value, the header does not have to -# be patched up. -# It is best to first set all parameters, perhaps possibly the -# compression type, and then write audio frames using writeframesraw. -# When all frames have been written, either call writeframes('') or -# close() to patch up the sizes in the header. -# The close() method is called automatically when the class instance -# is destroyed. +"""Stuff to parse Sun and NeXT audio files. + +An audio consists of a header followed by the data. The structure +of the header is as follows. + + +---------------+ + | magic word | + +---------------+ + | header size | + +---------------+ + | data size | + +---------------+ + | encoding | + +---------------+ + | sample rate | + +---------------+ + | # of channels | + +---------------+ + | info | + | | + +---------------+ + +The magic word consists of the 4 characters '.snd'. Apart from the +info field, all header fields are 4 bytes in size. They are all +32-bit unsigned integers encoded in big-endian byte order. + +The header size really gives the start of the data. +The data size is the physical size of the data. From the other +parameter the number of frames can be calculated. +The encoding gives the way in which audio samples are encoded. +Possible values are listed below. +The info field currently consists of an ASCII string giving a +human-readable description of the audio file. The info field is +padded with NUL bytes to the header size. + +Usage. + +Reading audio files: + f = sunau.open(file, 'r') +where file is either the name of a file or an open file pointer. +The open file pointer must have methods read(), seek(), and close(). +When the setpos() and rewind() methods are not used, the seek() +method is not necessary. 
+ +This returns an instance of a class with the following public methods: + getnchannels() -- returns number of audio channels (1 for + mono, 2 for stereo) + getsampwidth() -- returns sample width in bytes + getframerate() -- returns sampling frequency + getnframes() -- returns number of audio frames + getcomptype() -- returns compression type ('NONE' or 'ULAW') + getcompname() -- returns human-readable version of + compression type ('not compressed' matches 'NONE') + getparams() -- returns a tuple consisting of all of the + above in the above order + getmarkers() -- returns None (for compatibility with the + aifc module) + getmark(id) -- raises an error since the mark does not + exist (for compatibility with the aifc module) + readframes(n) -- returns at most n frames of audio + rewind() -- rewind to the beginning of the audio stream + setpos(pos) -- seek to the specified position + tell() -- return the current position + close() -- close the instance (make it unusable) +The position returned by tell() and the position given to setpos() +are compatible and have nothing to do with the actual postion in the +file. +The close() method is called automatically when the class instance +is destroyed. + +Writing audio files: + f = sunau.open(file, 'w') +where file is either the name of a file or an open file pointer. +The open file pointer must have methods write(), tell(), seek(), and +close(). + +This returns an instance of a class with the following public methods: + setnchannels(n) -- set the number of channels + setsampwidth(n) -- set the sample width + setframerate(n) -- set the frame rate + setnframes(n) -- set the number of frames + setcomptype(type, name) + -- set the compression type and the + human-readable compression type + setparams(tuple)-- set all parameters at once + tell() -- return current position in output file + writeframesraw(data) + -- write audio frames without pathing up the + file header + writeframes(data) + -- write audio frames and patch up the file header + close() -- patch up the file header and close the + output file +You should set the parameters before the first writeframesraw or +writeframes. The total number of frames does not need to be set, +but when it is set to the correct value, the header does not have to +be patched up. +It is best to first set all parameters, perhaps possibly the +compression type, and then write audio frames using writeframesraw. +When all frames have been written, either call writeframes('') or +close() to patch up the sizes in the header. +The close() method is called automatically when the class instance +is destroyed. 
+""" # from AUDIO_FILE_MAGIC = 0x2e736e64 diff --git a/Lib/sunaudio.py b/Lib/sunaudio.py index 96def15..b7df71c 100644 --- a/Lib/sunaudio.py +++ b/Lib/sunaudio.py @@ -1,19 +1,17 @@ -# Module 'sunaudio' -- interpret sun audio headers +"""Interpret sun audio headers.""" MAGIC = '.snd' error = 'sunaudio sound header conversion error' -# convert a 4-char value to integer - def get_long_be(s): + """Convert a 4-char value to integer.""" return (ord(s[0])<<24) | (ord(s[1])<<16) | (ord(s[2])<<8) | ord(s[3]) -# read a sound header from an open file - def gethdr(fp): + """Read a sound header from an open file.""" if fp.read(4) <> MAGIC: raise error, 'gethdr: bad magic word' hdr_size = get_long_be(fp.read(4)) @@ -31,9 +29,8 @@ def gethdr(fp): return (data_size, encoding, sample_rate, channels, info) -# read and print the sound header of a named file - def printhdr(file): + """Read and print the sound header of a named file.""" hdr = gethdr(open(file, 'r')) data_size, encoding, sample_rate, channels, info = hdr while info[-1:] == '\0': diff --git a/Lib/symbol.py b/Lib/symbol.py index 73b33e0..8278b22 100755 --- a/Lib/symbol.py +++ b/Lib/symbol.py @@ -1,7 +1,7 @@ #! /usr/bin/env python -# -# Non-terminal symbols of Python grammar (from "graminit.h") -# + +"""Non-terminal symbols of Python grammar (from "graminit.h").""" + # This file is automatically generated; please don't muck it up! # # To update the symbols in this file, 'cd' to the top directory of diff --git a/Lib/tempfile.py b/Lib/tempfile.py index 68cc896..5b05bdd 100644 --- a/Lib/tempfile.py +++ b/Lib/tempfile.py @@ -1,5 +1,5 @@ -# Temporary file name allocation -# +"""Temporary files and filenames.""" + # XXX This tries to be not UNIX specific, but I don't know beans about # how to choose a temp directory or filename on MS-DOS or other # systems so it may have to be changed... @@ -14,9 +14,8 @@ tempdir = None template = None -# Function to calculate the directory to use - def gettempdir(): + """Function to calculate the directory to use.""" global tempdir if tempdir is not None: return tempdir @@ -58,11 +57,10 @@ def gettempdir(): return tempdir -# Function to calculate a prefix of the filename to use - _pid = None def gettempprefix(): + """Function to calculate a prefix of the filename to use.""" global template, _pid if os.name == 'posix' and _pid and _pid != os.getpid(): # Our pid changed; we must have forked -- zap the template @@ -85,9 +83,8 @@ def gettempprefix(): counter = 0 -# User-callable function to return a unique temporary file name - def mktemp(suffix=""): + """User-callable function to return a unique temporary file name.""" global counter dir = gettempdir() pre = gettempprefix() @@ -126,6 +123,7 @@ class TemporaryFileWrapper: def TemporaryFile(mode='w+b', bufsize=-1, suffix=""): + """Create and return a temporary file (opened read-write by default).""" name = mktemp(suffix) if os.name == 'posix': # Unix -- be very careful diff --git a/Lib/threading.py b/Lib/threading.py index 8d5e833..8d27faf 100644 --- a/Lib/threading.py +++ b/Lib/threading.py @@ -1,5 +1,4 @@ -# threading.py: -# Proposed new threading module, emulating a subset of Java's threading model +"""Proposed new threading module, emulating a subset of Java's threading model.""" import sys import time diff --git a/Lib/toaiff.py b/Lib/toaiff.py index 1e73526..7fd0153 100644 --- a/Lib/toaiff.py +++ b/Lib/toaiff.py @@ -1,10 +1,12 @@ -# Convert "arbitrary" sound files to AIFF files (Apple and SGI's audio format). -# Input may be compressed. 
-# Uncompressed file type may be AIFF, WAV, VOC, 8SVX, NeXT/Sun, and others. -# An exception is raised if the file is not of a recognized type. -# Returned filename is either the input filename or a temporary filename; -# in the latter case the caller must ensure that it is removed. -# Other temporary files used are removed by the function. +"""Convert "arbitrary" sound files to AIFF (Apple and SGI's audio format). + +Input may be compressed. +Uncompressed file type may be AIFF, WAV, VOC, 8SVX, NeXT/Sun, and others. +An exception is raised if the file is not of a recognized type. +Returned filename is either the input filename or a temporary filename; +in the latter case the caller must ensure that it is removed. +Other temporary files used are removed by the function. +""" import os import tempfile diff --git a/Lib/token.py b/Lib/token.py index 69941dc..ed45c91 100755 --- a/Lib/token.py +++ b/Lib/token.py @@ -1,7 +1,7 @@ #! /usr/bin/env python -# -# Tokens (from "token.h") -# + +"""Token constants (from "token.h").""" + # This file is automatically generated; please don't muck it up! # # To update the symbols in this file, 'cd' to the top directory of diff --git a/Lib/traceback.py b/Lib/traceback.py index 70d3230..61c07ff 100644 --- a/Lib/traceback.py +++ b/Lib/traceback.py @@ -1,4 +1,4 @@ -# Format and print Python stack traces +"""Extract, format and print information about Python stack traces.""" import linecache import string @@ -10,6 +10,8 @@ def _print(file, str='', terminator='\n'): def print_list(extracted_list, file=None): + """Print the list of tuples as returned by extract_tb() or + extract_stack() as a formatted stack trace to the given file.""" if not file: file = sys.stderr for filename, lineno, name, line in extracted_list: @@ -19,6 +21,12 @@ def print_list(extracted_list, file=None): _print(file, ' %s' % string.strip(line)) def format_list(extracted_list): + """Given a list of tuples as returned by extract_tb() or + extract_stack(), return a list of strings ready for printing. + Each string in the resulting list corresponds to the item with + the same index in the argument list. Each string ends in a + newline; the strings may contain internal newlines as well, for + those items whose source text line is not None.""" list = [] for filename, lineno, name, line in extracted_list: item = ' File "%s", line %d, in %s\n' % (filename,lineno,name) @@ -29,6 +37,10 @@ def format_list(extracted_list): def print_tb(tb, limit=None, file=None): + """Print up to 'limit' stack trace entries from the traceback 'tb'. + If 'limit' is omitted or None, all entries are printed. If 'file' is + omitted or None, the output goes to sys.stderr; otherwise 'file' + should be an open file or file-like object with a write() method.""" if not file: file = sys.stderr if limit is None: @@ -49,9 +61,18 @@ def print_tb(tb, limit=None, file=None): n = n+1 def format_tb(tb, limit = None): + """A shorthand for 'format_list(extract_stack(f, limit)).""" return format_list(extract_tb(tb, limit)) def extract_tb(tb, limit = None): + """Return a list of up to 'limit' pre-processed stack trace entries + extracted from the traceback object 'traceback'. This is useful for + alternate formatting of stack traces. If 'limit' is omitted or None, + all entries are extracted. A pre-processed stack trace entry is a + quadruple (filename, line number, function name, text) representing + the information that is usually printed for a stack trace. 
The text + is a string with leading and trailing whitespace stripped; if the + source is not available it is None.""" if limit is None: if hasattr(sys, 'tracebacklimit'): limit = sys.tracebacklimit @@ -73,6 +94,14 @@ def extract_tb(tb, limit = None): def print_exception(etype, value, tb, limit=None, file=None): + """Print exception information and up to 'limit' stack trace entries + from the traceback 'tb' to 'file'. This differs from print_tb() in + the following ways: (1) if traceback is not None, it prints a header + "Traceback (innermost last):"; (2) it prints the exception type and + value after the stack trace; (3) if type is SyntaxError and value has + the appropriate format, it prints the line where the syntax error + occurred with a caret on the next line indicating the approximate + position of the error.""" if not file: file = sys.stderr if tb: @@ -84,6 +113,12 @@ def print_exception(etype, value, tb, limit=None, file=None): _print(file, lines[-1], '') def format_exception(etype, value, tb, limit = None): + """Format a stack trace and the exception information. The arguments + have the same meaning as the corresponding arguments to + print_exception(). The return value is a list of strings, each + ending in a newline and some containing internal newlines. When + these lines are contatenated and printed, exactly the same text is + printed as does print_exception().""" if tb: list = ['Traceback (innermost last):\n'] list = list + format_tb(tb, limit) @@ -93,6 +128,14 @@ def format_exception(etype, value, tb, limit = None): return list def format_exception_only(etype, value): + """Format the exception part of a traceback. The arguments are the + exception type and value such as given by sys.last_type and + sys.last_value. The return value is a list of strings, each ending + in a newline. Normally, the list contains a single string; + however, for SyntaxError exceptions, it contains several lines that + (when printed) display detailed information about where the syntax + error occurred. The message indicating which exception occurred is + the always last string in the list.""" list = [] if type(etype) == types.ClassType: stype = etype.__name__ @@ -128,6 +171,10 @@ def format_exception_only(etype, value): def print_exc(limit=None, file=None): + """This is a shorthand for 'print_exception(sys.exc_type, + sys.exc_value, sys.exc_traceback, limit, file)'. + (In fact, it uses sys.exc_info() to retrieve the same information + in a thread-safe way.)""" if not file: file = sys.stderr try: @@ -137,6 +184,8 @@ def print_exc(limit=None, file=None): etype = value = tb = None def print_last(limit=None, file=None): + """This is a shorthand for 'print_exception(sys.last_type, + sys.last_value, sys.last_traceback, limit, file)'.""" if not file: file = sys.stderr print_exception(sys.last_type, sys.last_value, sys.last_traceback, @@ -144,6 +193,10 @@ def print_last(limit=None, file=None): def print_stack(f=None, limit=None, file=None): + """This function prints a stack trace from its invocation point. + The optional 'f' argument can be used to specify an alternate stack + frame at which to start. 
The optional 'limit' and 'file' arguments + have the same meaning as for print_exception().""" if f is None: try: raise ZeroDivisionError @@ -152,6 +205,7 @@ def print_stack(f=None, limit=None, file=None): print_list(extract_stack(f, limit), file) def format_stack(f=None, limit=None): + """A shorthand for 'format_list(extract_stack(f, limit))'.""" if f is None: try: raise ZeroDivisionError @@ -160,6 +214,12 @@ def format_stack(f=None, limit=None): return format_list(extract_stack(f, limit)) def extract_stack(f=None, limit = None): + """Extract the raw traceback from the current stack frame. The + return value has the same format as for extract_tb(). The optional + 'f' and 'limit' arguments have the same meaning as for print_stack(). + Each item in the list is a quadruple (filename, line number, + function name, text), and the entries are in order from outermost + to innermost stack frame.""" if f is None: try: raise ZeroDivisionError @@ -184,13 +244,14 @@ def extract_stack(f=None, limit = None): list.reverse() return list -# Calculate the correct line number of the traceback given in tb (even -# with -O on). -# Coded by Marc-Andre Lemburg from the example of PyCode_Addr2Line() -# in compile.c. -# Revised version by Jim Hugunin to work with JPython too. - def tb_lineno(tb): + """Calculate the correct line number of the traceback given in tb + (even with -O on).""" + + # Coded by Marc-Andre Lemburg from the example of PyCode_Addr2Line() + # in compile.c. + # Revised version by Jim Hugunin to work with JPython too. + c = tb.tb_frame.f_code if not hasattr(c, 'co_lnotab'): return tb.tb_lineno diff --git a/Lib/tty.py b/Lib/tty.py index 86530cf..20a31c0 100644 --- a/Lib/tty.py +++ b/Lib/tty.py @@ -1,4 +1,5 @@ -# tty.py -- Terminal utilities. +"""Terminal utilities.""" + # Author: Steen Lumholt. from TERMIOS import * @@ -13,8 +14,8 @@ ISPEED = 4 OSPEED = 5 CC = 6 -# Put terminal into a raw mode. def setraw(fd, when=TCSAFLUSH): + """Put terminal into a raw mode.""" mode = tcgetattr(fd) mode[IFLAG] = mode[IFLAG] & ~(BRKINT | ICRNL | INPCK | ISTRIP | IXON) mode[OFLAG] = mode[OFLAG] & ~(OPOST) @@ -25,8 +26,8 @@ def setraw(fd, when=TCSAFLUSH): mode[CC][VTIME] = 0 tcsetattr(fd, when, mode) -# Put terminal into a cbreak mode. def setcbreak(fd, when=TCSAFLUSH): + """Put terminal into a cbreak mode.""" mode = tcgetattr(fd) mode[LFLAG] = mode[LFLAG] & ~(ECHO | ICANON) mode[CC][VMIN] = 1 diff --git a/Lib/types.py b/Lib/types.py index d5f9255..2f4a8d4 100644 --- a/Lib/types.py +++ b/Lib/types.py @@ -1,5 +1,7 @@ -# Define names for all type symbols known in the standard interpreter. -# Types that are part of optional modules (e.g. array) are not listed. +"""Define names for all type symbols known in the standard interpreter. + +Types that are part of optional modules (e.g. array) are not listed. +""" import sys diff --git a/Lib/tzparse.py b/Lib/tzparse.py index 358e0cc..fa94bd5 100644 --- a/Lib/tzparse.py +++ b/Lib/tzparse.py @@ -1,4 +1,5 @@ -# Parse a timezone specification. +"""Parse a timezone specification.""" + # XXX Unfinished. # XXX Only the typical form "XXXhhYYY;ddd/hh,ddd/hh" is currently supported. 
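# A small sketch of the traceback helpers documented earlier in this patch;
# the deliberately failing expression is only for illustration.
import sys, traceback

try:
    1/0
except:
    traceback.print_exc()              # full report to sys.stderr
    tb = sys.exc_info()[2]
    for filename, lineno, name, line in traceback.extract_tb(tb):
        print filename, lineno, name   # one pre-processed entry per frame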
@@ -8,6 +9,12 @@ tzpat = ('^([A-Z][A-Z][A-Z])([-+]?[0-9]+)([A-Z][A-Z][A-Z]);' tzprog = None def tzparse(tzstr): + """Given a timezone spec, return a tuple of information + (tzname, delta, dstname, daystart, hourstart, dayend, hourend), + where 'tzname' is the name of the timezone, 'delta' is the offset + in hours from GMT, 'dstname' is the name of the daylight-saving + timezone, and 'daystart'/'hourstart' and 'dayend'/'hourend' + specify the starting and ending points for daylight saving time.""" global tzprog if tzprog == None: import re @@ -24,6 +31,9 @@ def tzparse(tzstr): return (tzname, delta, dstname, daystart, hourstart, dayend, hourend) def tzlocaltime(secs, params): + """Given a Unix time in seconds and a tuple of information about + a timezone as returned by tzparse(), return the local time in the + form (year, month, day, hour, min, sec, yday, wday, tzname).""" import time (tzname, delta, dstname, daystart, hourstart, dayend, hourend) = params year, month, days, hours, mins, secs, yday, wday, isdst = \ @@ -34,6 +44,7 @@ def tzlocaltime(secs, params): return year, month, days, hours, mins, secs, yday, wday, tzname def tzset(): + """Determine the current timezone from the "TZ" environment variable.""" global tzparams, timezone, altzone, daylight, tzname import os tzstr = os.environ['TZ'] @@ -44,6 +55,8 @@ def tzset(): tzname = tzparams[0], tzparams[2] def isdst(secs): + """Return true if daylight-saving time is in effect for the given + Unix time in the current timezone.""" import time (tzname, delta, dstname, daystart, hourstart, dayend, hourend) = \ tzparams @@ -54,6 +67,7 @@ def isdst(secs): tzset() def localtime(secs): + """Get the local time in the current timezone.""" return tzlocaltime(secs, tzparams) def test(): diff --git a/Lib/urllib.py b/Lib/urllib.py index dbe3bee..7bc9f17 100644 --- a/Lib/urllib.py +++ b/Lib/urllib.py @@ -1,25 +1,26 @@ -# Open an arbitrary URL -# -# See the following document for more info on URLs: -# "Names and Addresses, URIs, URLs, URNs, URCs", at -# http://www.w3.org/pub/WWW/Addressing/Overview.html -# -# See also the HTTP spec (from which the error codes are derived): -# "HTTP - Hypertext Transfer Protocol", at -# http://www.w3.org/pub/WWW/Protocols/ -# -# Related standards and specs: -# - RFC1808: the "relative URL" spec. (authoritative status) -# - RFC1738 - the "URL standard". (authoritative status) -# - RFC1630 - the "URI spec". (informational status) -# -# The object returned by URLopener().open(file) will differ per -# protocol. All you know is that is has methods read(), readline(), -# readlines(), fileno(), close() and info(). The read*(), fileno() -# and close() methods work like those of open files. -# The info() method returns a mimetools.Message object which can be -# used to query various info about the object, if available. -# (mimetools.Message objects are queried with the getheader() method.) +"""Open an arbitrary URL. + +See the following document for more info on URLs: +"Names and Addresses, URIs, URLs, URNs, URCs", at +http://www.w3.org/pub/WWW/Addressing/Overview.html + +See also the HTTP spec (from which the error codes are derived): +"HTTP - Hypertext Transfer Protocol", at +http://www.w3.org/pub/WWW/Protocols/ + +Related standards and specs: +- RFC1808: the "relative URL" spec. (authoritative status) +- RFC1738 - the "URL standard". (authoritative status) +- RFC1630 - the "URI spec". (informational status) + +The object returned by URLopener().open(file) will differ per +protocol. 
All you know is that is has methods read(), readline(), +readlines(), fileno(), close() and info(). The read*(), fileno() +and close() methods work like those of open files. +The info() method returns a mimetools.Message object which can be +used to query various info about the object, if available. +(mimetools.Message objects are queried with the getheader() method.) +""" import string import socket @@ -69,14 +70,14 @@ def urlcleanup(): _urlopener.cleanup() -# Class to open URLs. -# This is a class rather than just a subroutine because we may need -# more than one set of global protocol-specific options. -# Note -- this is a base class for those who don't want the -# automatic handling of errors type 302 (relocated) and 401 -# (authorization needed). ftpcache = {} class URLopener: + """Class to open URLs. + This is a class rather than just a subroutine because we may need + more than one set of global protocol-specific options. + Note -- this is a base class for those who don't want the + automatic handling of errors type 302 (relocated) and 401 + (authorization needed).""" __tempfiles = None @@ -125,14 +126,14 @@ class URLopener: if self.tempcache: self.tempcache.clear() - # Add a header to be used by the HTTP interface only - # e.g. u.addheader('Accept', 'sound/basic') def addheader(self, *args): + """Add a header to be used by the HTTP interface only + e.g. u.addheader('Accept', 'sound/basic')""" self.addheaders.append(args) # External interface - # Use URLopener().open(file) instead of open(file, 'r') def open(self, fullurl, data=None): + """Use URLopener().open(file) instead of open(file, 'r').""" fullurl = unwrap(fullurl) if self.tempcache and self.tempcache.has_key(fullurl): filename, headers = self.tempcache[fullurl] @@ -163,15 +164,15 @@ class URLopener: except socket.error, msg: raise IOError, ('socket error', msg), sys.exc_info()[2] - # Overridable interface to open unknown URL type def open_unknown(self, fullurl, data=None): + """Overridable interface to open unknown URL type.""" type, url = splittype(fullurl) raise IOError, ('url error', 'unknown url type', type) # External interface - # retrieve(url) returns (filename, None) for a local object - # or (tempfilename, headers) for a remote object def retrieve(self, url, filename=None, reporthook=None): + """retrieve(url) returns (filename, None) for a local object + or (tempfilename, headers) for a remote object.""" url = unwrap(url) if self.tempcache and self.tempcache.has_key(url): return self.tempcache[url] @@ -223,8 +224,8 @@ class URLopener: # Each method named open_ knows how to open that type of URL - # Use HTTP protocol def open_http(self, url, data=None): + """Use HTTP protocol.""" import httplib user_passwd = None if type(url) is type(""): @@ -276,10 +277,10 @@ class URLopener: else: return self.http_error(url, fp, errcode, errmsg, headers, data) - # Handle http errors. - # Derived class can override this, or provide specific handlers - # named http_error_DDD where DDD is the 3-digit error code def http_error(self, url, fp, errcode, errmsg, headers, data=None): + """Handle http errors. 
+ Derived class can override this, or provide specific handlers + named http_error_DDD where DDD is the 3-digit error code.""" # First check if there's a specific handler for this error name = 'http_error_%d' % errcode if hasattr(self, name): @@ -291,15 +292,15 @@ class URLopener: if result: return result return self.http_error_default(url, fp, errcode, errmsg, headers) - # Default http error handler: close the connection and raises IOError def http_error_default(self, url, fp, errcode, errmsg, headers): + """Default error handler: close the connection and raise IOError.""" void = fp.read() fp.close() raise IOError, ('http error', errcode, errmsg, headers) - # Use HTTPS protocol if hasattr(socket, "ssl"): def open_https(self, url): + """Use HTTPS protocol.""" import httplib if type(url) is type(""): host, selector = splithost(url) @@ -333,8 +334,8 @@ class URLopener: else: return self.http_error(url, fp, errcode, errmsg, headers) - # Use Gopher protocol def open_gopher(self, url): + """Use Gopher protocol.""" import gopherlib host, selector = splithost(url) if not host: raise IOError, ('gopher error', 'no host given') @@ -349,15 +350,15 @@ class URLopener: fp = gopherlib.send_selector(selector, host) return addinfourl(fp, noheaders(), "gopher:" + url) - # Use local file or FTP depending on form of URL def open_file(self, url): + """Use local file or FTP depending on form of URL.""" if url[:2] == '//' and url[2:3] != '/': return self.open_ftp(url) else: return self.open_local_file(url) - # Use local file def open_local_file(self, url): + """Use local file.""" import mimetypes, mimetools, StringIO mtype = mimetypes.guess_type(url)[0] headers = mimetools.Message(StringIO.StringIO( @@ -379,8 +380,8 @@ class URLopener: headers, urlfile) raise IOError, ('local file error', 'not on local host') - # Use FTP protocol def open_ftp(self, url): + """Use FTP protocol.""" host, path = splithost(url) if not host: raise IOError, ('ftp error', 'no host given') host, port = splitport(host) @@ -433,8 +434,8 @@ class URLopener: except ftperrors(), msg: raise IOError, ('ftp error', msg), sys.exc_info()[2] - # Use "data" URL def open_data(self, url, data=None): + """Use "data" URL.""" # ignore POSTed data # # syntax of data URLs: @@ -474,20 +475,19 @@ class URLopener: return addinfourl(f, headers, url) -# Derived class with handlers for errors we can handle (perhaps) class FancyURLopener(URLopener): + """Derived class with handlers for errors we can handle (perhaps).""" def __init__(self, *args): apply(URLopener.__init__, (self,) + args) self.auth_cache = {} - # Default error handling -- don't raise an exception def http_error_default(self, url, fp, errcode, errmsg, headers): + """Default error handling -- don't raise an exception.""" return addinfourl(fp, headers, "http:" + url) - # Error 302 -- relocated (temporarily) - def http_error_302(self, url, fp, errcode, errmsg, headers, - data=None): + def http_error_302(self, url, fp, errcode, errmsg, headers, data=None): + """Error 302 -- relocated (temporarily).""" # XXX The server can force infinite recursion here! 
if headers.has_key('location'): newurl = headers['location'] @@ -504,14 +504,14 @@ class FancyURLopener(URLopener): else: return self.open(newurl, data) - # Error 301 -- also relocated (permanently) - http_error_301 = http_error_302 + def http_error_301(self, url, fp, errcode, errmsg, headers, data=None): + """Error 301 -- also relocated (permanently).""" + return self.http_error_302(url, fp, errcode, errmsg, headers, data) - # Error 401 -- authentication required - # See this URL for a description of the basic authentication scheme: - # http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt - def http_error_401(self, url, fp, errcode, errmsg, headers, - data=None): + def http_error_401(self, url, fp, errcode, errmsg, headers, data=None): + """Error 401 -- authentication required. + See this URL for a description of the basic authentication scheme: + http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt""" if headers.has_key('www-authenticate'): stuff = headers['www-authenticate'] import re @@ -560,7 +560,7 @@ class FancyURLopener(URLopener): return user, passwd def prompt_user_passwd(self, host, realm): - # Override this in a GUI environment! + """Override this in a GUI environment!""" import getpass try: user = raw_input("Enter username for %s at %s: " % (realm, @@ -575,34 +575,34 @@ class FancyURLopener(URLopener): # Utility functions -# Return the IP address of the magic hostname 'localhost' _localhost = None def localhost(): + """Return the IP address of the magic hostname 'localhost'.""" global _localhost if not _localhost: _localhost = socket.gethostbyname('localhost') return _localhost -# Return the IP address of the current host _thishost = None def thishost(): + """Return the IP address of the current host.""" global _thishost if not _thishost: _thishost = socket.gethostbyname(socket.gethostname()) return _thishost -# Return the set of errors raised by the FTP class _ftperrors = None def ftperrors(): + """Return the set of errors raised by the FTP class.""" global _ftperrors if not _ftperrors: import ftplib _ftperrors = ftplib.all_errors return _ftperrors -# Return an empty mimetools.Message object _noheaders = None def noheaders(): + """Return an empty mimetools.Message object.""" global _noheaders if not _noheaders: import mimetools @@ -614,8 +614,9 @@ def noheaders(): # Utility classes -# Class used by open_ftp() for cache of open FTP connections class ftpwrapper: + """Class used by open_ftp() for cache of open FTP connections.""" + def __init__(self, user, passwd, host, port, dirs): self.user = user self.passwd = passwd @@ -623,6 +624,7 @@ class ftpwrapper: self.port = port self.dirs = dirs self.init() + def init(self): import ftplib self.busy = 0 @@ -631,6 +633,7 @@ class ftpwrapper: self.ftp.login(self.user, self.passwd) for dir in self.dirs: self.ftp.cwd(dir) + def retrfile(self, file, type): import ftplib self.endtransfer() @@ -676,6 +679,7 @@ class ftpwrapper: self.ftp.voidresp() except ftperrors(): pass + def close(self): self.endtransfer() try: @@ -683,17 +687,20 @@ class ftpwrapper: except ftperrors(): pass -# Base class for addinfo and addclosehook class addbase: + """Base class for addinfo and addclosehook.""" + def __init__(self, fp): self.fp = fp self.read = self.fp.read self.readline = self.fp.readline if hasattr(self.fp, "readlines"): self.readlines = self.fp.readlines if hasattr(self.fp, "fileno"): self.fileno = self.fp.fileno + def __repr__(self): return '<%s at %s whose fp = %s>' % (self.__class__.__name__, `id(self)`, `self.fp`) 
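# A hedged sketch of the high-level urllib interface described above; the
# URL and the local filename are placeholders and network access is
# assumed, so this is illustration only.
import urllib

f = urllib.urlopen('http://www.python.org/')
print f.info().getheader('Content-Type')    # headers via mimetools.Message
data = f.read()
f.close()

filename, headers = urllib.urlretrieve('http://www.python.org/', 'index.html')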
+ def close(self): self.read = None self.readline = None @@ -702,12 +709,14 @@ class addbase: if self.fp: self.fp.close() self.fp = None -# Class to add a close hook to an open file class addclosehook(addbase): + """Class to add a close hook to an open file.""" + def __init__(self, fp, closehook, *hookargs): addbase.__init__(self, fp) self.closehook = closehook self.hookargs = hookargs + def close(self): if self.closehook: apply(self.closehook, self.hookargs) @@ -715,29 +724,33 @@ class addclosehook(addbase): self.hookargs = None addbase.close(self) -# class to add an info() method to an open file class addinfo(addbase): + """class to add an info() method to an open file.""" + def __init__(self, fp, headers): addbase.__init__(self, fp) self.headers = headers + def info(self): return self.headers -# class to add info() and geturl() methods to an open file class addinfourl(addbase): + """class to add info() and geturl() methods to an open file.""" + def __init__(self, fp, headers, url): addbase.__init__(self, fp) self.headers = headers self.url = url + def info(self): return self.headers + def geturl(self): return self.url -# Utility to combine a URL with a base URL to form a new URL - def basejoin(base, url): + """Utility to combine a URL with a base URL to form a new URL.""" type, path = splittype(url) if type: # if url is complete (i.e., it contains a type), return it @@ -809,6 +822,7 @@ def basejoin(base, url): # quote('abc def') -> 'abc%20def') def unwrap(url): + """unwrap('') --> 'type://host/path'.""" url = string.strip(url) if url[:1] == '<' and url[-1:] == '>': url = string.strip(url[1:-1]) @@ -817,6 +831,7 @@ def unwrap(url): _typeprog = None def splittype(url): + """splittype('type:opaquestring') --> 'type', 'opaquestring'.""" global _typeprog if _typeprog is None: import re @@ -830,6 +845,7 @@ def splittype(url): _hostprog = None def splithost(url): + """splithost('//host[:port]/path') --> 'host[:port]', '/path'.""" global _hostprog if _hostprog is None: import re @@ -841,6 +857,7 @@ def splithost(url): _userprog = None def splituser(host): + """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" global _userprog if _userprog is None: import re @@ -852,6 +869,7 @@ def splituser(host): _passwdprog = None def splitpasswd(user): + """splitpasswd('user:passwd') -> 'user', 'passwd'.""" global _passwdprog if _passwdprog is None: import re @@ -861,8 +879,10 @@ def splitpasswd(user): if match: return match.group(1, 2) return user, None +# splittag('/path#tag') --> '/path', 'tag' _portprog = None def splitport(host): + """splitport('host:port') --> 'host', 'port'.""" global _portprog if _portprog is None: import re @@ -872,12 +892,12 @@ def splitport(host): if match: return match.group(1, 2) return host, None -# Split host and port, returning numeric port. -# Return given default port if no ':' found; defaults to -1. -# Return numerical port if a valid number are found after ':'. -# Return None if ':' but not a valid number. _nportprog = None def splitnport(host, defport=-1): + """Split host and port, returning numeric port. + Return given default port if no ':' found; defaults to -1. + Return numerical port if a valid number are found after ':'. 
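# Quick demonstrations of the URL-splitting helpers documented above,
# using a made-up URL; the expected results are sketched in the comments.
import urllib

url = 'http://user:pw@host:8080/index.html'
scheme, rest = urllib.splittype(url)     # 'http', '//user:pw@host:8080/index.html'
netloc, path = urllib.splithost(rest)    # 'user:pw@host:8080', '/index.html'
userinfo, hostport = urllib.splituser(netloc)   # 'user:pw', 'host:8080'
host, port = urllib.splitport(hostport)  # 'host', '8080'
print scheme, userinfo, host, port, path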
+ Return None if ':' but not a valid number.""" global _nportprog if _nportprog is None: import re @@ -896,6 +916,7 @@ def splitnport(host, defport=-1): _queryprog = None def splitquery(url): + """splitquery('/path?query') --> '/path', 'query'.""" global _queryprog if _queryprog is None: import re @@ -907,6 +928,7 @@ def splitquery(url): _tagprog = None def splittag(url): + """splittag('/path#tag') --> '/path', 'tag'.""" global _tagprog if _tagprog is None: import re @@ -917,11 +939,14 @@ def splittag(url): return url, None def splitattr(url): + """splitattr('/path;attr1=value1;attr2=value2;...') -> + '/path', ['attr1=value1', 'attr2=value2', ...].""" words = string.splitfields(url, ';') return words[0], words[1:] _valueprog = None def splitvalue(attr): + """splitvalue('attr=value') --> 'attr', 'value'.""" global _valueprog if _valueprog is None: import re @@ -932,11 +957,13 @@ def splitvalue(attr): return attr, None def splitgophertype(selector): + """splitgophertype('/Xselector') --> 'X', 'selector'.""" if selector[:1] == '/' and selector[1:2]: return selector[1], selector[2:] return None, selector def unquote(s): + """unquote('abc%20def') -> 'abc def'.""" mychr = chr myatoi = string.atoi list = string.split(s, '%') @@ -962,6 +989,7 @@ def unquote_plus(s): always_safe = string.letters + string.digits + '_,.-' def quote(s, safe = '/'): + """quote('abc def') -> 'abc%20def').""" # XXX Can speed this up an order of magnitude safe = always_safe + safe res = list(s) @@ -983,12 +1011,13 @@ def quote_plus(s, safe = '/'): return quote(s, safe) def urlencode(dict): - l = [] - for k, v in dict.items(): - k = quote_plus(str(k)) - v = quote_plus(str(v)) - l.append(k + '=' + v) - return string.join(l, '&') + """Encode a dictionary of form entries into a URL query string.""" + l = [] + for k, v in dict.items(): + k = quote_plus(str(k)) + v = quote_plus(str(v)) + l.append(k + '=' + v) + return string.join(l, '&') # Proxy handling diff --git a/Lib/urllib2.py b/Lib/urllib2.py index 40a6715..f1b4113 100644 --- a/Lib/urllib2.py +++ b/Lib/urllib2.py @@ -1,4 +1,4 @@ -"""An extensible library for opening URLs using a variety protocols +"""An extensible library for opening URLs using a variety of protocols The simplest way to use this module is to call the urlopen function, which accepts a string containing a URL or a Request object (described diff --git a/Lib/urlparse.py b/Lib/urlparse.py index dfea52d..af111f2 100644 --- a/Lib/urlparse.py +++ b/Lib/urlparse.py @@ -1,5 +1,8 @@ -# Parse (absolute and relative) URLs. See RFC 1808: "Relative Uniform -# Resource Locators", by R. Fielding, UC Irvine, June 1995. +"""Parse (absolute and relative) URLs. + +See RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, +UC Irvine, June 1995. +""" # Standard/builtin Python modules import string @@ -39,12 +42,12 @@ def clear_cache(): _parse_cache = {} -# Parse a URL into 6 components: -# :///;?# -# Return a 6-tuple: (scheme, netloc, path, params, query, fragment). -# Note that we don't break the components up in smaller bits -# (e.g. netloc is a single string) and we don't expand % escapes. def urlparse(url, scheme = '', allow_fragments = 1): + """Parse a URL into 6 components: + :///;?# + Return a 6-tuple: (scheme, netloc, path, params, query, fragment). + Note that we don't break the components up in smaller bits + (e.g. 
netloc is a single string) and we don't expand % escapes.""" key = url, scheme, allow_fragments cached = _parse_cache.get(key, None) if cached: @@ -107,11 +110,11 @@ def urlparse(url, scheme = '', allow_fragments = 1): _parse_cache[key] = tuple return tuple -# Put a parsed URL back together again. This may result in a slightly -# different, but equivalent URL, if the URL that was parsed originally -# had redundant delimiters, e.g. a ? with an empty query (the draft -# states that these are equivalent). def urlunparse((scheme, netloc, url, params, query, fragment)): + """Put a parsed URL back together again. This may result in a + slightly different, but equivalent URL, if the URL that was parsed + originally had redundant delimiters, e.g. a ? with an empty query + (the draft states that these are equivalent).""" if netloc or (scheme in uses_netloc and url[:2] == '//'): if url[:1] != '/': url = '/' + url url = '//' + (netloc or '') + url @@ -125,9 +128,9 @@ def urlunparse((scheme, netloc, url, params, query, fragment)): url = url + '#' + fragment return url -# Join a base URL and a possibly relative URL to form an absolute -# interpretation of the latter. def urljoin(base, url, allow_fragments = 1): + """Join a base URL and a possibly relative URL to form an absolute + interpretation of the latter.""" if not base: return url bscheme, bnetloc, bpath, bparams, bquery, bfragment = \ diff --git a/Lib/uu.py b/Lib/uu.py index 0ca4ae5..252576d 100755 --- a/Lib/uu.py +++ b/Lib/uu.py @@ -23,11 +23,12 @@ # between ascii and binary. This results in a 1000-fold speedup. The C # version is still 5 times faster, though. # - Arguments more compliant with python standard -# -# This file implements the UUencode and UUdecode functions. -# encode(in_file, out_file [,name, mode]) -# decode(in_file [, out_file, mode]) +"""Implementation of the UUencode and UUdecode functions. + +encode(in_file, out_file [,name, mode]) +decode(in_file [, out_file, mode]) +""" import binascii import os diff --git a/Lib/wave.py b/Lib/wave.py index aec9bdf..8bb9ff7 100644 --- a/Lib/wave.py +++ b/Lib/wave.py @@ -1,74 +1,75 @@ -# Stuff to parse WAVE files. -# -# Usage. -# -# Reading WAVE files: -# f = wave.open(file, 'r') -# where file is either the name of a file or an open file pointer. -# The open file pointer must have methods read(), seek(), and close(). -# When the setpos() and rewind() methods are not used, the seek() -# method is not necessary. 
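# A small example of the urlparse interface documented above; the URLs are
# made up for illustration and not part of the patch.
import urlparse

parts = urlparse.urlparse('http://host/a/b;p?q=1#frag')
print parts                                  # ('http', 'host', '/a/b', 'p', 'q=1', 'frag')
print urlparse.urlunparse(parts)             # reassembles an equivalent URL
print urlparse.urljoin('http://host/a/b', 'c/d')   # 'http://host/a/c/d'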
-# -# This returns an instance of a class with the following public methods: -# getnchannels() -- returns number of audio channels (1 for -# mono, 2 for stereo) -# getsampwidth() -- returns sample width in bytes -# getframerate() -- returns sampling frequency -# getnframes() -- returns number of audio frames -# getcomptype() -- returns compression type ('NONE' for linear samples) -# getcompname() -- returns human-readable version of -# compression type ('not compressed' linear samples) -# getparams() -- returns a tuple consisting of all of the -# above in the above order -# getmarkers() -- returns None (for compatibility with the -# aifc module) -# getmark(id) -- raises an error since the mark does not -# exist (for compatibility with the aifc module) -# readframes(n) -- returns at most n frames of audio -# rewind() -- rewind to the beginning of the audio stream -# setpos(pos) -- seek to the specified position -# tell() -- return the current position -# close() -- close the instance (make it unusable) -# The position returned by tell() and the position given to setpos() -# are compatible and have nothing to do with the actual postion in the -# file. -# The close() method is called automatically when the class instance -# is destroyed. -# -# Writing WAVE files: -# f = wave.open(file, 'w') -# where file is either the name of a file or an open file pointer. -# The open file pointer must have methods write(), tell(), seek(), and -# close(). -# -# This returns an instance of a class with the following public methods: -# setnchannels(n) -- set the number of channels -# setsampwidth(n) -- set the sample width -# setframerate(n) -- set the frame rate -# setnframes(n) -- set the number of frames -# setcomptype(type, name) -# -- set the compression type and the -# human-readable compression type -# setparams(tuple) -# -- set all parameters at once -# tell() -- return current position in output file -# writeframesraw(data) -# -- write audio frames without pathing up the -# file header -# writeframes(data) -# -- write audio frames and patch up the file header -# close() -- patch up the file header and close the -# output file -# You should set the parameters before the first writeframesraw or -# writeframes. The total number of frames does not need to be set, -# but when it is set to the correct value, the header does not have to -# be patched up. -# It is best to first set all parameters, perhaps possibly the -# compression type, and then write audio frames using writeframesraw. -# When all frames have been written, either call writeframes('') or -# close() to patch up the sizes in the header. -# The close() method is called automatically when the class instance -# is destroyed. +"""Stuff to parse WAVE files. + +Usage. + +Reading WAVE files: + f = wave.open(file, 'r') +where file is either the name of a file or an open file pointer. +The open file pointer must have methods read(), seek(), and close(). +When the setpos() and rewind() methods are not used, the seek() +method is not necessary. 
+
+This returns an instance of a class with the following public methods:
+      getnchannels()  -- returns number of audio channels (1 for
+                         mono, 2 for stereo)
+      getsampwidth()  -- returns sample width in bytes
+      getframerate()  -- returns sampling frequency
+      getnframes()    -- returns number of audio frames
+      getcomptype()   -- returns compression type ('NONE' for linear samples)
+      getcompname()   -- returns human-readable version of
+                         compression type ('not compressed' for linear samples)
+      getparams()     -- returns a tuple consisting of all of the
+                         above in the above order
+      getmarkers()    -- returns None (for compatibility with the
+                         aifc module)
+      getmark(id)     -- raises an error since the mark does not
+                         exist (for compatibility with the aifc module)
+      readframes(n)   -- returns at most n frames of audio
+      rewind()        -- rewind to the beginning of the audio stream
+      setpos(pos)     -- seek to the specified position
+      tell()          -- return the current position
+      close()         -- close the instance (make it unusable)
+The position returned by tell() and the position given to setpos()
+are compatible and have nothing to do with the actual position in the
+file.
+The close() method is called automatically when the class instance
+is destroyed.
+
+Writing WAVE files:
+      f = wave.open(file, 'w')
+where file is either the name of a file or an open file pointer.
+The open file pointer must have methods write(), tell(), seek(), and
+close().
+
+This returns an instance of a class with the following public methods:
+      setnchannels(n) -- set the number of channels
+      setsampwidth(n) -- set the sample width
+      setframerate(n) -- set the frame rate
+      setnframes(n)   -- set the number of frames
+      setcomptype(type, name)
+                      -- set the compression type and the
+                         human-readable compression type
+      setparams(tuple)
+                      -- set all parameters at once
+      tell()          -- return current position in output file
+      writeframesraw(data)
+                      -- write audio frames without patching up the
+                         file header
+      writeframes(data)
+                      -- write audio frames and patch up the file header
+      close()         -- patch up the file header and close the
+                         output file
+You should set the parameters before the first writeframesraw or
+writeframes.  The total number of frames does not need to be set,
+but when it is set to the correct value, the header does not have to
+be patched up.
+It is best to first set all parameters, except possibly the
+compression type, and then write audio frames using writeframesraw.
+When all frames have been written, either call writeframes('') or
+close() to patch up the sizes in the header.
+The close() method is called automatically when the class instance
+is destroyed.
+""" import __builtin__ @@ -81,391 +82,393 @@ _array_fmts = None, 'b', 'h', None, 'l' # Determine endian-ness import struct if struct.pack("h", 1) == "\000\001": - big_endian = 1 + big_endian = 1 else: - big_endian = 0 + big_endian = 0 from chunk import Chunk class Wave_read: - # Variables used in this class: - # - # These variables are available to the user though appropriate - # methods of this class: - # _file -- the open file with methods read(), close(), and seek() - # set through the __init__() method - # _nchannels -- the number of audio channels - # available through the getnchannels() method - # _nframes -- the number of audio frames - # available through the getnframes() method - # _sampwidth -- the number of bytes per audio sample - # available through the getsampwidth() method - # _framerate -- the sampling frequency - # available through the getframerate() method - # _comptype -- the AIFF-C compression type ('NONE' if AIFF) - # available through the getcomptype() method - # _compname -- the human-readable AIFF-C compression type - # available through the getcomptype() method - # _soundpos -- the position in the audio stream - # available through the tell() method, set through the - # setpos() method - # - # These variables are used internally only: - # _fmt_chunk_read -- 1 iff the FMT chunk has been read - # _data_seek_needed -- 1 iff positioned correctly in audio - # file for readframes() - # _data_chunk -- instantiation of a chunk class for the DATA chunk - # _framesize -- size of one frame in the file - - def initfp(self, file): - self._convert = None - self._soundpos = 0 - self._file = Chunk(file, bigendian = 0) - if self._file.getname() != 'RIFF': - raise Error, 'file does not start with RIFF id' - if self._file.read(4) != 'WAVE': - raise Error, 'not a WAVE file' - self._fmt_chunk_read = 0 - self._data_chunk = None - while 1: - self._data_seek_needed = 1 - try: - chunk = Chunk(self._file, bigendian = 0) - except EOFError: - break - chunkname = chunk.getname() - if chunkname == 'fmt ': - self._read_fmt_chunk(chunk) - self._fmt_chunk_read = 1 - elif chunkname == 'data': - if not self._fmt_chunk_read: - raise Error, 'data chunk before fmt chunk' - self._data_chunk = chunk - self._nframes = chunk.chunksize / self._framesize - self._data_seek_needed = 0 - break - chunk.skip() - if not self._fmt_chunk_read or not self._data_chunk: - raise Error, 'fmt chunk and/or data chunk missing' - - def __init__(self, f): - if type(f) == type(''): - f = __builtin__.open(f, 'rb') - # else, assume it is an open file object already - self.initfp(f) - - # - # User visible methods. 
- # - def getfp(self): - return self._file - - def rewind(self): - self._data_seek_needed = 1 - self._soundpos = 0 - - def close(self): - self._file = None - - def tell(self): - return self._soundpos - - def getnchannels(self): - return self._nchannels - - def getnframes(self): - return self._nframes - - def getsampwidth(self): - return self._sampwidth - - def getframerate(self): - return self._framerate - - def getcomptype(self): - return self._comptype - - def getcompname(self): - return self._compname - - def getparams(self): - return self.getnchannels(), self.getsampwidth(), \ - self.getframerate(), self.getnframes(), \ - self.getcomptype(), self.getcompname() - - def getmarkers(self): - return None - - def getmark(self, id): - raise Error, 'no marks' - - def setpos(self, pos): - if pos < 0 or pos > self._nframes: - raise Error, 'position not in range' - self._soundpos = pos - self._data_seek_needed = 1 - - def readframes(self, nframes): - if self._data_seek_needed: - self._data_chunk.seek(0, 0) - pos = self._soundpos * self._framesize - if pos: - self._data_chunk.seek(pos, 0) - self._data_seek_needed = 0 - if nframes == 0: - return '' - if self._sampwidth > 1 and big_endian: - # unfortunately the fromfile() method does not take - # something that only looks like a file object, so - # we have to reach into the innards of the chunk object - import array - chunk = self._data_chunk - data = array.array(_array_fmts[self._sampwidth]) - nitems = nframes * self._nchannels - if nitems * self._sampwidth > chunk.chunksize - chunk.size_read: - nitems = (chunk.chunksize - chunk.size_read) / self._sampwidth - data.fromfile(chunk.file.file, nitems) - # "tell" data chunk how much was read - chunk.size_read = chunk.size_read + nitems * self._sampwidth - # do the same for the outermost chunk - chunk = chunk.file - chunk.size_read = chunk.size_read + nitems * self._sampwidth - data.byteswap() - data = data.tostring() - else: - data = self._data_chunk.read(nframes * self._framesize) - if self._convert and data: - data = self._convert(data) - self._soundpos = self._soundpos + len(data) / (self._nchannels * self._sampwidth) - return data - - # - # Internal methods. 
- # - - def _read_fmt_chunk(self, chunk): - wFormatTag, self._nchannels, self._framerate, dwAvgBytesPerSec, wBlockAlign = struct.unpack(' self._nframes: + raise Error, 'position not in range' + self._soundpos = pos + self._data_seek_needed = 1 + + def readframes(self, nframes): + if self._data_seek_needed: + self._data_chunk.seek(0, 0) + pos = self._soundpos * self._framesize + if pos: + self._data_chunk.seek(pos, 0) + self._data_seek_needed = 0 + if nframes == 0: + return '' + if self._sampwidth > 1 and big_endian: + # unfortunately the fromfile() method does not take + # something that only looks like a file object, so + # we have to reach into the innards of the chunk object + import array + chunk = self._data_chunk + data = array.array(_array_fmts[self._sampwidth]) + nitems = nframes * self._nchannels + if nitems * self._sampwidth > chunk.chunksize - chunk.size_read: + nitems = (chunk.chunksize - chunk.size_read) / self._sampwidth + data.fromfile(chunk.file.file, nitems) + # "tell" data chunk how much was read + chunk.size_read = chunk.size_read + nitems * self._sampwidth + # do the same for the outermost chunk + chunk = chunk.file + chunk.size_read = chunk.size_read + nitems * self._sampwidth + data.byteswap() + data = data.tostring() + else: + data = self._data_chunk.read(nframes * self._framesize) + if self._convert and data: + data = self._convert(data) + self._soundpos = self._soundpos + len(data) / (self._nchannels * self._sampwidth) + return data + + # + # Internal methods. + # + + def _read_fmt_chunk(self, chunk): + wFormatTag, self._nchannels, self._framerate, dwAvgBytesPerSec, wBlockAlign = struct.unpack(' 4: - raise Error, 'bad sample width' - self._sampwidth = sampwidth - - def getsampwidth(self): - if not self._sampwidth: - raise Error, 'sample width not set' - return self._sampwidth - - def setframerate(self, framerate): - if self._datawritten: - raise Error, 'cannot change parameters after starting to write' - if framerate <= 0: - raise Error, 'bad frame rate' - self._framerate = framerate - - def getframerate(self): - if not self._framerate: - raise Error, 'frame rate not set' - return self._framerate - - def setnframes(self, nframes): - if self._datawritten: - raise Error, 'cannot change parameters after starting to write' - self._nframes = nframes - - def getnframes(self): - return self._nframeswritten - - def setcomptype(self, comptype, compname): - if self._datawritten: - raise Error, 'cannot change parameters after starting to write' - if comptype not in ('NONE',): - raise Error, 'unsupported compression type' - self._comptype = comptype - self._compname = compname - - def getcomptype(self): - return self._comptype - - def getcompname(self): - return self._compname - - def setparams(self, (nchannels, sampwidth, framerate, nframes, comptype, compname)): - if self._datawritten: - raise Error, 'cannot change parameters after starting to write' - self.setnchannels(nchannels) - self.setsampwidth(sampwidth) - self.setframerate(framerate) - self.setnframes(nframes) - self.setcomptype(comptype, compname) - - def getparams(self): - if not self._nchannels or not self._sampwidth or not self._framerate: - raise Error, 'not all parameters set' - return self._nchannels, self._sampwidth, self._framerate, \ - self._nframes, self._comptype, self._compname - - def setmark(self, id, pos, name): - raise Error, 'setmark() not supported' - - def getmark(self, id): - raise Error, 'no marks' - - def getmarkers(self): - return None - - def tell(self): - return self._nframeswritten - - def 
writeframesraw(self, data): - self._ensure_header_written(len(data)) - nframes = len(data) / (self._sampwidth * self._nchannels) - if self._convert: - data = self._convert(data) - if self._sampwidth > 1 and big_endian: - import array - data = array.array(_array_fmts[self._sampwidth], data) - data.byteswap() - data.tofile(self._file) - self._datawritten = self._datawritten + len(data) * self._sampwidth - else: - self._file.write(data) - self._datawritten = self._datawritten + len(data) - self._nframeswritten = self._nframeswritten + nframes - - def writeframes(self, data): - self.writeframesraw(data) - if self._datalength != self._datawritten: - self._patchheader() - - def close(self): - self._ensure_header_written(0) - if self._datalength != self._datawritten: - self._patchheader() - self._file.flush() - self._file = None - - # - # Internal methods. - # - - def _ensure_header_written(self, datasize): - if not self._datawritten: - if not self._nchannels: - raise Error, '# channels not specified' - if not self._sampwidth: - raise Error, 'sample width not specified' - if not self._framerate: - raise Error, 'sampling rate not specified' - self._write_header(datasize) - - def _write_header(self, initlength): - self._file.write('RIFF') - if not self._nframes: - self._nframes = initlength / (self._nchannels * self._sampwidth) - self._datalength = self._nframes * self._nchannels * self._sampwidth - self._form_length_pos = self._file.tell() - self._file.write(struct.pack(' 4: + raise Error, 'bad sample width' + self._sampwidth = sampwidth + + def getsampwidth(self): + if not self._sampwidth: + raise Error, 'sample width not set' + return self._sampwidth + + def setframerate(self, framerate): + if self._datawritten: + raise Error, 'cannot change parameters after starting to write' + if framerate <= 0: + raise Error, 'bad frame rate' + self._framerate = framerate + + def getframerate(self): + if not self._framerate: + raise Error, 'frame rate not set' + return self._framerate + + def setnframes(self, nframes): + if self._datawritten: + raise Error, 'cannot change parameters after starting to write' + self._nframes = nframes + + def getnframes(self): + return self._nframeswritten + + def setcomptype(self, comptype, compname): + if self._datawritten: + raise Error, 'cannot change parameters after starting to write' + if comptype not in ('NONE',): + raise Error, 'unsupported compression type' + self._comptype = comptype + self._compname = compname + + def getcomptype(self): + return self._comptype + + def getcompname(self): + return self._compname + + def setparams(self, (nchannels, sampwidth, framerate, nframes, comptype, compname)): + if self._datawritten: + raise Error, 'cannot change parameters after starting to write' + self.setnchannels(nchannels) + self.setsampwidth(sampwidth) + self.setframerate(framerate) + self.setnframes(nframes) + self.setcomptype(comptype, compname) + + def getparams(self): + if not self._nchannels or not self._sampwidth or not self._framerate: + raise Error, 'not all parameters set' + return self._nchannels, self._sampwidth, self._framerate, \ + self._nframes, self._comptype, self._compname + + def setmark(self, id, pos, name): + raise Error, 'setmark() not supported' + + def getmark(self, id): + raise Error, 'no marks' + + def getmarkers(self): + return None + + def tell(self): + return self._nframeswritten + + def writeframesraw(self, data): + self._ensure_header_written(len(data)) + nframes = len(data) / (self._sampwidth * self._nchannels) + if self._convert: + data = 
self._convert(data) + if self._sampwidth > 1 and big_endian: + import array + data = array.array(_array_fmts[self._sampwidth], data) + data.byteswap() + data.tofile(self._file) + self._datawritten = self._datawritten + len(data) * self._sampwidth + else: + self._file.write(data) + self._datawritten = self._datawritten + len(data) + self._nframeswritten = self._nframeswritten + nframes + + def writeframes(self, data): + self.writeframesraw(data) + if self._datalength != self._datawritten: + self._patchheader() + + def close(self): + self._ensure_header_written(0) + if self._datalength != self._datawritten: + self._patchheader() + self._file.flush() + self._file = None + + # + # Internal methods. + # + + def _ensure_header_written(self, datasize): + if not self._datawritten: + if not self._nchannels: + raise Error, '# channels not specified' + if not self._sampwidth: + raise Error, 'sample width not specified' + if not self._framerate: + raise Error, 'sampling rate not specified' + self._write_header(datasize) + + def _write_header(self, initlength): + self._file.write('RIFF') + if not self._nframes: + self._nframes = initlength / (self._nchannels * self._sampwidth) + self._datalength = self._nframes * self._nchannels * self._sampwidth + self._form_length_pos = self._file.tell() + self._file.write(struct.pack('
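
As a quick illustration of the interfaces documented above, the following
minimal sketch exercises the wave module's read/write API.  It is not part
of the patch itself; the file name 'test.wav' and the parameter values are
arbitrary assumptions chosen only for the example.

    import wave

    # Write one second of silence: mono, 2 bytes per sample, 8000 frames/sec.
    w = wave.open('test.wav', 'w')
    w.setparams((1, 2, 8000, 8000, 'NONE', 'not compressed'))
    w.writeframes('\000\000' * 8000)    # writeframes() patches up the header if needed
    w.close()

    # Read the file back and inspect the header fields described in the docstring.
    r = wave.open('test.wav', 'r')
    print r.getnchannels(), r.getsampwidth(), r.getframerate(), r.getnframes()
    data = r.readframes(100)            # at most 100 frames of raw sample data
    r.close()

The urlparse helpers documented earlier in this patch can be exercised in
the same spirit (the URLs below are made-up examples, not taken from the
patch):

    import urlparse

    parts = urlparse.urlparse('http://www.python.org/doc/lib?x=1#frag')
    print parts                         # (scheme, netloc, path, params, query, fragment)
    print urlparse.urlunparse(parts)    # an equivalent (possibly not identical) URL
    print urlparse.urljoin('http://www.python.org/doc/', 'lib/module-wave.html')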