Diffstat (limited to 'Lib')
-rwxr-xr-x  Lib/dos-8x3/basehttp.py  482
-rwxr-xr-x  Lib/dos-8x3/bastion.py  174
-rwxr-xr-x  Lib/dos-8x3/cgihttps.py  305
-rwxr-xr-x  Lib/dos-8x3/compilea.py  128
-rw-r--r--  Lib/dos-8x3/configpa.py  469
-rw-r--r--  Lib/dos-8x3/cookie.py  726
-rw-r--r--  Lib/dos-8x3/fileinpu.py  268
-rwxr-xr-x  Lib/dos-8x3/formatte.py  422
-rwxr-xr-x  Lib/dos-8x3/gopherli.py  208
-rwxr-xr-x  Lib/dos-8x3/htmlenti.py  257
-rwxr-xr-x  Lib/dos-8x3/linecach.py  92
-rwxr-xr-x  Lib/dos-8x3/macurl2p.py  94
-rwxr-xr-x  Lib/dos-8x3/mimetool.py  229
-rw-r--r--  Lib/dos-8x3/mimetype.py  237
-rw-r--r--  Lib/dos-8x3/mimewrit.py  128
-rwxr-xr-x  Lib/dos-8x3/multifil.py  164
-rwxr-xr-x  Lib/dos-8x3/nturl2pa.py  66
-rwxr-xr-x  Lib/dos-8x3/posixfil.py  229
-rwxr-xr-x  Lib/dos-8x3/posixpat.py  368
-rwxr-xr-x  Lib/dos-8x3/py_compi.py  80
-rwxr-xr-x  Lib/dos-8x3/queue.py  138
-rw-r--r--  Lib/dos-8x3/reconver.py  186
-rwxr-xr-x  Lib/dos-8x3/regex_sy.py  53
-rw-r--r--  Lib/dos-8x3/regex_te.py  289
-rw-r--r--  Lib/dos-8x3/rlcomple.py  120
-rw-r--r--  Lib/dos-8x3/robotpar.py  97
-rwxr-xr-x  Lib/dos-8x3/simpleht.py  198
-rwxr-xr-x  Lib/dos-8x3/socketse.py  447
-rw-r--r--  Lib/dos-8x3/sre_comp.py  381
-rw-r--r--  Lib/dos-8x3/sre_cons.py  228
-rw-r--r--  Lib/dos-8x3/sre_pars.py  682
-rwxr-xr-x  Lib/dos-8x3/statcach.py  75
-rw-r--r--  Lib/dos-8x3/string_t.py  202
-rwxr-xr-x  Lib/dos-8x3/stringio.py  193
-rw-r--r--  Lib/dos-8x3/stringol.py  431
-rw-r--r--  Lib/dos-8x3/telnetli.py  503
-rw-r--r--  Lib/dos-8x3/test_arr.py  188
-rw-r--r--  Lib/dos-8x3/test_ate.py  24
-rwxr-xr-x  Lib/dos-8x3/test_aud.py  264
-rw-r--r--  Lib/dos-8x3/test_aug.py  232
-rw-r--r--  Lib/dos-8x3/test_bin.py  112
-rw-r--r--  Lib/dos-8x3/test_bsd.py  74
-rwxr-xr-x  Lib/dos-8x3/test_bui.py  13
-rw-r--r--  Lib/dos-8x3/test_cfg.py  141
-rw-r--r--  Lib/dos-8x3/test_cla.py  219
-rw-r--r--  Lib/dos-8x3/test_cma.py  35
-rw-r--r--  Lib/dos-8x3/test_com.py  16
-rw-r--r--  Lib/dos-8x3/test_con.py  168
-rw-r--r--  Lib/dos-8x3/test_coo.py  40
-rw-r--r--  Lib/dos-8x3/test_cop.py  35
-rw-r--r--  Lib/dos-8x3/test_cpi.py  5
-rw-r--r--  Lib/dos-8x3/test_cry.py  11
-rw-r--r--  Lib/dos-8x3/test_dos.py  49
-rw-r--r--  Lib/dos-8x3/test_err.py  49
-rwxr-xr-x  Lib/dos-8x3/test_exc.py  170
-rw-r--r--  Lib/dos-8x3/test_ext.py  146
-rw-r--r--  Lib/dos-8x3/test_fcn.py  36
-rw-r--r--  Lib/dos-8x3/test_fil.py  45
-rw-r--r--  Lib/dos-8x3/test_for.py  78
-rw-r--r--  Lib/dos-8x3/test_gdb.py  40
-rw-r--r--  Lib/dos-8x3/test_get.py  101
-rwxr-xr-x  Lib/dos-8x3/test_gra.py  649
-rw-r--r--  Lib/dos-8x3/test_gzi.py  54
-rw-r--r--  Lib/dos-8x3/test_has.py  26
-rw-r--r--  Lib/dos-8x3/test_ima.py  171
-rw-r--r--  Lib/dos-8x3/test_img.py  117
-rw-r--r--  Lib/dos-8x3/test_imp.py  44
-rw-r--r--  Lib/dos-8x3/test_lar.py  129
-rw-r--r--  Lib/dos-8x3/test_lin.py  89
-rw-r--r--  Lib/dos-8x3/test_lon.py  260
-rw-r--r--  Lib/dos-8x3/test_mat.py  195
-rw-r--r--  Lib/dos-8x3/test_mim.py  170
-rw-r--r--  Lib/dos-8x3/test_min.py  413
-rw-r--r--  Lib/dos-8x3/test_mma.py  121
-rw-r--r--  Lib/dos-8x3/test_ntp.py  51
-rwxr-xr-x  Lib/dos-8x3/test_opc.py  101
-rwxr-xr-x  Lib/dos-8x3/test_ope.py  22
-rw-r--r--  Lib/dos-8x3/test_par.py  178
-rw-r--r--  Lib/dos-8x3/test_pic.py  143
-rw-r--r--  Lib/dos-8x3/test_pol.py  172
-rw-r--r--  Lib/dos-8x3/test_pop.py  65
-rw-r--r--  Lib/dos-8x3/test_pos.py  42
-rw-r--r--  Lib/dos-8x3/test_pye.py  152
-rw-r--r--  Lib/dos-8x3/test_reg.py  110
-rw-r--r--  Lib/dos-8x3/test_rfc.py  126
-rwxr-xr-x  Lib/dos-8x3/test_rgb.py  63
-rw-r--r--  Lib/dos-8x3/test_rot.py  28
-rwxr-xr-x  Lib/dos-8x3/test_sel.py  63
-rwxr-xr-x  Lib/dos-8x3/test_sig.py  66
-rw-r--r--  Lib/dos-8x3/test_soc.py  146
-rw-r--r--  Lib/dos-8x3/test_str.py  37
-rw-r--r--  Lib/dos-8x3/test_sun.py  20
-rwxr-xr-x  Lib/dos-8x3/test_sup.py  73
-rwxr-xr-x  Lib/dos-8x3/test_thr.py  113
-rw-r--r--  Lib/dos-8x3/test_tim.py  39
-rw-r--r--  Lib/dos-8x3/test_tok.py  10
-rwxr-xr-x  Lib/dos-8x3/test_typ.py  267
-rw-r--r--  Lib/dos-8x3/test_uni.py  518
-rw-r--r--  Lib/dos-8x3/test_unp.py  144
-rw-r--r--  Lib/dos-8x3/test_url.py  32
-rw-r--r--  Lib/dos-8x3/test_use.py  101
-rw-r--r--  Lib/dos-8x3/test_wav.py  34
-rw-r--r--  Lib/dos-8x3/test_win.py  147
-rw-r--r--  Lib/dos-8x3/test_xml.py  25
-rw-r--r--  Lib/dos-8x3/test_zip.py  26
-rw-r--r--  Lib/dos-8x3/test_zli.py  161
-rw-r--r--  Lib/dos-8x3/threadin.py  631
-rw-r--r--  Lib/dos-8x3/tokenize.py  161
-rwxr-xr-x  Lib/dos-8x3/tracebac.py  274
-rwxr-xr-x  Lib/dos-8x3/userdict.py  40
-rwxr-xr-x  Lib/dos-8x3/userlist.py  79
-rw-r--r--  Lib/dos-8x3/userstri.py  171
-rw-r--r--  Lib/dos-8x3/webbrows.py  225
113 files changed, 0 insertions, 18934 deletions
diff --git a/Lib/dos-8x3/basehttp.py b/Lib/dos-8x3/basehttp.py
deleted file mode 100755
index 49f8984..0000000
--- a/Lib/dos-8x3/basehttp.py
+++ /dev/null
@@ -1,482 +0,0 @@
-"""HTTP server base class.
-
-Note: the class in this module doesn't implement any HTTP request; see
-SimpleHTTPServer for simple implementations of GET, HEAD and POST
-(including CGI scripts).
-
-Contents:
-
-- BaseHTTPRequestHandler: HTTP request handler base class
-- test: test function
-
-XXX To do:
-
-- send server version
-- log requests even later (to capture byte count)
-- log user-agent header and other interesting goodies
-- send error log to separate file
-- are request names really case sensitive?
-
-"""
-
-
-# See also:
-#
-# HTTP Working Group T. Berners-Lee
-# INTERNET-DRAFT R. T. Fielding
-# <draft-ietf-http-v10-spec-00.txt> H. Frystyk Nielsen
-# Expires September 8, 1995 March 8, 1995
-#
-# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt
-
-
-# Log files
-# ---------
-#
-# Here's a quote from the NCSA httpd docs about log file format.
-#
-# | The logfile format is as follows. Each line consists of:
-# |
-# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb
-# |
-# | host: Either the DNS name or the IP number of the remote client
-# | rfc931: Any information returned by identd for this person,
-# | - otherwise.
-# | authuser: If user sent a userid for authentication, the user name,
-# | - otherwise.
-# | DD: Day
-# | Mon: Month (calendar name)
-# | YYYY: Year
-# | hh: hour (24-hour format, the machine's timezone)
-# | mm: minutes
-# | ss: seconds
-# | request: The first line of the HTTP request as sent by the client.
-# | ddd: the status code returned by the server, - if not available.
-# | bbbb: the total number of bytes sent,
-# | *not including the HTTP/1.0 header*, - if not available
-# |
-# | You can determine the name of the file accessed through request.
-#
-# (Actually, the latter is only true if you know the server configuration
-# at the time the request was made!)
-
-
-__version__ = "0.2"
-
-
-import sys
-import time
-import socket # For gethostbyaddr()
-import string
-import mimetools
-import SocketServer
-
-# Default error message
-DEFAULT_ERROR_MESSAGE = """\
-<head>
-<title>Error response</title>
-</head>
-<body>
-<h1>Error response</h1>
-<p>Error code %(code)d.
-<p>Message: %(message)s.
-<p>Error code explanation: %(code)s = %(explain)s.
-</body>
-"""
-
-
-class HTTPServer(SocketServer.TCPServer):
-
- allow_reuse_address = 1 # Seems to make sense in testing environment
-
- def server_bind(self):
- """Override server_bind to store the server name."""
- SocketServer.TCPServer.server_bind(self)
- host, port = self.socket.getsockname()
- self.server_name = socket.getfqdn(host)
- self.server_port = port
-
-
-class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler):
-
- """HTTP request handler base class.
-
- The following explanation of HTTP serves to guide you through the
- code as well as to expose any misunderstandings I may have about
- HTTP (so you don't need to read the code to figure out I'm wrong
- :-).
-
- HTTP (HyperText Transfer Protocol) is an extensible protocol on
- top of a reliable stream transport (e.g. TCP/IP). The protocol
- recognizes three parts to a request:
-
- 1. One line identifying the request type and path
- 2. An optional set of RFC-822-style headers
- 3. An optional data part
-
- The headers and data are separated by a blank line.
-
- The first line of the request has the form
-
- <command> <path> <version>
-
- where <command> is a (case-sensitive) keyword such as GET or POST,
- <path> is a string containing path information for the request,
- and <version> should be the string "HTTP/1.0". <path> is encoded
- using the URL encoding scheme (using %xx to signify the ASCII
- character with hex code xx).
-
- The protocol is vague about whether lines are separated by LF
- characters or by CRLF pairs -- for compatibility with the widest
- range of clients, both should be accepted. Similarly, whitespace
- in the request line should be treated sensibly (allowing multiple
- spaces between components and allowing trailing whitespace).
-
- Similarly, for output, lines ought to be separated by CRLF pairs
- but most clients grok LF characters just fine.
-
- If the first line of the request has the form
-
- <command> <path>
-
- (i.e. <version> is left out) then this is assumed to be an HTTP
- 0.9 request; this form has no optional headers and data part and
- the reply consists of just the data.
-
- The reply form of the HTTP 1.0 protocol again has three parts:
-
- 1. One line giving the response code
- 2. An optional set of RFC-822-style headers
- 3. The data
-
- Again, the headers and data are separated by a blank line.
-
- The response code line has the form
-
- <version> <responsecode> <responsestring>
-
- where <version> is the protocol version (always "HTTP/1.0"),
- <responsecode> is a 3-digit response code indicating success or
- failure of the request, and <responsestring> is an optional
- human-readable string explaining what the response code means.
-
- This server parses the request and the headers, and then calls a
- function specific to the request type (<command>). Specifically,
- a request SPAM will be handled by a method do_SPAM(). If no
- such method exists the server sends an error response to the
- client. If it exists, it is called with no arguments:
-
- do_SPAM()
-
- Note that the request name is case sensitive (i.e. SPAM and spam
- are different requests).
-
- The various request details are stored in instance variables:
-
- - client_address is the client IP address in the form (host,
- port);
-
- - command, path and version are the broken-down request line;
-
- - headers is an instance of mimetools.Message (or a derived
- class) containing the header information;
-
- - rfile is a file object open for reading positioned at the
- start of the optional input data part;
-
- - wfile is a file object open for writing.
-
- IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
-
- The first thing to be written must be the response line. Then
- follow 0 or more header lines, then a blank line, and then the
- actual data (if any). The meaning of the header lines depends on
- the command executed by the server; in most cases, when data is
- returned, there should be at least one header line of the form
-
- Content-type: <type>/<subtype>
-
- where <type> and <subtype> should be registered MIME types,
- e.g. "text/html" or "text/plain".
-
- """
-
- # The Python system version, truncated to its first component.
- sys_version = "Python/" + string.split(sys.version)[0]
-
- # The server software version. You may want to override this.
- # The format is multiple whitespace-separated strings,
- # where each string is of the form name[/version].
- server_version = "BaseHTTP/" + __version__
-
- def parse_request(self):
- """Parse a request (internal).
-
- The request should be stored in self.raw_requestline; the results
- are in self.command, self.path, self.request_version and
- self.headers.
-
- Return value is 1 for success, 0 for failure; on failure, an
- error is sent back.
-
- """
- self.request_version = version = "HTTP/0.9" # Default
- requestline = self.raw_requestline
- if requestline[-2:] == '\r\n':
- requestline = requestline[:-2]
- elif requestline[-1:] == '\n':
- requestline = requestline[:-1]
- self.requestline = requestline
- words = string.split(requestline)
- if len(words) == 3:
- [command, path, version] = words
- if version[:5] != 'HTTP/':
- self.send_error(400, "Bad request version (%s)" % `version`)
- return 0
- elif len(words) == 2:
- [command, path] = words
- if command != 'GET':
- self.send_error(400,
- "Bad HTTP/0.9 request type (%s)" % `command`)
- return 0
- else:
- self.send_error(400, "Bad request syntax (%s)" % `requestline`)
- return 0
- self.command, self.path, self.request_version = command, path, version
- self.headers = self.MessageClass(self.rfile, 0)
- return 1
-
- def handle(self):
- """Handle a single HTTP request.
-
- You normally don't need to override this method; see the class
- __doc__ string for information on how to handle specific HTTP
- commands such as GET and POST.
-
- """
-
- self.raw_requestline = self.rfile.readline()
- if not self.parse_request(): # An error code has been sent, just exit
- return
- mname = 'do_' + self.command
- if not hasattr(self, mname):
- self.send_error(501, "Unsupported method (%s)" % `self.command`)
- return
- method = getattr(self, mname)
- method()
-
- def send_error(self, code, message=None):
- """Send and log an error reply.
-
- Arguments are the error code, and a detailed message.
- The detailed message defaults to the short entry matching the
- response code.
-
- This sends an error response (so it must be called before any
- output has been generated), logs the error, and finally sends
- a piece of HTML explaining the error to the user.
-
- """
-
- try:
- short, long = self.responses[code]
- except KeyError:
- short, long = '???', '???'
- if not message:
- message = short
- explain = long
- self.log_error("code %d, message %s", code, message)
- self.send_response(code, message)
- self.end_headers()
- self.wfile.write(self.error_message_format %
- {'code': code,
- 'message': message,
- 'explain': explain})
-
- error_message_format = DEFAULT_ERROR_MESSAGE
-
- def send_response(self, code, message=None):
- """Send the response header and log the response code.
-
- Also send two standard headers with the server software
- version and the current date.
-
- """
- self.log_request(code)
- if message is None:
- if self.responses.has_key(code):
- message = self.responses[code][0]
- else:
- message = ''
- if self.request_version != 'HTTP/0.9':
- self.wfile.write("%s %s %s\r\n" %
- (self.protocol_version, str(code), message))
- self.send_header('Server', self.version_string())
- self.send_header('Date', self.date_time_string())
-
- def send_header(self, keyword, value):
- """Send a MIME header."""
- if self.request_version != 'HTTP/0.9':
- self.wfile.write("%s: %s\r\n" % (keyword, value))
-
- def end_headers(self):
- """Send the blank line ending the MIME headers."""
- if self.request_version != 'HTTP/0.9':
- self.wfile.write("\r\n")
-
- def log_request(self, code='-', size='-'):
- """Log an accepted request.
-
- This is called by send_response().
-
- """
-
- self.log_message('"%s" %s %s',
- self.requestline, str(code), str(size))
-
- def log_error(self, *args):
- """Log an error.
-
- This is called when a request cannot be fulfilled. By
- default it passes the message on to log_message().
-
- Arguments are the same as for log_message().
-
- XXX This should go to the separate error log.
-
- """
-
- apply(self.log_message, args)
-
- def log_message(self, format, *args):
- """Log an arbitrary message.
-
- This is used by all other logging functions. Override
- it if you have specific logging wishes.
-
- The first argument, FORMAT, is a format string for the
- message to be logged. If the format string contains
- any % escapes requiring parameters, they should be
- specified as subsequent arguments (it's just like
- printf!).
-
- The client host and current date/time are prefixed to
- every message.
-
- """
-
- sys.stderr.write("%s - - [%s] %s\n" %
- (self.address_string(),
- self.log_date_time_string(),
- format%args))
-
- def version_string(self):
- """Return the server software version string."""
- return self.server_version + ' ' + self.sys_version
-
- def date_time_string(self):
- """Return the current date and time formatted for a message header."""
- now = time.time()
- year, month, day, hh, mm, ss, wd, y, z = time.gmtime(now)
- s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
- self.weekdayname[wd],
- day, self.monthname[month], year,
- hh, mm, ss)
- return s
-
- def log_date_time_string(self):
- """Return the current time formatted for logging."""
- now = time.time()
- year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
- s = "%02d/%3s/%04d %02d:%02d:%02d" % (
- day, self.monthname[month], year, hh, mm, ss)
- return s
-
- weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
-
- monthname = [None,
- 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
- 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
-
- def address_string(self):
- """Return the client address formatted for logging.
-
- This version looks up the full hostname using gethostbyaddr(),
- and tries to find a name that contains at least one dot.
-
- """
-
- host, port = self.client_address
- return socket.getfqdn(host)
-
- # Essentially static class variables
-
- # The version of the HTTP protocol we support.
- # Don't override unless you know what you're doing (hint: incoming
- # requests are required to have exactly this version string).
- protocol_version = "HTTP/1.0"
-
- # The Message-like class used to parse headers
- MessageClass = mimetools.Message
-
- # Table mapping response codes to messages; entries have the
- # form {code: (shortmessage, longmessage)}.
- # See http://www.w3.org/hypertext/WWW/Protocols/HTTP/HTRESP.html
- responses = {
- 200: ('OK', 'Request fulfilled, document follows'),
- 201: ('Created', 'Document created, URL follows'),
- 202: ('Accepted',
- 'Request accepted, processing continues off-line'),
- 203: ('Partial information', 'Request fulfilled from cache'),
- 204: ('No response', 'Request fulfilled, nothing follows'),
-
- 301: ('Moved', 'Object moved permanently -- see URI list'),
- 302: ('Found', 'Object moved temporarily -- see URI list'),
- 303: ('Method', 'Object moved -- see Method and URL list'),
- 304: ('Not modified',
- 'Document has not changed since given time'),
-
- 400: ('Bad request',
- 'Bad request syntax or unsupported method'),
- 401: ('Unauthorized',
- 'No permission -- see authorization schemes'),
- 402: ('Payment required',
- 'No payment -- see charging schemes'),
- 403: ('Forbidden',
- 'Request forbidden -- authorization will not help'),
- 404: ('Not found', 'Nothing matches the given URI'),
-
- 500: ('Internal error', 'Server got itself in trouble'),
- 501: ('Not implemented',
- 'Server does not support this operation'),
- 502: ('Service temporarily overloaded',
- 'The server cannot process the request due to a high load'),
- 503: ('Gateway timeout',
- 'The gateway server did not receive a timely response'),
-
- }
-
-
-def test(HandlerClass = BaseHTTPRequestHandler,
- ServerClass = HTTPServer):
- """Test the HTTP request handler class.
-
- This runs an HTTP server on port 8000 (or the first command line
- argument).
-
- """
-
- if sys.argv[1:]:
- port = string.atoi(sys.argv[1])
- else:
- port = 8000
- server_address = ('', port)
-
- httpd = ServerClass(server_address, HandlerClass)
-
- print "Serving HTTP on port", port, "..."
- httpd.serve_forever()
-
-
-if __name__ == '__main__':
- test()
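
The class docstring above explains the do_<COMMAND> dispatch and the response-writing protocol (status line, then headers, then a blank line, then the body). A minimal sketch of how a handler built on the standard BaseHTTPServer module (which this deleted 8.3-named copy appears to mirror) might put that together, Python 2 era; the EchoHandler name and port 8000 are illustrative only:

    # Sketch only: a tiny handler using the do_<COMMAND> dispatch described above.
    import BaseHTTPServer

    class EchoHandler(BaseHTTPServer.BaseHTTPRequestHandler):
        def do_GET(self):                       # invoked for "GET <path> HTTP/1.0"
            self.send_response(200)             # status line plus Server: and Date: headers
            self.send_header('Content-type', 'text/plain')
            self.end_headers()                  # blank line ending the headers
            self.wfile.write("You asked for %s\n" % self.path)

    if __name__ == '__main__':
        httpd = BaseHTTPServer.HTTPServer(('', 8000), EchoHandler)
        httpd.serve_forever()
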
diff --git a/Lib/dos-8x3/bastion.py b/Lib/dos-8x3/bastion.py
deleted file mode 100755
index a6e716b..0000000
--- a/Lib/dos-8x3/bastion.py
+++ /dev/null
@@ -1,174 +0,0 @@
-"""Bastionification utility.
-
-A bastion (for another object -- the 'original') is an object that has
-the same methods as the original but does not give access to its
-instance variables. Bastions have a number of uses, but the most
-obvious one is to provide code executing in restricted mode with a
-safe interface to an object implemented in unrestricted mode.
-
-The bastionification routine has an optional second argument which is
-a filter function. Only those methods for which the filter method
-(called with the method name as argument) returns true are accessible.
-The default filter method returns true unless the method name begins
-with an underscore.
-
-There are a number of possible implementations of bastions. We use a
-'lazy' approach where the bastion's __getattr__() discipline does all
-the work for a particular method the first time it is used. This is
-usually fastest, especially if the user doesn't call all available
-methods. The retrieved methods are stored as instance variables of
-the bastion, so the overhead is only incurred on the first use of each
-method.
-
-Detail: the bastion class has a __repr__() discipline which includes
-the repr() of the original object. This is precomputed when the
-bastion is created.
-
-"""
-
-
-from types import MethodType
-
-
-class BastionClass:
-
- """Helper class used by the Bastion() function.
-
- You could subclass this and pass the subclass as the bastionclass
- argument to the Bastion() function, as long as the constructor has
- the same signature (a get() function and a name for the object).
-
- """
-
- def __init__(self, get, name):
- """Constructor.
-
- Arguments:
-
- get - a function that gets the attribute value (by name)
- name - a human-readable name for the original object
- (suggestion: use repr(object))
-
- """
- self._get_ = get
- self._name_ = name
-
- def __repr__(self):
- """Return a representation string.
-
- This includes the name passed in to the constructor, so that
- if you print the bastion during debugging, at least you have
- some idea of what it is.
-
- """
- return "<Bastion for %s>" % self._name_
-
- def __getattr__(self, name):
- """Get an as-yet undefined attribute value.
-
- This calls the get() function that was passed to the
- constructor. The result is stored as an instance variable so
- that the next time the same attribute is requested,
- __getattr__() won't be invoked.
-
- If the get() function raises an exception, this is simply
- passed on -- exceptions are not cached.
-
- """
- attribute = self._get_(name)
- self.__dict__[name] = attribute
- return attribute
-
-
-def Bastion(object, filter = lambda name: name[:1] != '_',
- name=None, bastionclass=BastionClass):
- """Create a bastion for an object, using an optional filter.
-
- See the Bastion module's documentation for background.
-
- Arguments:
-
- object - the original object
- filter - a predicate that decides whether a function name is OK;
- by default all names are OK that don't start with '_'
- name - the name of the object; default repr(object)
- bastionclass - class used to create the bastion; default BastionClass
-
- """
-
- # Note: we define *two* ad-hoc functions here, get1 and get2.
- # Both are intended to be called in the same way: get(name).
- # It is clear that the real work (getting the attribute
- # from the object and calling the filter) is done in get1.
- # Why can't we pass get1 to the bastion? Because the user
- # would be able to override the filter argument! With get2,
- # overriding the default argument is no security loophole:
- # all it does is call it.
- # Also notice that we can't place the object and filter as
- # instance variables on the bastion object itself, since
- # the user has full access to all instance variables!
-
- def get1(name, object=object, filter=filter):
- """Internal function for Bastion(). See source comments."""
- if filter(name):
- attribute = getattr(object, name)
- if type(attribute) == MethodType:
- return attribute
- raise AttributeError, name
-
- def get2(name, get1=get1):
- """Internal function for Bastion(). See source comments."""
- return get1(name)
-
- if name is None:
- name = `object`
- return bastionclass(get2, name)
-
-
-def _test():
- """Test the Bastion() function."""
- class Original:
- def __init__(self):
- self.sum = 0
- def add(self, n):
- self._add(n)
- def _add(self, n):
- self.sum = self.sum + n
- def total(self):
- return self.sum
- o = Original()
- b = Bastion(o)
- testcode = """if 1:
- b.add(81)
- b.add(18)
- print "b.total() =", b.total()
- try:
- print "b.sum =", b.sum,
- except:
- print "inaccessible"
- else:
- print "accessible"
- try:
- print "b._add =", b._add,
- except:
- print "inaccessible"
- else:
- print "accessible"
- try:
- print "b._get_.func_defaults =", b._get_.func_defaults,
- except:
- print "inaccessible"
- else:
- print "accessible"
- \n"""
- exec testcode
- print '='*20, "Using rexec:", '='*20
- import rexec
- r = rexec.RExec()
- m = r.add_module('__main__')
- m.b = b
- r.r_exec(testcode)
-
-
-if __name__ == '__main__':
- _test()
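
As a usage sketch of the Bastion() function documented above (Python 2 era, against the standard Bastion module; the Account class is hypothetical): method calls pass through the bastion, while plain attributes are never exposed because only methods survive the default filter.

    # Sketch only; Account is a made-up class for illustration.
    import Bastion

    class Account:
        def __init__(self):
            self.balance = 0            # instance variable: hidden by the bastion
        def deposit(self, amount):      # public method: exposed by the bastion
            self.balance = self.balance + amount
        def total(self):
            return self.balance

    acct = Account()
    safe = Bastion.Bastion(acct)
    safe.deposit(10)
    print safe.total()                  # prints 10
    try:
        print safe.balance              # not a method -> AttributeError
    except AttributeError:
        print "balance is inaccessible"
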
diff --git a/Lib/dos-8x3/cgihttps.py b/Lib/dos-8x3/cgihttps.py
deleted file mode 100755
index ba1e76b..0000000
--- a/Lib/dos-8x3/cgihttps.py
+++ /dev/null
@@ -1,305 +0,0 @@
-"""CGI-savvy HTTP Server.
-
-This module builds on SimpleHTTPServer by implementing GET and POST
-requests to cgi-bin scripts.
-
-If the os.fork() function is not present (e.g. on Windows),
-os.popen2() is used as a fallback, with slightly altered semantics; if
-that function is not present either (e.g. on Macintosh), only Python
-scripts are supported, and they are executed by the current process.
-
-In all cases, the implementation is intentionally naive -- all
-requests are executed synchronously.
-
-SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL
--- it may execute arbitrary Python code or external programs.
-
-"""
-
-
-__version__ = "0.4"
-
-
-import os
-import sys
-import string
-import urllib
-import BaseHTTPServer
-import SimpleHTTPServer
-
-
-class CGIHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
-
- """Complete HTTP server with GET, HEAD and POST commands.
-
- GET and HEAD also support running CGI scripts.
-
- The POST command is *only* implemented for CGI scripts.
-
- """
-
- # Determine platform specifics
- have_fork = hasattr(os, 'fork')
- have_popen2 = hasattr(os, 'popen2')
-
- # Make rfile unbuffered -- we need to read one line and then pass
- # the rest to a subprocess, so we can't use buffered input.
- rbufsize = 0
-
- def do_POST(self):
- """Serve a POST request.
-
- This is only implemented for CGI scripts.
-
- """
-
- if self.is_cgi():
- self.run_cgi()
- else:
- self.send_error(501, "Can only POST to CGI scripts")
-
- def send_head(self):
- """Version of send_head that support CGI scripts"""
- if self.is_cgi():
- return self.run_cgi()
- else:
- return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)
-
- def is_cgi(self):
- """Test whether self.path corresponds to a CGI script.
-
- Return a tuple (dir, rest) if self.path requires running a
- CGI script, None if not. Note that rest begins with a
- slash if it is not empty.
-
- The default implementation tests whether the path
- begins with one of the strings in the list
- self.cgi_directories (and the next character is a '/'
- or the end of the string).
-
- """
-
- path = self.path
-
- for x in self.cgi_directories:
- i = len(x)
- if path[:i] == x and (not path[i:] or path[i] == '/'):
- self.cgi_info = path[:i], path[i+1:]
- return 1
- return 0
-
- cgi_directories = ['/cgi-bin', '/htbin']
-
- def is_executable(self, path):
- """Test whether argument path is an executable file."""
- return executable(path)
-
- def is_python(self, path):
- """Test whether argument path is a Python script."""
- head, tail = os.path.splitext(path)
- return tail.lower() in (".py", ".pyw")
-
- def run_cgi(self):
- """Execute a CGI script."""
- dir, rest = self.cgi_info
- i = string.rfind(rest, '?')
- if i >= 0:
- rest, query = rest[:i], rest[i+1:]
- else:
- query = ''
- i = string.find(rest, '/')
- if i >= 0:
- script, rest = rest[:i], rest[i:]
- else:
- script, rest = rest, ''
- scriptname = dir + '/' + script
- scriptfile = self.translate_path(scriptname)
- if not os.path.exists(scriptfile):
- self.send_error(404, "No such CGI script (%s)" % `scriptname`)
- return
- if not os.path.isfile(scriptfile):
- self.send_error(403, "CGI script is not a plain file (%s)" %
- `scriptname`)
- return
- ispy = self.is_python(scriptname)
- if not ispy:
- if not (self.have_fork or self.have_popen2):
- self.send_error(403, "CGI script is not a Python script (%s)" %
- `scriptname`)
- return
- if not self.is_executable(scriptfile):
- self.send_error(403, "CGI script is not executable (%s)" %
- `scriptname`)
- return
-
- # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
- # XXX Much of the following could be prepared ahead of time!
- env = {}
- env['SERVER_SOFTWARE'] = self.version_string()
- env['SERVER_NAME'] = self.server.server_name
- env['GATEWAY_INTERFACE'] = 'CGI/1.1'
- env['SERVER_PROTOCOL'] = self.protocol_version
- env['SERVER_PORT'] = str(self.server.server_port)
- env['REQUEST_METHOD'] = self.command
- uqrest = urllib.unquote(rest)
- env['PATH_INFO'] = uqrest
- env['PATH_TRANSLATED'] = self.translate_path(uqrest)
- env['SCRIPT_NAME'] = scriptname
- if query:
- env['QUERY_STRING'] = query
- host = self.address_string()
- if host != self.client_address[0]:
- env['REMOTE_HOST'] = host
- env['REMOTE_ADDR'] = self.client_address[0]
- # XXX AUTH_TYPE
- # XXX REMOTE_USER
- # XXX REMOTE_IDENT
- if self.headers.typeheader is None:
- env['CONTENT_TYPE'] = self.headers.type
- else:
- env['CONTENT_TYPE'] = self.headers.typeheader
- length = self.headers.getheader('content-length')
- if length:
- env['CONTENT_LENGTH'] = length
- accept = []
- for line in self.headers.getallmatchingheaders('accept'):
- if line[:1] in string.whitespace:
- accept.append(string.strip(line))
- else:
- accept = accept + string.split(line[7:], ',')
- env['HTTP_ACCEPT'] = string.joinfields(accept, ',')
- ua = self.headers.getheader('user-agent')
- if ua:
- env['HTTP_USER_AGENT'] = ua
- co = filter(None, self.headers.getheaders('cookie'))
- if co:
- env['HTTP_COOKIE'] = string.join(co, ', ')
- # XXX Other HTTP_* headers
- if not self.have_fork:
- # Since we're setting the env in the parent, provide empty
- # values to override previously set values
- for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
- 'HTTP_USER_AGENT', 'HTTP_COOKIE'):
- env.setdefault(k, "")
-
- self.send_response(200, "Script output follows")
-
- decoded_query = string.replace(query, '+', ' ')
-
- if self.have_fork:
- # Unix -- fork as we should
- args = [script]
- if '=' not in decoded_query:
- args.append(decoded_query)
- nobody = nobody_uid()
- self.wfile.flush() # Always flush before forking
- pid = os.fork()
- if pid != 0:
- # Parent
- pid, sts = os.waitpid(pid, 0)
- if sts:
- self.log_error("CGI script exit status %#x", sts)
- return
- # Child
- try:
- try:
- os.setuid(nobody)
- except os.error:
- pass
- os.dup2(self.rfile.fileno(), 0)
- os.dup2(self.wfile.fileno(), 1)
- os.execve(scriptfile, args, env)
- except:
- self.server.handle_error(self.request, self.client_address)
- os._exit(127)
-
- elif self.have_popen2:
- # Windows -- use popen2 to create a subprocess
- import shutil
- os.environ.update(env)
- cmdline = scriptfile
- if self.is_python(scriptfile):
- interp = sys.executable
- if interp.lower().endswith("w.exe"):
- # On Windows, use python.exe, not pythonw.exe
- interp = interp[:-5] + interp[-4:]
- cmdline = "%s %s" % (interp, cmdline)
- if '=' not in query and '"' not in query:
- cmdline = '%s "%s"' % (cmdline, query)
- self.log_error("command: %s", cmdline)
- try:
- nbytes = int(length)
- except:
- nbytes = 0
- fi, fo = os.popen2(cmdline)
- if self.command.lower() == "post" and nbytes > 0:
- data = self.rfile.read(nbytes)
- fi.write(data)
- fi.close()
- shutil.copyfileobj(fo, self.wfile)
- sts = fo.close()
- if sts:
- self.log_error("CGI script exit status %#x", sts)
- else:
- self.log_error("CGI script exited OK")
-
- else:
- # Other O.S. -- execute script in this process
- os.environ.update(env)
- save_argv = sys.argv
- save_stdin = sys.stdin
- save_stdout = sys.stdout
- save_stderr = sys.stderr
- try:
- try:
- sys.argv = [scriptfile]
- if '=' not in decoded_query:
- sys.argv.append(decoded_query)
- sys.stdout = self.wfile
- sys.stdin = self.rfile
- execfile(scriptfile, {"__name__": "__main__"})
- finally:
- sys.argv = save_argv
- sys.stdin = save_stdin
- sys.stdout = save_stdout
- sys.stderr = save_stderr
- except SystemExit, sts:
- self.log_error("CGI script exit status %s", str(sts))
- else:
- self.log_error("CGI script exited OK")
-
-
-nobody = None
-
-def nobody_uid():
- """Internal routine to get nobody's uid"""
- global nobody
- if nobody:
- return nobody
- try:
- import pwd
- except ImportError:
- return -1
- try:
- nobody = pwd.getpwnam('nobody')[2]
- except KeyError:
- nobody = 1 + max(map(lambda x: x[2], pwd.getpwall()))
- return nobody
-
-
-def executable(path):
- """Test for executable file."""
- try:
- st = os.stat(path)
- except os.error:
- return 0
- return st[0] & 0111 != 0
-
-
-def test(HandlerClass = CGIHTTPRequestHandler,
- ServerClass = BaseHTTPServer.HTTPServer):
- SimpleHTTPServer.test(HandlerClass, ServerClass)
-
-
-if __name__ == '__main__':
- test()
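
A short sketch of running the CGI-enabled server described in the module docstring above (Python 2 era, using the standard BaseHTTPServer and CGIHTTPServer modules that these deleted 8.3-named copies correspond to; the port is illustrative):

    # Sketch only: serve the current directory, running /cgi-bin requests as CGI.
    import BaseHTTPServer, CGIHTTPServer

    handler = CGIHTTPServer.CGIHTTPRequestHandler
    handler.cgi_directories = ['/cgi-bin']      # paths under /cgi-bin are executed, not served
    httpd = BaseHTTPServer.HTTPServer(('', 8000), handler)
    print "Serving HTTP (with CGI) on port 8000 ..."
    httpd.serve_forever()
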
diff --git a/Lib/dos-8x3/compilea.py b/Lib/dos-8x3/compilea.py
deleted file mode 100755
index e56c8b2..0000000
--- a/Lib/dos-8x3/compilea.py
+++ /dev/null
@@ -1,128 +0,0 @@
-"""Module/script to "compile" all .py files to .pyc (or .pyo) file.
-
-When called as a script with arguments, this compiles the directories
-given as arguments recursively; the -l option prevents it from
-recursing into directories.
-
-Without arguments, it compiles all modules on sys.path, without
-recursing into subdirectories. (Even though it should do so for
-packages -- for now, you'll have to deal with packages separately.)
-
-See module py_compile for details of the actual byte-compilation.
-
-"""
-
-import os
-import stat
-import sys
-import py_compile
-
-def compile_dir(dir, maxlevels=10, ddir=None, force=0):
- """Byte-compile all modules in the given directory tree.
-
- Arguments (only dir is required):
-
- dir: the directory to byte-compile
- maxlevels: maximum recursion level (default 10)
- ddir: if given, purported directory name (this is the
- directory name that will show up in error messages)
- force: if 1, force compilation, even if timestamps are up-to-date
-
- """
- print 'Listing', dir, '...'
- try:
- names = os.listdir(dir)
- except os.error:
- print "Can't list", dir
- names = []
- names.sort()
- success = 1
- for name in names:
- fullname = os.path.join(dir, name)
- if ddir:
- dfile = os.path.join(ddir, name)
- else:
- dfile = None
- if os.path.isfile(fullname):
- head, tail = name[:-3], name[-3:]
- if tail == '.py':
- cfile = fullname + (__debug__ and 'c' or 'o')
- ftime = os.stat(fullname)[stat.ST_MTIME]
- try: ctime = os.stat(cfile)[stat.ST_MTIME]
- except os.error: ctime = 0
- if (ctime > ftime) and not force: continue
- print 'Compiling', fullname, '...'
- try:
- py_compile.compile(fullname, None, dfile)
- except KeyboardInterrupt:
- raise KeyboardInterrupt
- except:
- if type(sys.exc_type) == type(''):
- exc_type_name = sys.exc_type
- else: exc_type_name = sys.exc_type.__name__
- print 'Sorry:', exc_type_name + ':',
- print sys.exc_value
- success = 0
- elif maxlevels > 0 and \
- name != os.curdir and name != os.pardir and \
- os.path.isdir(fullname) and \
- not os.path.islink(fullname):
- compile_dir(fullname, maxlevels - 1, dfile, force)
- return success
-
-def compile_path(skip_curdir=1, maxlevels=0, force=0):
- """Byte-compile all module on sys.path.
-
- Arguments (all optional):
-
- skip_curdir: if true, skip current directory (default true)
- maxlevels: max recursion level (default 0)
- force: as for compile_dir() (default 0)
-
- """
- success = 1
- for dir in sys.path:
- if (not dir or dir == os.curdir) and skip_curdir:
- print 'Skipping current directory'
- else:
- success = success and compile_dir(dir, maxlevels, None, force)
- return success
-
-def main():
- """Script main program."""
- import getopt
- try:
- opts, args = getopt.getopt(sys.argv[1:], 'lfd:')
- except getopt.error, msg:
- print msg
- print "usage: compileall [-l] [-f] [-d destdir] [directory ...]"
- print "-l: don't recurse down"
- print "-f: force rebuild even if timestamps are up-to-date"
- print "-d destdir: purported directory name for error messages"
- print "if no directory arguments, -l sys.path is assumed"
- sys.exit(2)
- maxlevels = 10
- ddir = None
- force = 0
- for o, a in opts:
- if o == '-l': maxlevels = 0
- if o == '-d': ddir = a
- if o == '-f': force = 1
- if ddir:
- if len(args) != 1:
- print "-d destdir require exactly one directory argument"
- sys.exit(2)
- success = 1
- try:
- if args:
- for dir in args:
- success = success and compile_dir(dir, maxlevels, ddir, force)
- else:
- success = compile_path()
- except KeyboardInterrupt:
- print "\n[interrupt]"
- success = 0
- return success
-
-if __name__ == '__main__':
- sys.exit(not main())
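
Besides the command-line usage shown in main() above, the two entry points documented in the docstrings can be called directly. A minimal sketch (Python 2 era, against the standard compileall module that this deleted copy mirrors; the 'Lib/' path is illustrative):

    # Sketch only: byte-compile a tree, then everything on sys.path.
    import compileall

    compileall.compile_dir('Lib/', maxlevels=10, force=1)   # recurse up to 10 levels, ignore timestamps
    compileall.compile_path()                               # sys.path, no recursion into subdirectories
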
diff --git a/Lib/dos-8x3/configpa.py b/Lib/dos-8x3/configpa.py
deleted file mode 100644
index 5043687..0000000
--- a/Lib/dos-8x3/configpa.py
+++ /dev/null
@@ -1,469 +0,0 @@
-"""Configuration file parser.
-
-A setup file consists of sections, led by a "[section]" header,
-and followed by "name: value" entries, with continuations and such in
-the style of RFC 822.
-
-The option values can contain format strings which refer to other values in
-the same section, or values in a special [DEFAULT] section.
-
-For example:
-
- something: %(dir)s/whatever
-
-would resolve the "%(dir)s" to the value of dir. All reference
-expansions are done late, on demand.
-
-Intrinsic defaults can be specified by passing them into the
-ConfigParser constructor as a dictionary.
-
-class:
-
-ConfigParser -- responsible for parsing a list of
- configuration files, and managing the parsed database.
-
- methods:
-
- __init__(defaults=None)
- create the parser and specify a dictionary of intrinsic defaults. The
- keys must be strings, the values must be appropriate for %()s string
- interpolation. Note that `__name__' is always an intrinsic default;
- its value is the section's name.
-
- sections()
- return all the configuration section names, sans DEFAULT
-
- has_section(section)
- return whether the given section exists
-
- has_option(section, option)
- return whether the given option exists in the given section
-
- options(section)
- return list of configuration options for the named section
-
- has_option(section, option)
- return whether the given section has the given option
-
- read(filenames)
- read and parse the list of named configuration files, given by
- name. A single filename is also allowed. Non-existing files
- are ignored.
-
- readfp(fp, filename=None)
- read and parse one configuration file, given as a file object.
- The filename defaults to fp.name; it is only used in error
- messages (if fp has no `name' attribute, the string `<???>' is used).
-
- get(section, option, raw=0, vars=None)
- return a string value for the named option. All % interpolations are
- expanded in the return values, based on the defaults passed into the
- constructor and the DEFAULT section. Additional substitutions may be
- provided using the `vars' argument, which must be a dictionary whose
- contents override any pre-existing defaults.
-
- getint(section, options)
- like get(), but convert value to an integer
-
- getfloat(section, options)
- like get(), but convert value to a float
-
- getboolean(section, options)
- like get(), but convert value to a boolean (currently defined as 0 or
- 1, only)
-
- remove_section(section)
- remove the given file section and all its options
-
- remove_option(section, option)
- remove the given option from the given section
-
- set(section, option, value)
- set the given option
-
- write(fp)
- write the configuration state in .ini format
-"""
-
-import sys
-import string
-import re
-
-DEFAULTSECT = "DEFAULT"
-
-MAX_INTERPOLATION_DEPTH = 10
-
-
-
-# exception classes
-class Error:
- def __init__(self, msg=''):
- self._msg = msg
- def __repr__(self):
- return self._msg
-
-class NoSectionError(Error):
- def __init__(self, section):
- Error.__init__(self, 'No section: %s' % section)
- self.section = section
-
-class DuplicateSectionError(Error):
- def __init__(self, section):
- Error.__init__(self, "Section %s already exists" % section)
- self.section = section
-
-class NoOptionError(Error):
- def __init__(self, option, section):
- Error.__init__(self, "No option `%s' in section: %s" %
- (option, section))
- self.option = option
- self.section = section
-
-class InterpolationError(Error):
- def __init__(self, reference, option, section, rawval):
- Error.__init__(self,
- "Bad value substitution:\n"
- "\tsection: [%s]\n"
- "\toption : %s\n"
- "\tkey : %s\n"
- "\trawval : %s\n"
- % (section, option, reference, rawval))
- self.reference = reference
- self.option = option
- self.section = section
-
-class InterpolationDepthError(Error):
- def __init__(self, option, section, rawval):
- Error.__init__(self,
- "Value interpolation too deeply recursive:\n"
- "\tsection: [%s]\n"
- "\toption : %s\n"
- "\trawval : %s\n"
- % (section, option, rawval))
- self.option = option
- self.section = section
-
-class ParsingError(Error):
- def __init__(self, filename):
- Error.__init__(self, 'File contains parsing errors: %s' % filename)
- self.filename = filename
- self.errors = []
-
- def append(self, lineno, line):
- self.errors.append((lineno, line))
- self._msg = self._msg + '\n\t[line %2d]: %s' % (lineno, line)
-
-class MissingSectionHeaderError(ParsingError):
- def __init__(self, filename, lineno, line):
- Error.__init__(
- self,
- 'File contains no section headers.\nfile: %s, line: %d\n%s' %
- (filename, lineno, line))
- self.filename = filename
- self.lineno = lineno
- self.line = line
-
-
-
-class ConfigParser:
- def __init__(self, defaults=None):
- self.__sections = {}
- if defaults is None:
- self.__defaults = {}
- else:
- self.__defaults = defaults
-
- def defaults(self):
- return self.__defaults
-
- def sections(self):
- """Return a list of section names, excluding [DEFAULT]"""
- # self.__sections will never have [DEFAULT] in it
- return self.__sections.keys()
-
- def add_section(self, section):
- """Create a new section in the configuration.
-
- Raise DuplicateSectionError if a section by the specified name
- already exists.
- """
- if self.__sections.has_key(section):
- raise DuplicateSectionError(section)
- self.__sections[section] = {}
-
- def has_section(self, section):
- """Indicate whether the named section is present in the configuration.
-
- The DEFAULT section is not acknowledged.
- """
- return section in self.sections()
-
- def options(self, section):
- """Return a list of option names for the given section name."""
- try:
- opts = self.__sections[section].copy()
- except KeyError:
- raise NoSectionError(section)
- opts.update(self.__defaults)
- if opts.has_key('__name__'):
- del opts['__name__']
- return opts.keys()
-
- def has_option(self, section, option):
- """Return whether the given section has the given option."""
- return option in self.options(section)
-
- def read(self, filenames):
- """Read and parse a filename or a list of filenames.
-
- Files that cannot be opened are silently ignored; this is
- designed so that you can specify a list of potential
- configuration file locations (e.g. current directory, user's
- home directory, systemwide directory), and all existing
- configuration files in the list will be read. A single
- filename may also be given.
- """
- if type(filenames) in [type(''), type(u'')]:
- filenames = [filenames]
- for filename in filenames:
- try:
- fp = open(filename)
- except IOError:
- continue
- self.__read(fp, filename)
- fp.close()
-
- def readfp(self, fp, filename=None):
- """Like read() but the argument must be a file-like object.
-
- The `fp' argument must have a `readline' method. Optional
- second argument is the `filename', which if not given, is
- taken from fp.name. If fp has no `name' attribute, `<???>' is
- used.
-
- """
- if filename is None:
- try:
- filename = fp.name
- except AttributeError:
- filename = '<???>'
- self.__read(fp, filename)
-
- def get(self, section, option, raw=0, vars=None):
- """Get an option value for a given section.
-
- All % interpolations are expanded in the return values, based on the
- defaults passed into the constructor, unless the optional argument
- `raw' is true. Additional substitutions may be provided using the
- `vars' argument, which must be a dictionary whose contents override
- any pre-existing defaults.
-
- The section DEFAULT is special.
- """
- try:
- sectdict = self.__sections[section].copy()
- except KeyError:
- if section == DEFAULTSECT:
- sectdict = {}
- else:
- raise NoSectionError(section)
- d = self.__defaults.copy()
- d.update(sectdict)
- # Update with the entry specific variables
- if vars:
- d.update(vars)
- option = self.optionxform(option)
- try:
- rawval = d[option]
- except KeyError:
- raise NoOptionError(option, section)
-
- if raw:
- return rawval
-
- # do the string interpolation
- value = rawval # Make it a pretty variable name
- depth = 0
- while depth < 10: # Loop through this until it's done
- depth = depth + 1
- if string.find(value, "%(") >= 0:
- try:
- value = value % d
- except KeyError, key:
- raise InterpolationError(key, option, section, rawval)
- else:
- break
- if value.find("%(") >= 0:
- raise InterpolationDepthError(option, section, rawval)
- return value
-
- def __get(self, section, conv, option):
- return conv(self.get(section, option))
-
- def getint(self, section, option):
- return self.__get(section, string.atoi, option)
-
- def getfloat(self, section, option):
- return self.__get(section, string.atof, option)
-
- def getboolean(self, section, option):
- v = self.get(section, option)
- val = string.atoi(v)
- if val not in (0, 1):
- raise ValueError, 'Not a boolean: %s' % v
- return val
-
- def optionxform(self, optionstr):
- return string.lower(optionstr)
-
- def has_option(self, section, option):
- """Check for the existence of a given option in a given section."""
- if not section or section == "DEFAULT":
- return self.__defaults.has_key(option)
- elif not self.has_section(section):
- return 0
- else:
- return self.__sections[section].has_key(option)
-
- def set(self, section, option, value):
- """Set an option."""
- if not section or section == "DEFAULT":
- sectdict = self.__defaults
- else:
- try:
- sectdict = self.__sections[section]
- except KeyError:
- raise NoSectionError(section)
- sectdict[option] = value
-
- def write(self, fp):
- """Write an .ini-format representation of the configuration state."""
- if self.__defaults:
- fp.write("[DEFAULT]\n")
- for (key, value) in self.__defaults.items():
- fp.write("%s = %s\n" % (key, value))
- fp.write("\n")
- for section in self.sections():
- fp.write("[" + section + "]\n")
- sectdict = self.__sections[section]
- for (key, value) in sectdict.items():
- if key == "__name__":
- continue
- fp.write("%s = %s\n" % (key, value))
- fp.write("\n")
-
- def remove_option(self, section, option):
- """Remove an option."""
- if not section or section == "DEFAULT":
- sectdict = self.__defaults
- else:
- try:
- sectdict = self.__sections[section]
- except KeyError:
- raise NoSectionError(section)
- existed = sectdict.has_key(option)
- if existed:
- del sectdict[option]
- return existed
-
- def remove_section(self, section):
- """Remove a file section."""
- if self.__sections.has_key(section):
- del self.__sections[section]
- return 1
- else:
- return 0
-
- #
- # Regular expressions for parsing section headers and options. Note a
- # slight semantic change from the previous version, because of the use
- # of \w, _ is allowed in section header names.
- SECTCRE = re.compile(
- r'\[' # [
- r'(?P<header>[-\w_.*,(){} ]+)' # a lot of stuff found by IvL
- r'\]' # ]
- )
- OPTCRE = re.compile(
- r'(?P<option>[-\w_.*,(){}]+)' # a lot of stuff found by IvL
- r'[ \t]*(?P<vi>[:=])[ \t]*' # any number of space/tab,
- # followed by separator
- # (either : or =), followed
- # by any # space/tab
- r'(?P<value>.*)$' # everything up to eol
- )
-
- def __read(self, fp, fpname):
- """Parse a sectioned setup file.
-
- The sections in the setup file contain a title line at the top,
- indicated by a name in square brackets (`[]'), plus key/value
- option lines, indicated by `name: value' format lines.
- Continuations are represented by an embedded newline then
- leading whitespace. Blank lines, lines beginning with a '#',
- and just about everything else is ignored.
- """
- cursect = None # None, or a dictionary
- optname = None
- lineno = 0
- e = None # None, or an exception
- while 1:
- line = fp.readline()
- if not line:
- break
- lineno = lineno + 1
- # comment or blank line?
- if string.strip(line) == '' or line[0] in '#;':
- continue
- if string.lower(string.split(line)[0]) == 'rem' \
- and line[0] in "rR": # no leading whitespace
- continue
- # continuation line?
- if line[0] in ' \t' and cursect is not None and optname:
- value = string.strip(line)
- if value:
- cursect[optname] = cursect[optname] + '\n ' + value
- # a section header or option header?
- else:
- # is it a section header?
- mo = self.SECTCRE.match(line)
- if mo:
- sectname = mo.group('header')
- if self.__sections.has_key(sectname):
- cursect = self.__sections[sectname]
- elif sectname == DEFAULTSECT:
- cursect = self.__defaults
- else:
- cursect = {'__name__': sectname}
- self.__sections[sectname] = cursect
- # So sections can't start with a continuation line
- optname = None
- # no section header in the file?
- elif cursect is None:
- raise MissingSectionHeaderError(fpname, lineno, `line`)
- # an option line?
- else:
- mo = self.OPTCRE.match(line)
- if mo:
- optname, vi, optval = mo.group('option', 'vi', 'value')
- if vi in ('=', ':') and ';' in optval:
- # ';' is a comment delimiter only if it follows
- # a spacing character
- pos = string.find(optval, ';')
- if pos and optval[pos-1] in string.whitespace:
- optval = optval[:pos]
- optval = string.strip(optval)
- # allow empty values
- if optval == '""':
- optval = ''
- cursect[self.optionxform(optname)] = optval
- else:
- # a non-fatal parsing error occurred. set up the
- # exception but keep going. the exception will be
- # raised at the end of the file and will contain a
- # list of all bogus lines
- if not e:
- e = ParsingError(fpname)
- e.append(lineno, `line`)
- # if any parsing errors occurred, raise an exception
- if e:
- raise e
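
The module docstring above describes the "%(...)s" interpolation against the [DEFAULT] section and the get()/raw arguments. A brief usage sketch (Python 2 era, using the standard ConfigParser module that this deleted copy mirrors; the file name and its contents are illustrative):

    # Sketch only.  Assume example.ini contains:
    #     [DEFAULT]
    #     dir = /tmp
    #
    #     [paths]
    #     something = %(dir)s/whatever
    import ConfigParser

    cp = ConfigParser.ConfigParser()
    cp.read(['example.ini'])                    # missing files are silently ignored
    print cp.get('paths', 'something')          # interpolated -> '/tmp/whatever'
    print cp.get('paths', 'something', raw=1)   # raw value    -> '%(dir)s/whatever'
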
diff --git a/Lib/dos-8x3/cookie.py b/Lib/dos-8x3/cookie.py
deleted file mode 100644
index 67259af..0000000
--- a/Lib/dos-8x3/cookie.py
+++ /dev/null
@@ -1,726 +0,0 @@
-#!/usr/bin/env python
-#
-
-####
-# Copyright 2000 by Timothy O'Malley <timo@alum.mit.edu>
-#
-# All Rights Reserved
-#
-# Permission to use, copy, modify, and distribute this software
-# and its documentation for any purpose and without fee is hereby
-# granted, provided that the above copyright notice appear in all
-# copies and that both that copyright notice and this permission
-# notice appear in supporting documentation, and that the name of
-# Timothy O'Malley not be used in advertising or publicity
-# pertaining to distribution of the software without specific, written
-# prior permission.
-#
-# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
-# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
-# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
-# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
-# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-#
-####
-#
-# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
-# by Timothy O'Malley <timo@alum.mit.edu>
-#
-# Cookie.py is a Python module for the handling of HTTP
-# cookies as a Python dictionary. See RFC 2109 for more
-# information on cookies.
-#
-# The original idea to treat Cookies as a dictionary came from
-# Dave Mitchell (davem@magnet.com) in 1995, when he released the
-# first version of nscookie.py.
-#
-####
-
-"""
-Here's a sample session to show how to use this module.
-At the moment, this is the only documentation.
-
-The Basics
-----------
-
-Importing is easy..
-
- >>> import Cookie
-
-Most of the time you start by creating a cookie. Cookies come in
-three flavors, each with slightly different encoding semantics, but
-more on that later.
-
- >>> C = Cookie.SimpleCookie()
- >>> C = Cookie.SerialCookie()
- >>> C = Cookie.SmartCookie()
-
-[Note: Long-time users of Cookie.py will remember using
-Cookie.Cookie() to create a Cookie object. Although deprecated, it
-is still supported by the code. See the Backward Compatibility notes
-for more information.]
-
-Once you've created your Cookie, you can add values just as if it were
-a dictionary.
-
- >>> C = Cookie.SmartCookie()
- >>> C["fig"] = "newton"
- >>> C["sugar"] = "wafer"
- >>> print C
- Set-Cookie: sugar=wafer;
- Set-Cookie: fig=newton;
-
-Notice that the printable representation of a Cookie is the
-appropriate format for a Set-Cookie: header. This is the
-default behavior. You can change the header and printed
-attributes by using the .output() function
-
- >>> C = Cookie.SmartCookie()
- >>> C["rocky"] = "road"
- >>> C["rocky"]["path"] = "/cookie"
- >>> print C.output(header="Cookie:")
- Cookie: rocky=road; Path=/cookie;
- >>> print C.output(attrs=[], header="Cookie:")
- Cookie: rocky=road;
-
-The load() method of a Cookie extracts cookies from a string. In a
-CGI script, you would use this method to extract the cookies from the
-HTTP_COOKIE environment variable.
-
- >>> C = Cookie.SmartCookie()
- >>> C.load("chips=ahoy; vienna=finger")
- >>> print C
- Set-Cookie: vienna=finger;
- Set-Cookie: chips=ahoy;
-
-The load() method is darn-tootin smart about identifying cookies
-within a string. Escaped quotation marks, nested semicolons, and other
-such trickeries do not confuse it.
-
- >>> C = Cookie.SmartCookie()
- >>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
- >>> print C
- Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;";
-
-Each element of the Cookie also supports all of the RFC 2109
-Cookie attributes. Here's an example which sets the Path
-attribute.
-
- >>> C = Cookie.SmartCookie()
- >>> C["oreo"] = "doublestuff"
- >>> C["oreo"]["path"] = "/"
- >>> print C
- Set-Cookie: oreo="doublestuff"; Path=/;
-
-Each dictionary element has a 'value' attribute, which gives you
-back the value associated with the key.
-
- >>> C = Cookie.SmartCookie()
- >>> C["twix"] = "none for you"
- >>> C["twix"].value
- 'none for you'
-
-
-A Bit More Advanced
--------------------
-
-As mentioned before, there are three different flavors of Cookie
-objects, each with different encoding/decoding semantics. This
-section briefly discusses the differences.
-
-SimpleCookie
-
-The SimpleCookie expects that all values should be standard strings.
-Just to be sure, SimpleCookie invokes the str() builtin to convert
-the value to a string, when the values are set dictionary-style.
-
- >>> C = Cookie.SimpleCookie()
- >>> C["number"] = 7
- >>> C["string"] = "seven"
- >>> C["number"].value
- '7'
- >>> C["string"].value
- 'seven'
- >>> print C
- Set-Cookie: number=7;
- Set-Cookie: string=seven;
-
-
-SerialCookie
-
-The SerialCookie expects that all values should be serialized using
-cPickle (or pickle, if cPickle isn't available). As a result of
-serializing, SerialCookie can save almost any Python object to a
-value, and recover the exact same object when the cookie has been
-returned. (SerialCookie can yield some strange-looking cookie
-values, however.)
-
- >>> C = Cookie.SerialCookie()
- >>> C["number"] = 7
- >>> C["string"] = "seven"
- >>> C["number"].value
- 7
- >>> C["string"].value
- 'seven'
- >>> print C
- Set-Cookie: number="I7\012.";
- Set-Cookie: string="S'seven'\012p1\012.";
-
-Be warned, however, if SerialCookie cannot de-serialize a value (because
-it isn't a valid pickle'd object), IT WILL RAISE AN EXCEPTION.
-
-
-SmartCookie
-
-The SmartCookie combines aspects of each of the other two flavors.
-When setting a value in a dictionary-fashion, the SmartCookie will
-serialize (ala cPickle) the value *if and only if* it isn't a
-Python string. String objects are *not* serialized. Similarly,
-when the load() method parses out values, it attempts to de-serialize
-the value. If it fails, then it falls back to treating the value
-as a string.
-
- >>> C = Cookie.SmartCookie()
- >>> C["number"] = 7
- >>> C["string"] = "seven"
- >>> C["number"].value
- 7
- >>> C["string"].value
- 'seven'
- >>> print C
- Set-Cookie: number="I7\012.";
- Set-Cookie: string=seven;
-
-
-Backwards Compatibility
------------------------
-
-In order to keep compatibility with earlier versions of Cookie.py,
-it is still possible to use Cookie.Cookie() to create a Cookie. In
-fact, this simply returns a SmartCookie.
-
- >>> C = Cookie.Cookie()
- >>> C.__class__
- <class Cookie.SmartCookie at 99f88>
-
-
-Finis.
-""" #"
-# ^
-# |----helps out font-lock
-
-#
-# Import our required modules
-#
-import string, sys
-from UserDict import UserDict
-
-try:
- from cPickle import dumps, loads
-except ImportError:
- from pickle import dumps, loads
-
-try:
- import re
-except ImportError:
- raise ImportError, "Cookie.py requires 're' from Python 1.5 or later"
-
-
-#
-# Define an exception visible to External modules
-#
-class CookieError(Exception):
- pass
-
-
-# These quoting routines conform to the RFC2109 specification, which in
-# turn references the character definitions from RFC2068. They provide
-# a two-way quoting algorithm. Any non-text character is translated
-# into a 4 character sequence: a backslash followed by the
-# three-digit octal equivalent of the character. Any '\' or '"' is
-# quoted with a preceding backslash.
-#
-# These are taken from RFC2068 and RFC2109.
-# _LegalChars is the list of chars which don't require "'s
-# _Translator hash-table for fast quoting
-#
-_LegalChars = string.letters + string.digits + "!#$%&'*+-.^_`|~"
-_Translator = {
- '\000' : '\\000', '\001' : '\\001', '\002' : '\\002',
- '\003' : '\\003', '\004' : '\\004', '\005' : '\\005',
- '\006' : '\\006', '\007' : '\\007', '\010' : '\\010',
- '\011' : '\\011', '\012' : '\\012', '\013' : '\\013',
- '\014' : '\\014', '\015' : '\\015', '\016' : '\\016',
- '\017' : '\\017', '\020' : '\\020', '\021' : '\\021',
- '\022' : '\\022', '\023' : '\\023', '\024' : '\\024',
- '\025' : '\\025', '\026' : '\\026', '\027' : '\\027',
- '\030' : '\\030', '\031' : '\\031', '\032' : '\\032',
- '\033' : '\\033', '\034' : '\\034', '\035' : '\\035',
- '\036' : '\\036', '\037' : '\\037',
-
- '"' : '\\"', '\\' : '\\\\',
-
- '\177' : '\\177', '\200' : '\\200', '\201' : '\\201',
- '\202' : '\\202', '\203' : '\\203', '\204' : '\\204',
- '\205' : '\\205', '\206' : '\\206', '\207' : '\\207',
- '\210' : '\\210', '\211' : '\\211', '\212' : '\\212',
- '\213' : '\\213', '\214' : '\\214', '\215' : '\\215',
- '\216' : '\\216', '\217' : '\\217', '\220' : '\\220',
- '\221' : '\\221', '\222' : '\\222', '\223' : '\\223',
- '\224' : '\\224', '\225' : '\\225', '\226' : '\\226',
- '\227' : '\\227', '\230' : '\\230', '\231' : '\\231',
- '\232' : '\\232', '\233' : '\\233', '\234' : '\\234',
- '\235' : '\\235', '\236' : '\\236', '\237' : '\\237',
- '\240' : '\\240', '\241' : '\\241', '\242' : '\\242',
- '\243' : '\\243', '\244' : '\\244', '\245' : '\\245',
- '\246' : '\\246', '\247' : '\\247', '\250' : '\\250',
- '\251' : '\\251', '\252' : '\\252', '\253' : '\\253',
- '\254' : '\\254', '\255' : '\\255', '\256' : '\\256',
- '\257' : '\\257', '\260' : '\\260', '\261' : '\\261',
- '\262' : '\\262', '\263' : '\\263', '\264' : '\\264',
- '\265' : '\\265', '\266' : '\\266', '\267' : '\\267',
- '\270' : '\\270', '\271' : '\\271', '\272' : '\\272',
- '\273' : '\\273', '\274' : '\\274', '\275' : '\\275',
- '\276' : '\\276', '\277' : '\\277', '\300' : '\\300',
- '\301' : '\\301', '\302' : '\\302', '\303' : '\\303',
- '\304' : '\\304', '\305' : '\\305', '\306' : '\\306',
- '\307' : '\\307', '\310' : '\\310', '\311' : '\\311',
- '\312' : '\\312', '\313' : '\\313', '\314' : '\\314',
- '\315' : '\\315', '\316' : '\\316', '\317' : '\\317',
- '\320' : '\\320', '\321' : '\\321', '\322' : '\\322',
- '\323' : '\\323', '\324' : '\\324', '\325' : '\\325',
- '\326' : '\\326', '\327' : '\\327', '\330' : '\\330',
- '\331' : '\\331', '\332' : '\\332', '\333' : '\\333',
- '\334' : '\\334', '\335' : '\\335', '\336' : '\\336',
- '\337' : '\\337', '\340' : '\\340', '\341' : '\\341',
- '\342' : '\\342', '\343' : '\\343', '\344' : '\\344',
- '\345' : '\\345', '\346' : '\\346', '\347' : '\\347',
- '\350' : '\\350', '\351' : '\\351', '\352' : '\\352',
- '\353' : '\\353', '\354' : '\\354', '\355' : '\\355',
- '\356' : '\\356', '\357' : '\\357', '\360' : '\\360',
- '\361' : '\\361', '\362' : '\\362', '\363' : '\\363',
- '\364' : '\\364', '\365' : '\\365', '\366' : '\\366',
- '\367' : '\\367', '\370' : '\\370', '\371' : '\\371',
- '\372' : '\\372', '\373' : '\\373', '\374' : '\\374',
- '\375' : '\\375', '\376' : '\\376', '\377' : '\\377'
- }
-
-def _quote(str, LegalChars=_LegalChars,
- join=string.join, idmap=string._idmap, translate=string.translate):
- #
- # If the string does not need to be double-quoted,
-    # then just return the string. Otherwise, surround
-    # the string in double quotes and quote any special
-    # characters with a preceding backslash.
- #
- if "" == translate(str, idmap, LegalChars):
- return str
- else:
- return '"' + join( map(_Translator.get, str, str), "" ) + '"'
-# end _quote
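-#
-# For illustration (derived from the table above): values made up solely
-# of _LegalChars pass through _quote untouched, anything else is wrapped
-# in double quotes with the special characters escaped.
-#
-#     _quote('simple')        ->  'simple'
-#     _quote('simple value')  ->  '"simple value"'
-#     _quote('fudge\012')     ->  '"fudge\\012"'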
-
-
-_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
-_QuotePatt = re.compile(r"[\\].")
-
-def _unquote(str, join=string.join, atoi=string.atoi):
- # If there aren't any doublequotes,
- # then there can't be any special characters. See RFC 2109.
- if len(str) < 2:
- return str
- if str[0] != '"' or str[-1] != '"':
- return str
-
- # We have to assume that we must decode this string.
- # Down to work.
-
- # Remove the "s
- str = str[1:-1]
-
- # Check for special sequences. Examples:
- # \012 --> \n
- # \" --> "
- #
- i = 0
- n = len(str)
- res = []
- while 0 <= i < n:
- Omatch = _OctalPatt.search(str, i)
- Qmatch = _QuotePatt.search(str, i)
- if not Omatch and not Qmatch: # Neither matched
- res.append(str[i:])
- break
- # else:
- j = k = -1
- if Omatch: j = Omatch.start(0)
- if Qmatch: k = Qmatch.start(0)
- if Qmatch and ( not Omatch or k < j ): # QuotePatt matched
- res.append(str[i:k])
- res.append(str[k+1])
- i = k+2
- else: # OctalPatt matched
- res.append(str[i:j])
- res.append( chr( atoi(str[j+1:j+4], 8) ) )
- i = j+4
- return join(res, "")
-# end _unquote
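-#
-# For illustration, _unquote reverses _quote, so a quoted value survives
-# the round trip:
-#
-#     _unquote(_quote('fudge\012')) == 'fudge\012'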
-
-# The _getdate() routine is used to set the expiration time in
-# the cookie's HTTP header. By default, _getdate() returns the
-# current time in the appropriate "expires" format for a
-# Set-Cookie header. The one optional argument is an offset from
-# now, in seconds. For example, an offset of -3600 means "one hour ago".
-# The offset may be a floating point number.
-#
-
-_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
-
-_monthname = [None,
- 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
- 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
-
-def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
- from time import gmtime, time
- now = time()
- year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
- return "%s, %02d-%3s-%4d %02d:%02d:%02d GMT" % \
- (weekdayname[wd], day, monthname[month], year, hh, mm, ss)
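-
-# For illustration: _getdate(300) produces a string such as
-# "Sat, 01-Jan-2000 00:05:00 GMT", i.e. a timestamp five minutes from now.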
-
-
-#
-# A class to hold ONE key,value pair.
-# In a cookie, each such pair may have several attributes,
-# so this class is used to keep the attributes associated
-# with the appropriate key,value pair.
-# This class also includes a coded_value attribute, which
-# is used to hold the network representation of the
-# value. This is most useful when Python objects are
-# pickled for network transit.
-#
-
-class Morsel(UserDict):
- # RFC 2109 lists these attributes as reserved:
- # path comment domain
- # max-age secure version
- #
- # For historical reasons, these attributes are also reserved:
- # expires
- #
- # This dictionary provides a mapping from the lowercase
- # variant on the left to the appropriate traditional
- # formatting on the right.
- _reserved = { "expires" : "expires",
- "path" : "Path",
- "comment" : "Comment",
- "domain" : "Domain",
- "max-age" : "Max-Age",
- "secure" : "secure",
- "version" : "Version",
- }
- _reserved_keys = _reserved.keys()
-
- def __init__(self):
- # Set defaults
- self.key = self.value = self.coded_value = None
- UserDict.__init__(self)
-
- # Set default attributes
- for K in self._reserved_keys:
- UserDict.__setitem__(self, K, "")
- # end __init__
-
- def __setitem__(self, K, V):
- K = string.lower(K)
- if not K in self._reserved_keys:
- raise CookieError("Invalid Attribute %s" % K)
- UserDict.__setitem__(self, K, V)
- # end __setitem__
-
- def isReservedKey(self, K):
- return string.lower(K) in self._reserved_keys
- # end isReservedKey
-
- def set(self, key, val, coded_val,
- LegalChars=_LegalChars,
- idmap=string._idmap, translate=string.translate ):
- # First we verify that the key isn't a reserved word
- # Second we make sure it only contains legal characters
- if string.lower(key) in self._reserved_keys:
- raise CookieError("Attempt to set a reserved key: %s" % key)
- if "" != translate(key, idmap, LegalChars):
- raise CookieError("Illegal key value: %s" % key)
-
- # It's a good key, so save it.
- self.key = key
- self.value = val
- self.coded_value = coded_val
- # end set
-
- def output(self, attrs=None, header = "Set-Cookie:"):
- return "%s %s" % ( header, self.OutputString(attrs) )
-
- __str__ = output
-
- def __repr__(self):
- return '<%s: %s=%s>' % (self.__class__.__name__,
- self.key, repr(self.value) )
-
- def js_output(self, attrs=None):
- # Print javascript
- return """
- <SCRIPT LANGUAGE="JavaScript">
- <!-- begin hiding
- document.cookie = \"%s\"
- // end hiding -->
- </script>
- """ % ( self.OutputString(attrs), )
- # end js_output()
-
- def OutputString(self, attrs=None):
- # Build up our result
- #
- result = []
- RA = result.append
-
- # First, the key=value pair
- RA("%s=%s;" % (self.key, self.coded_value))
-
- # Now add any defined attributes
- if attrs == None:
- attrs = self._reserved_keys
- for K,V in self.items():
- if V == "": continue
- if K not in attrs: continue
- if K == "expires" and type(V) == type(1):
- RA("%s=%s;" % (self._reserved[K], _getdate(V)))
- elif K == "max-age" and type(V) == type(1):
- RA("%s=%d;" % (self._reserved[K], V))
- elif K == "secure":
- RA("%s;" % self._reserved[K])
- else:
- RA("%s=%s;" % (self._reserved[K], V))
-
- # Return the result
- return string.join(result, " ")
- # end OutputString
-# end Morsel class
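-
-# For illustration (Morsels are normally created by BaseCookie, but the
-# class can also be driven by hand):
-#
-#     M = Morsel()
-#     M.set("oreo", "doublestuff", "doublestuff")
-#     M["path"] = "/"
-#     M.output()   ->   'Set-Cookie: oreo=doublestuff; Path=/;'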
-
-
-
-#
-# Pattern for finding cookie
-#
-# This used to be strict parsing based on the RFC2109 and RFC2068
-# specifications. I have since discovered that MSIE 3.0x doesn't
-# follow the character rules outlined in those specs. As a
-# result, the parsing rules here are less strict.
-#
-
-_LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{]"
-_CookiePattern = re.compile(
- r"(?x)" # This is a Verbose pattern
- r"(?P<key>" # Start of group 'key'
- ""+ _LegalCharsPatt +"+" # Any word of at least one letter
- r")" # End of group 'key'
- r"\s*=\s*" # Equal Sign
- r"(?P<val>" # Start of group 'val'
- r'"(?:[^\\"]|\\.)*"' # Any doublequoted string
- r"|" # or
- ""+ _LegalCharsPatt +"*" # Any word or empty string
- r")" # End of group 'val'
- r"\s*;?" # Probably ending in a semi-colon
- )
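-
-# For illustration: run against 'chips=ahoy; vienna=finger', the pattern
-# first matches key 'chips' with val 'ahoy', then key 'vienna' with
-# val 'finger'.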
-
-
-# At long last, here is the cookie class.
-# Using this class is almost just like using a dictionary.
-# See this module's docstring for example usage.
-#
-class BaseCookie(UserDict):
- # A container class for a set of Morsels
- #
-
- def value_decode(self, val):
- """real_value, coded_value = value_decode(STRING)
- Called prior to setting a cookie's value from the network
- representation. The VALUE is the value read from HTTP
- header.
- Override this function to modify the behavior of cookies.
- """
- return val, val
-    # end value_decode
-
- def value_encode(self, val):
- """real_value, coded_value = value_encode(VALUE)
- Called prior to setting a cookie's value from the dictionary
- representation. The VALUE is the value being assigned.
- Override this function to modify the behavior of cookies.
- """
- strval = str(val)
- return strval, strval
- # end value_encode
-
- def __init__(self, input=None):
- UserDict.__init__(self)
- if input: self.load(input)
- # end __init__
-
- def __set(self, key, real_value, coded_value):
- """Private method for setting a cookie's value"""
- M = self.get(key, Morsel())
- M.set(key, real_value, coded_value)
- UserDict.__setitem__(self, key, M)
- # end __set
-
- def __setitem__(self, key, value):
- """Dictionary style assignment."""
- rval, cval = self.value_encode(value)
- self.__set(key, rval, cval)
- # end __setitem__
-
- def output(self, attrs=None, header="Set-Cookie:", sep="\n"):
- """Return a string suitable for HTTP."""
- result = []
- for K,V in self.items():
- result.append( V.output(attrs, header) )
- return string.join(result, sep)
- # end output
-
- __str__ = output
-
- def __repr__(self):
- L = []
- for K,V in self.items():
- L.append( '%s=%s' % (K,repr(V.value) ) )
- return '<%s: %s>' % (self.__class__.__name__, string.join(L))
-
- def js_output(self, attrs=None):
- """Return a string suitable for JavaScript."""
- result = []
- for K,V in self.items():
- result.append( V.js_output(attrs) )
- return string.join(result, "")
- # end js_output
-
- def load(self, rawdata):
- """Load cookies from a string (presumably HTTP_COOKIE) or
- from a dictionary. Loading cookies from a dictionary 'd'
- is equivalent to calling:
- map(Cookie.__setitem__, d.keys(), d.values())
- """
- if type(rawdata) == type(""):
- self.__ParseString(rawdata)
- else:
- self.update(rawdata)
- return
- # end load()
-
- def __ParseString(self, str, patt=_CookiePattern):
- i = 0 # Our starting point
- n = len(str) # Length of string
- M = None # current morsel
-
- while 0 <= i < n:
- # Start looking for a cookie
- match = patt.search(str, i)
- if not match: break # No more cookies
-
- K,V = match.group("key"), match.group("val")
- i = match.end(0)
-
- # Parse the key, value in case it's metainfo
- if K[0] == "$":
- # We ignore attributes which pertain to the cookie
- # mechanism as a whole. See RFC 2109.
- # (Does anyone care?)
- if M:
- M[ K[1:] ] = V
- elif string.lower(K) in Morsel._reserved_keys:
- if M:
- M[ K ] = _unquote(V)
- else:
- rval, cval = self.value_decode(V)
- self.__set(K, rval, cval)
- M = self[K]
- # end __ParseString
-# end BaseCookie class
-
-class SimpleCookie(BaseCookie):
- """SimpleCookie
- SimpleCookie supports strings as cookie values. When setting
- the value using the dictionary assignment notation, SimpleCookie
- calls the builtin str() to convert the value to a string. Values
- received from HTTP are kept as strings.
- """
- def value_decode(self, val):
- return _unquote( val ), val
- def value_encode(self, val):
- strval = str(val)
- return strval, _quote( strval )
-# end SimpleCookie
-
-class SerialCookie(BaseCookie):
- """SerialCookie
- SerialCookie supports arbitrary objects as cookie values. All
- values are serialized (using cPickle) before being sent to the
- client. All incoming values are assumed to be valid Pickle
- representations. IF AN INCOMING VALUE IS NOT IN A VALID PICKLE
- FORMAT, THEN AN EXCEPTION WILL BE RAISED.
-
- Note: Large cookie values add overhead because they must be
- retransmitted on every HTTP transaction.
-
- Note: HTTP has a 2k limit on the size of a cookie. This class
- does not check for this limit, so be careful!!!
- """
- def value_decode(self, val):
- # This could raise an exception!
- return loads( _unquote(val) ), val
- def value_encode(self, val):
- return val, _quote( dumps(val) )
-# end SerialCookie
-
-class SmartCookie(BaseCookie):
- """SmartCookie
- SmartCookie supports arbitrary objects as cookie values. If the
- object is a string, then it is quoted. If the object is not a
- string, however, then SmartCookie will use cPickle to serialize
- the object into a string representation.
-
- Note: Large cookie values add overhead because they must be
- retransmitted on every HTTP transaction.
-
- Note: HTTP has a 2k limit on the size of a cookie. This class
- does not check for this limit, so be careful!!!
- """
- def value_decode(self, val):
- strval = _unquote(val)
- try:
- return loads(strval), val
- except:
- return strval, val
- def value_encode(self, val):
- if type(val) == type(""):
- return val, _quote(val)
- else:
- return val, _quote( dumps(val) )
-# end SmartCookie
-
-
-###########################################################
-# Backwards Compatibility: Don't break any existing code!
-
-# We provide Cookie() as an alias for SmartCookie()
-Cookie = SmartCookie
-
-#
-###########################################################
-
-
-
-#Local Variables:
-#tab-width: 4
-#end:
diff --git a/Lib/dos-8x3/fileinpu.py b/Lib/dos-8x3/fileinpu.py
deleted file mode 100644
index d1b7617..0000000
--- a/Lib/dos-8x3/fileinpu.py
+++ /dev/null
@@ -1,268 +0,0 @@
-"""Helper class to quickly write a loop over all standard input files.
-
-Typical use is:
-
- import fileinput
- for line in fileinput.input():
- process(line)
-
-This iterates over the lines of all files listed in sys.argv[1:],
-defaulting to sys.stdin if the list is empty. If a filename is '-' it
-is also replaced by sys.stdin. To specify an alternative list of
-filenames, pass it as the argument to input(). A single file name is
-also allowed.
-
-Functions filename(), lineno() return the filename and cumulative line
-number of the line that has just been read; filelineno() returns its
-line number in the current file; isfirstline() returns true iff the
-line just read is the first line of its file; isstdin() returns true
-iff the line was read from sys.stdin. Function nextfile() closes the
-current file so that the next iteration will read the first line from
-the next file (if any); lines not read from the file will not count
-towards the cumulative line count; the filename is not changed until
-after the first line of the next file has been read. Function close()
-closes the sequence.
-
-Before any lines have been read, filename() returns None and both line
-numbers are zero; nextfile() has no effect. After all lines have been
-read, filename() and the line number functions return the values
-pertaining to the last line read; nextfile() has no effect.
-
-All files are opened in text mode. If an I/O error occurs during
-opening or reading a file, the IOError exception is raised.
-
-If sys.stdin is used more than once, the second and further use will
-return no lines, except perhaps for interactive use, or if it has been
-explicitly reset (e.g. using sys.stdin.seek(0)).
-
-Empty files are opened and immediately closed; the only time their
-presence in the list of filenames is noticeable at all is when the
-last file opened is empty.
-
-It is possible that the last line of a file doesn't end in a newline
-character; otherwise lines are returned including the trailing
-newline.
-
-Class FileInput is the implementation; its methods filename(),
-lineno(), filelineno(), isfirstline(), isstdin(), nextfile() and close()
-correspond to the functions in the module. In addition it has a
-readline() method which returns the next input line, and a
-__getitem__() method which implements the sequence behavior. The
-sequence must be accessed in strictly sequential order; sequence
-access and readline() cannot be mixed.
-
-Optional in-place filtering: if the keyword argument inplace=1 is
-passed to input() or to the FileInput constructor, the file is moved
-to a backup file and standard output is directed to the input file.
-This makes it possible to write a filter that rewrites its input file
-in place. If the keyword argument backup=".<some extension>" is also
-given, it specifies the extension for the backup file, and the backup
-file remains around; by default, the extension is ".bak" and it is
-deleted when the output file is closed. In-place filtering is
-disabled when standard input is read. XXX The current implementation
-does not work for MS-DOS 8+3 filesystems.
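-
-For example, a minimal in-place filter (a sketch; it assumes a text file
-named "data.txt" exists) that upper-cases every line:
-
-    import fileinput, string, sys
-    for line in fileinput.input(["data.txt"], inplace=1):
-        sys.stdout.write(string.upper(line))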
-
-XXX Possible additions:
-
-- optional getopt argument processing
-- specify open mode ('r' or 'rb')
-- specify buffer size
-- fileno()
-- isatty()
-- read(), read(size), even readlines()
-
-"""
-
-import sys, os, stat
-
-_state = None
-
-def input(files=None, inplace=0, backup=""):
- global _state
- if _state and _state._file:
- raise RuntimeError, "input() already active"
- _state = FileInput(files, inplace, backup)
- return _state
-
-def close():
- global _state
- state = _state
- _state = None
- if state:
- state.close()
-
-def nextfile():
- if not _state:
- raise RuntimeError, "no active input()"
- return _state.nextfile()
-
-def filename():
- if not _state:
- raise RuntimeError, "no active input()"
- return _state.filename()
-
-def lineno():
- if not _state:
- raise RuntimeError, "no active input()"
- return _state.lineno()
-
-def filelineno():
- if not _state:
- raise RuntimeError, "no active input()"
- return _state.filelineno()
-
-def isfirstline():
- if not _state:
- raise RuntimeError, "no active input()"
- return _state.isfirstline()
-
-def isstdin():
- if not _state:
- raise RuntimeError, "no active input()"
- return _state.isstdin()
-
-class FileInput:
-
- def __init__(self, files=None, inplace=0, backup=""):
- if type(files) == type(''):
- files = (files,)
- else:
- if files is None:
- files = sys.argv[1:]
- if not files:
- files = ('-',)
- else:
- files = tuple(files)
- self._files = files
- self._inplace = inplace
- self._backup = backup
- self._savestdout = None
- self._output = None
- self._filename = None
- self._lineno = 0
- self._filelineno = 0
- self._file = None
- self._isstdin = 0
- self._backupfilename = None
-
- def __del__(self):
- self.close()
-
- def close(self):
- self.nextfile()
- self._files = ()
-
- def __getitem__(self, i):
- if i != self._lineno:
- raise RuntimeError, "accessing lines out of order"
- line = self.readline()
- if not line:
- raise IndexError, "end of input reached"
- return line
-
- def nextfile(self):
- savestdout = self._savestdout
- self._savestdout = 0
- if savestdout:
- sys.stdout = savestdout
-
- output = self._output
- self._output = 0
- if output:
- output.close()
-
- file = self._file
- self._file = 0
- if file and not self._isstdin:
- file.close()
-
- backupfilename = self._backupfilename
- self._backupfilename = 0
- if backupfilename and not self._backup:
- try: os.unlink(backupfilename)
- except: pass
-
- self._isstdin = 0
-
- def readline(self):
- if not self._file:
- if not self._files:
- return ""
- self._filename = self._files[0]
- self._files = self._files[1:]
- self._filelineno = 0
- self._file = None
- self._isstdin = 0
- self._backupfilename = 0
- if self._filename == '-':
- self._filename = '<stdin>'
- self._file = sys.stdin
- self._isstdin = 1
- else:
- if self._inplace:
- self._backupfilename = (
- self._filename + (self._backup or ".bak"))
- try: os.unlink(self._backupfilename)
- except os.error: pass
- # The next few lines may raise IOError
- os.rename(self._filename, self._backupfilename)
- self._file = open(self._backupfilename, "r")
- try:
- perm = os.fstat(self._file.fileno())[stat.ST_MODE]
- except:
- self._output = open(self._filename, "w")
- else:
- fd = os.open(self._filename,
- os.O_CREAT | os.O_WRONLY | os.O_TRUNC,
- perm)
- self._output = os.fdopen(fd, "w")
- try:
- os.chmod(self._filename, perm)
- except:
- pass
- self._savestdout = sys.stdout
- sys.stdout = self._output
- else:
- # This may raise IOError
- self._file = open(self._filename, "r")
- line = self._file.readline()
- if line:
- self._lineno = self._lineno + 1
- self._filelineno = self._filelineno + 1
- return line
- self.nextfile()
- # Recursive call
- return self.readline()
-
- def filename(self):
- return self._filename
-
- def lineno(self):
- return self._lineno
-
- def filelineno(self):
- return self._filelineno
-
- def isfirstline(self):
- return self._filelineno == 1
-
- def isstdin(self):
- return self._isstdin
-
-def _test():
- import getopt
- inplace = 0
- backup = 0
- opts, args = getopt.getopt(sys.argv[1:], "ib:")
- for o, a in opts:
- if o == '-i': inplace = 1
- if o == '-b': backup = a
- for line in input(args, inplace=inplace, backup=backup):
- if line[-1:] == '\n': line = line[:-1]
- if line[-1:] == '\r': line = line[:-1]
- print "%d: %s[%d]%s %s" % (lineno(), filename(), filelineno(),
- isfirstline() and "*" or "", line)
- print "%d: %s[%d]" % (lineno(), filename(), filelineno())
-
-if __name__ == '__main__':
- _test()
diff --git a/Lib/dos-8x3/formatte.py b/Lib/dos-8x3/formatte.py
deleted file mode 100755
index 4d6a129..0000000
--- a/Lib/dos-8x3/formatte.py
+++ /dev/null
@@ -1,422 +0,0 @@
-"""Generic output formatting.
-
-Formatter objects transform an abstract flow of formatting events into
-specific output events on writer objects. Formatters manage several stack
-structures to allow various properties of a writer object to be changed and
-restored; writers need not be able to handle relative changes nor any sort
-of ``change back'' operation. Specific writer properties which may be
-controlled via formatter objects are horizontal alignment, font, and left
-margin indentations. A mechanism is provided which supports providing
-arbitrary, non-exclusive style settings to a writer as well. Additional
-interfaces facilitate formatting events which are not reversible, such as
-paragraph separation.
-
-Writer objects encapsulate device interfaces. Abstract devices, such as
-file formats, are supported as well as physical devices. The provided
-implementations all work with abstract devices. The interface makes
-available mechanisms for setting the properties which formatter objects
-manage and inserting data into the output.
-"""
-
-import string
-import sys
-from types import StringType
-
-
-AS_IS = None
-
-
-class NullFormatter:
-
- def __init__(self, writer=None):
- if not writer:
- writer = NullWriter()
- self.writer = writer
- def end_paragraph(self, blankline): pass
- def add_line_break(self): pass
- def add_hor_rule(self, *args, **kw): pass
- def add_label_data(self, format, counter, blankline=None): pass
- def add_flowing_data(self, data): pass
- def add_literal_data(self, data): pass
- def flush_softspace(self): pass
- def push_alignment(self, align): pass
- def pop_alignment(self): pass
- def push_font(self, x): pass
- def pop_font(self): pass
- def push_margin(self, margin): pass
- def pop_margin(self): pass
- def set_spacing(self, spacing): pass
- def push_style(self, *styles): pass
- def pop_style(self, n=1): pass
- def assert_line_data(self, flag=1): pass
-
-
-class AbstractFormatter:
-
- # Space handling policy: blank spaces at the boundary between elements
- # are handled by the outermost context. "Literal" data is not checked
- # to determine context, so spaces in literal data are handled directly
- # in all circumstances.
-
- def __init__(self, writer):
- self.writer = writer # Output device
- self.align = None # Current alignment
- self.align_stack = [] # Alignment stack
- self.font_stack = [] # Font state
- self.margin_stack = [] # Margin state
- self.spacing = None # Vertical spacing state
- self.style_stack = [] # Other state, e.g. color
- self.nospace = 1 # Should leading space be suppressed
- self.softspace = 0 # Should a space be inserted
- self.para_end = 1 # Just ended a paragraph
- self.parskip = 0 # Skipped space between paragraphs?
- self.hard_break = 1 # Have a hard break
- self.have_label = 0
-
- def end_paragraph(self, blankline):
- if not self.hard_break:
- self.writer.send_line_break()
- self.have_label = 0
- if self.parskip < blankline and not self.have_label:
- self.writer.send_paragraph(blankline - self.parskip)
- self.parskip = blankline
- self.have_label = 0
- self.hard_break = self.nospace = self.para_end = 1
- self.softspace = 0
-
- def add_line_break(self):
- if not (self.hard_break or self.para_end):
- self.writer.send_line_break()
- self.have_label = self.parskip = 0
- self.hard_break = self.nospace = 1
- self.softspace = 0
-
- def add_hor_rule(self, *args, **kw):
- if not self.hard_break:
- self.writer.send_line_break()
- apply(self.writer.send_hor_rule, args, kw)
- self.hard_break = self.nospace = 1
- self.have_label = self.para_end = self.softspace = self.parskip = 0
-
- def add_label_data(self, format, counter, blankline = None):
- if self.have_label or not self.hard_break:
- self.writer.send_line_break()
- if not self.para_end:
- self.writer.send_paragraph((blankline and 1) or 0)
- if type(format) is StringType:
- self.writer.send_label_data(self.format_counter(format, counter))
- else:
- self.writer.send_label_data(format)
- self.nospace = self.have_label = self.hard_break = self.para_end = 1
- self.softspace = self.parskip = 0
-
- def format_counter(self, format, counter):
- label = ''
- for c in format:
- try:
- if c == '1':
- label = label + ('%d' % counter)
- elif c in 'aA':
- if counter > 0:
- label = label + self.format_letter(c, counter)
- elif c in 'iI':
- if counter > 0:
- label = label + self.format_roman(c, counter)
- else:
- label = label + c
- except:
- label = label + c
- return label
-
- def format_letter(self, case, counter):
- label = ''
- while counter > 0:
- counter, x = divmod(counter-1, 26)
- s = chr(ord(case) + x)
- label = s + label
- return label
-
- def format_roman(self, case, counter):
- ones = ['i', 'x', 'c', 'm']
- fives = ['v', 'l', 'd']
- label, index = '', 0
- # This will die of IndexError when counter is too big
- while counter > 0:
- counter, x = divmod(counter, 10)
- if x == 9:
- label = ones[index] + ones[index+1] + label
- elif x == 4:
- label = ones[index] + fives[index] + label
- else:
- if x >= 5:
- s = fives[index]
- x = x-5
- else:
- s = ''
- s = s + ones[index]*x
- label = s + label
- index = index + 1
- if case == 'I':
- return string.upper(label)
- return label
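-
-    # For illustration: format_roman('i', 1998) yields 'mcmxcviii' and
-    # format_roman('I', 4) yields 'IV'.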
-
- def add_flowing_data(self, data,
- # These are only here to load them into locals:
- whitespace = string.whitespace,
- join = string.join, split = string.split):
- if not data: return
- # The following looks a bit convoluted but is a great improvement over
- # data = regsub.gsub('[' + string.whitespace + ']+', ' ', data)
- prespace = data[:1] in whitespace
- postspace = data[-1:] in whitespace
- data = join(split(data))
- if self.nospace and not data:
- return
- elif prespace or self.softspace:
- if not data:
- if not self.nospace:
- self.softspace = 1
- self.parskip = 0
- return
- if not self.nospace:
- data = ' ' + data
- self.hard_break = self.nospace = self.para_end = \
- self.parskip = self.have_label = 0
- self.softspace = postspace
- self.writer.send_flowing_data(data)
-
- def add_literal_data(self, data):
- if not data: return
- if self.softspace:
- self.writer.send_flowing_data(" ")
- self.hard_break = data[-1:] == '\n'
- self.nospace = self.para_end = self.softspace = \
- self.parskip = self.have_label = 0
- self.writer.send_literal_data(data)
-
- def flush_softspace(self):
- if self.softspace:
- self.hard_break = self.para_end = self.parskip = \
- self.have_label = self.softspace = 0
- self.nospace = 1
- self.writer.send_flowing_data(' ')
-
- def push_alignment(self, align):
- if align and align != self.align:
- self.writer.new_alignment(align)
- self.align = align
- self.align_stack.append(align)
- else:
- self.align_stack.append(self.align)
-
- def pop_alignment(self):
- if self.align_stack:
- del self.align_stack[-1]
- if self.align_stack:
- self.align = align = self.align_stack[-1]
- self.writer.new_alignment(align)
- else:
- self.align = None
- self.writer.new_alignment(None)
-
- def push_font(self, (size, i, b, tt)):
- if self.softspace:
- self.hard_break = self.para_end = self.softspace = 0
- self.nospace = 1
- self.writer.send_flowing_data(' ')
- if self.font_stack:
- csize, ci, cb, ctt = self.font_stack[-1]
- if size is AS_IS: size = csize
- if i is AS_IS: i = ci
- if b is AS_IS: b = cb
- if tt is AS_IS: tt = ctt
- font = (size, i, b, tt)
- self.font_stack.append(font)
- self.writer.new_font(font)
-
- def pop_font(self):
- if self.font_stack:
- del self.font_stack[-1]
- if self.font_stack:
- font = self.font_stack[-1]
- else:
- font = None
- self.writer.new_font(font)
-
- def push_margin(self, margin):
- self.margin_stack.append(margin)
- fstack = filter(None, self.margin_stack)
- if not margin and fstack:
- margin = fstack[-1]
- self.writer.new_margin(margin, len(fstack))
-
- def pop_margin(self):
- if self.margin_stack:
- del self.margin_stack[-1]
- fstack = filter(None, self.margin_stack)
- if fstack:
- margin = fstack[-1]
- else:
- margin = None
- self.writer.new_margin(margin, len(fstack))
-
- def set_spacing(self, spacing):
- self.spacing = spacing
- self.writer.new_spacing(spacing)
-
- def push_style(self, *styles):
- if self.softspace:
- self.hard_break = self.para_end = self.softspace = 0
- self.nospace = 1
- self.writer.send_flowing_data(' ')
- for style in styles:
- self.style_stack.append(style)
- self.writer.new_styles(tuple(self.style_stack))
-
- def pop_style(self, n=1):
- del self.style_stack[-n:]
- self.writer.new_styles(tuple(self.style_stack))
-
- def assert_line_data(self, flag=1):
- self.nospace = self.hard_break = not flag
- self.para_end = self.parskip = self.have_label = 0
-
-
-class NullWriter:
- """Minimal writer interface to use in testing & inheritance."""
- def __init__(self): pass
- def flush(self): pass
- def new_alignment(self, align): pass
- def new_font(self, font): pass
- def new_margin(self, margin, level): pass
- def new_spacing(self, spacing): pass
- def new_styles(self, styles): pass
- def send_paragraph(self, blankline): pass
- def send_line_break(self): pass
- def send_hor_rule(self, *args, **kw): pass
- def send_label_data(self, data): pass
- def send_flowing_data(self, data): pass
- def send_literal_data(self, data): pass
-
-
-class AbstractWriter(NullWriter):
-
- def __init__(self):
- pass
-
- def new_alignment(self, align):
- print "new_alignment(%s)" % `align`
-
- def new_font(self, font):
- print "new_font(%s)" % `font`
-
- def new_margin(self, margin, level):
- print "new_margin(%s, %d)" % (`margin`, level)
-
- def new_spacing(self, spacing):
- print "new_spacing(%s)" % `spacing`
-
- def new_styles(self, styles):
- print "new_styles(%s)" % `styles`
-
- def send_paragraph(self, blankline):
- print "send_paragraph(%s)" % `blankline`
-
- def send_line_break(self):
- print "send_line_break()"
-
- def send_hor_rule(self, *args, **kw):
- print "send_hor_rule()"
-
- def send_label_data(self, data):
- print "send_label_data(%s)" % `data`
-
- def send_flowing_data(self, data):
- print "send_flowing_data(%s)" % `data`
-
- def send_literal_data(self, data):
- print "send_literal_data(%s)" % `data`
-
-
-class DumbWriter(NullWriter):
-
- def __init__(self, file=None, maxcol=72):
- self.file = file or sys.stdout
- self.maxcol = maxcol
- NullWriter.__init__(self)
- self.reset()
-
- def reset(self):
- self.col = 0
- self.atbreak = 0
-
- def send_paragraph(self, blankline):
- self.file.write('\n'*blankline)
- self.col = 0
- self.atbreak = 0
-
- def send_line_break(self):
- self.file.write('\n')
- self.col = 0
- self.atbreak = 0
-
- def send_hor_rule(self, *args, **kw):
- self.file.write('\n')
- self.file.write('-'*self.maxcol)
- self.file.write('\n')
- self.col = 0
- self.atbreak = 0
-
- def send_literal_data(self, data):
- self.file.write(data)
- i = string.rfind(data, '\n')
- if i >= 0:
- self.col = 0
- data = data[i+1:]
- data = string.expandtabs(data)
- self.col = self.col + len(data)
- self.atbreak = 0
-
- def send_flowing_data(self, data):
- if not data: return
- atbreak = self.atbreak or data[0] in string.whitespace
- col = self.col
- maxcol = self.maxcol
- write = self.file.write
- for word in string.split(data):
- if atbreak:
- if col + len(word) >= maxcol:
- write('\n')
- col = 0
- else:
- write(' ')
- col = col + 1
- write(word)
- col = col + len(word)
- atbreak = 1
- self.col = col
- self.atbreak = data[-1] in string.whitespace
-
-
-def test(file = None):
- w = DumbWriter()
- f = AbstractFormatter(w)
- if file:
- fp = open(file)
- elif sys.argv[1:]:
- fp = open(sys.argv[1])
- else:
- fp = sys.stdin
- while 1:
- line = fp.readline()
- if not line:
- break
- if line == '\n':
- f.end_paragraph(1)
- else:
- f.add_flowing_data(line)
- f.end_paragraph(0)
-
-
-if __name__ == '__main__':
- test()
diff --git a/Lib/dos-8x3/gopherli.py b/Lib/dos-8x3/gopherli.py
deleted file mode 100755
index 6965fbd..0000000
--- a/Lib/dos-8x3/gopherli.py
+++ /dev/null
@@ -1,208 +0,0 @@
-"""Gopher protocol client interface."""
-
-import string
-
-# Default selector, host and port
-DEF_SELECTOR = '1/'
-DEF_HOST = 'gopher.micro.umn.edu'
-DEF_PORT = 70
-
-# Recognized file types
-A_TEXT = '0'
-A_MENU = '1'
-A_CSO = '2'
-A_ERROR = '3'
-A_MACBINHEX = '4'
-A_PCBINHEX = '5'
-A_UUENCODED = '6'
-A_INDEX = '7'
-A_TELNET = '8'
-A_BINARY = '9'
-A_DUPLICATE = '+'
-A_SOUND = 's'
-A_EVENT = 'e'
-A_CALENDAR = 'c'
-A_HTML = 'h'
-A_TN3270 = 'T'
-A_MIME = 'M'
-A_IMAGE = 'I'
-A_WHOIS = 'w'
-A_QUERY = 'q'
-A_GIF = 'g'
-A_HTML = 'h' # HTML file
-A_WWW = 'w' # WWW address
-A_PLUS_IMAGE = ':'
-A_PLUS_MOVIE = ';'
-A_PLUS_SOUND = '<'
-
-
-_names = dir()
-_type_to_name_map = {}
-def type_to_name(gtype):
- """Map all file types to strings; unknown types become TYPE='x'."""
- global _type_to_name_map
- if _type_to_name_map=={}:
- for name in _names:
- if name[:2] == 'A_':
- _type_to_name_map[eval(name)] = name[2:]
- if _type_to_name_map.has_key(gtype):
- return _type_to_name_map[gtype]
- return 'TYPE=' + `gtype`
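-
-# For illustration: type_to_name('0') returns 'TEXT', while an unrecognized
-# code such as 'z' comes back as "TYPE='z'".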
-
-# Names for characters and strings
-CRLF = '\r\n'
-TAB = '\t'
-
-def send_selector(selector, host, port = 0):
- """Send a selector to a given host and port, return a file with the reply."""
- import socket
- import string
- if not port:
- i = string.find(host, ':')
- if i >= 0:
- host, port = host[:i], string.atoi(host[i+1:])
- if not port:
- port = DEF_PORT
- elif type(port) == type(''):
- port = string.atoi(port)
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- s.connect((host, port))
- s.send(selector + CRLF)
- s.shutdown(1)
- return s.makefile('rb')
-
-def send_query(selector, query, host, port = 0):
- """Send a selector and a query string."""
- return send_selector(selector + '\t' + query, host, port)
-
-def path_to_selector(path):
- """Takes a path as returned by urlparse and returns the appropriate selector."""
- if path=="/":
- return "/"
- else:
- return path[2:] # Cuts initial slash and data type identifier
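-
-# For illustration: path_to_selector('/1/foo/bar') returns '/foo/bar',
-# dropping the leading slash and the gopher type character '1'.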
-
-def path_to_datatype_name(path):
- """Takes a path as returned by urlparse and maps it to a string.
- See section 3.4 of RFC 1738 for details."""
- if path=="/":
- # No way to tell, although "INDEX" is likely
- return "TYPE='unknown'"
- else:
- return type_to_name(path[1])
-
-# The following functions interpret the data returned by the gopher
-# server according to the expected type, e.g. textfile or directory
-
-def get_directory(f):
- """Get a directory in the form of a list of entries."""
- import string
- list = []
- while 1:
- line = f.readline()
- if not line:
- print '(Unexpected EOF from server)'
- break
- if line[-2:] == CRLF:
- line = line[:-2]
- elif line[-1:] in CRLF:
- line = line[:-1]
- if line == '.':
- break
- if not line:
- print '(Empty line from server)'
- continue
- gtype = line[0]
- parts = string.splitfields(line[1:], TAB)
- if len(parts) < 4:
- print '(Bad line from server:', `line`, ')'
- continue
- if len(parts) > 4:
- if parts[4:] != ['+']:
- print '(Extra info from server:',
- print parts[4:], ')'
- else:
- parts.append('')
- parts.insert(0, gtype)
- list.append(parts)
- return list
-
-def get_textfile(f):
- """Get a text file as a list of lines, with trailing CRLF stripped."""
- list = []
- get_alt_textfile(f, list.append)
- return list
-
-def get_alt_textfile(f, func):
- """Get a text file and pass each line to a function, with trailing CRLF stripped."""
- while 1:
- line = f.readline()
- if not line:
- print '(Unexpected EOF from server)'
- break
- if line[-2:] == CRLF:
- line = line[:-2]
- elif line[-1:] in CRLF:
- line = line[:-1]
- if line == '.':
- break
- if line[:2] == '..':
- line = line[1:]
- func(line)
-
-def get_binary(f):
- """Get a binary file as one solid data block."""
- data = f.read()
- return data
-
-def get_alt_binary(f, func, blocksize):
- """Get a binary file and pass each block to a function."""
- while 1:
- data = f.read(blocksize)
- if not data:
- break
- func(data)
-
-def test():
- """Trivial test program."""
- import sys
- import getopt
- opts, args = getopt.getopt(sys.argv[1:], '')
- selector = DEF_SELECTOR
- type = selector[0]
- host = DEF_HOST
- port = DEF_PORT
- if args:
- host = args[0]
- args = args[1:]
- if args:
- type = args[0]
- args = args[1:]
- if len(type) > 1:
- type, selector = type[0], type
- else:
- selector = ''
- if args:
- selector = args[0]
- args = args[1:]
- query = ''
- if args:
- query = args[0]
- args = args[1:]
- if type == A_INDEX:
- f = send_query(selector, query, host)
- else:
- f = send_selector(selector, host)
- if type == A_TEXT:
- list = get_textfile(f)
- for item in list: print item
- elif type in (A_MENU, A_INDEX):
- list = get_directory(f)
- for item in list: print item
- else:
- data = get_binary(f)
- print 'binary data:', len(data), 'bytes:', `data[:100]`[:40]
-
-# Run the test when run as script
-if __name__ == '__main__':
- test()
diff --git a/Lib/dos-8x3/htmlenti.py b/Lib/dos-8x3/htmlenti.py
deleted file mode 100755
index 6682bf2..0000000
--- a/Lib/dos-8x3/htmlenti.py
+++ /dev/null
@@ -1,257 +0,0 @@
-"""HTML character entity references."""
-
-entitydefs = {
- 'AElig': '\306', # latin capital letter AE = latin capital ligature AE, U+00C6 ISOlat1
- 'Aacute': '\301', # latin capital letter A with acute, U+00C1 ISOlat1
- 'Acirc': '\302', # latin capital letter A with circumflex, U+00C2 ISOlat1
- 'Agrave': '\300', # latin capital letter A with grave = latin capital letter A grave, U+00C0 ISOlat1
- 'Alpha': '&#913;', # greek capital letter alpha, U+0391
- 'Aring': '\305', # latin capital letter A with ring above = latin capital letter A ring, U+00C5 ISOlat1
- 'Atilde': '\303', # latin capital letter A with tilde, U+00C3 ISOlat1
- 'Auml': '\304', # latin capital letter A with diaeresis, U+00C4 ISOlat1
- 'Beta': '&#914;', # greek capital letter beta, U+0392
- 'Ccedil': '\307', # latin capital letter C with cedilla, U+00C7 ISOlat1
- 'Chi': '&#935;', # greek capital letter chi, U+03A7
- 'Dagger': '&#8225;', # double dagger, U+2021 ISOpub
- 'Delta': '&#916;', # greek capital letter delta, U+0394 ISOgrk3
- 'ETH': '\320', # latin capital letter ETH, U+00D0 ISOlat1
- 'Eacute': '\311', # latin capital letter E with acute, U+00C9 ISOlat1
- 'Ecirc': '\312', # latin capital letter E with circumflex, U+00CA ISOlat1
- 'Egrave': '\310', # latin capital letter E with grave, U+00C8 ISOlat1
- 'Epsilon': '&#917;', # greek capital letter epsilon, U+0395
- 'Eta': '&#919;', # greek capital letter eta, U+0397
- 'Euml': '\313', # latin capital letter E with diaeresis, U+00CB ISOlat1
- 'Gamma': '&#915;', # greek capital letter gamma, U+0393 ISOgrk3
- 'Iacute': '\315', # latin capital letter I with acute, U+00CD ISOlat1
- 'Icirc': '\316', # latin capital letter I with circumflex, U+00CE ISOlat1
- 'Igrave': '\314', # latin capital letter I with grave, U+00CC ISOlat1
- 'Iota': '&#921;', # greek capital letter iota, U+0399
- 'Iuml': '\317', # latin capital letter I with diaeresis, U+00CF ISOlat1
- 'Kappa': '&#922;', # greek capital letter kappa, U+039A
- 'Lambda': '&#923;', # greek capital letter lambda, U+039B ISOgrk3
- 'Mu': '&#924;', # greek capital letter mu, U+039C
- 'Ntilde': '\321', # latin capital letter N with tilde, U+00D1 ISOlat1
- 'Nu': '&#925;', # greek capital letter nu, U+039D
- 'OElig': '&#338;', # latin capital ligature OE, U+0152 ISOlat2
- 'Oacute': '\323', # latin capital letter O with acute, U+00D3 ISOlat1
- 'Ocirc': '\324', # latin capital letter O with circumflex, U+00D4 ISOlat1
- 'Ograve': '\322', # latin capital letter O with grave, U+00D2 ISOlat1
- 'Omega': '&#937;', # greek capital letter omega, U+03A9 ISOgrk3
- 'Omicron': '&#927;', # greek capital letter omicron, U+039F
- 'Oslash': '\330', # latin capital letter O with stroke = latin capital letter O slash, U+00D8 ISOlat1
- 'Otilde': '\325', # latin capital letter O with tilde, U+00D5 ISOlat1
- 'Ouml': '\326', # latin capital letter O with diaeresis, U+00D6 ISOlat1
- 'Phi': '&#934;', # greek capital letter phi, U+03A6 ISOgrk3
- 'Pi': '&#928;', # greek capital letter pi, U+03A0 ISOgrk3
- 'Prime': '&#8243;', # double prime = seconds = inches, U+2033 ISOtech
- 'Psi': '&#936;', # greek capital letter psi, U+03A8 ISOgrk3
- 'Rho': '&#929;', # greek capital letter rho, U+03A1
- 'Scaron': '&#352;', # latin capital letter S with caron, U+0160 ISOlat2
- 'Sigma': '&#931;', # greek capital letter sigma, U+03A3 ISOgrk3
- 'THORN': '\336', # latin capital letter THORN, U+00DE ISOlat1
- 'Tau': '&#932;', # greek capital letter tau, U+03A4
- 'Theta': '&#920;', # greek capital letter theta, U+0398 ISOgrk3
- 'Uacute': '\332', # latin capital letter U with acute, U+00DA ISOlat1
- 'Ucirc': '\333', # latin capital letter U with circumflex, U+00DB ISOlat1
- 'Ugrave': '\331', # latin capital letter U with grave, U+00D9 ISOlat1
- 'Upsilon': '&#933;', # greek capital letter upsilon, U+03A5 ISOgrk3
- 'Uuml': '\334', # latin capital letter U with diaeresis, U+00DC ISOlat1
- 'Xi': '&#926;', # greek capital letter xi, U+039E ISOgrk3
- 'Yacute': '\335', # latin capital letter Y with acute, U+00DD ISOlat1
- 'Yuml': '&#376;', # latin capital letter Y with diaeresis, U+0178 ISOlat2
- 'Zeta': '&#918;', # greek capital letter zeta, U+0396
- 'aacute': '\341', # latin small letter a with acute, U+00E1 ISOlat1
- 'acirc': '\342', # latin small letter a with circumflex, U+00E2 ISOlat1
- 'acute': '\264', # acute accent = spacing acute, U+00B4 ISOdia
- 'aelig': '\346', # latin small letter ae = latin small ligature ae, U+00E6 ISOlat1
- 'agrave': '\340', # latin small letter a with grave = latin small letter a grave, U+00E0 ISOlat1
- 'alefsym': '&#8501;', # alef symbol = first transfinite cardinal, U+2135 NEW
- 'alpha': '&#945;', # greek small letter alpha, U+03B1 ISOgrk3
- 'amp': '\46', # ampersand, U+0026 ISOnum
- 'and': '&#8743;', # logical and = wedge, U+2227 ISOtech
- 'ang': '&#8736;', # angle, U+2220 ISOamso
- 'aring': '\345', # latin small letter a with ring above = latin small letter a ring, U+00E5 ISOlat1
- 'asymp': '&#8776;', # almost equal to = asymptotic to, U+2248 ISOamsr
- 'atilde': '\343', # latin small letter a with tilde, U+00E3 ISOlat1
- 'auml': '\344', # latin small letter a with diaeresis, U+00E4 ISOlat1
- 'bdquo': '&#8222;', # double low-9 quotation mark, U+201E NEW
- 'beta': '&#946;', # greek small letter beta, U+03B2 ISOgrk3
- 'brvbar': '\246', # broken bar = broken vertical bar, U+00A6 ISOnum
- 'bull': '&#8226;', # bullet = black small circle, U+2022 ISOpub
- 'cap': '&#8745;', # intersection = cap, U+2229 ISOtech
- 'ccedil': '\347', # latin small letter c with cedilla, U+00E7 ISOlat1
- 'cedil': '\270', # cedilla = spacing cedilla, U+00B8 ISOdia
- 'cent': '\242', # cent sign, U+00A2 ISOnum
- 'chi': '&#967;', # greek small letter chi, U+03C7 ISOgrk3
- 'circ': '&#710;', # modifier letter circumflex accent, U+02C6 ISOpub
- 'clubs': '&#9827;', # black club suit = shamrock, U+2663 ISOpub
- 'cong': '&#8773;', # approximately equal to, U+2245 ISOtech
- 'copy': '\251', # copyright sign, U+00A9 ISOnum
- 'crarr': '&#8629;', # downwards arrow with corner leftwards = carriage return, U+21B5 NEW
- 'cup': '&#8746;', # union = cup, U+222A ISOtech
- 'curren': '\244', # currency sign, U+00A4 ISOnum
- 'dArr': '&#8659;', # downwards double arrow, U+21D3 ISOamsa
- 'dagger': '&#8224;', # dagger, U+2020 ISOpub
- 'darr': '&#8595;', # downwards arrow, U+2193 ISOnum
- 'deg': '\260', # degree sign, U+00B0 ISOnum
- 'delta': '&#948;', # greek small letter delta, U+03B4 ISOgrk3
- 'diams': '&#9830;', # black diamond suit, U+2666 ISOpub
- 'divide': '\367', # division sign, U+00F7 ISOnum
- 'eacute': '\351', # latin small letter e with acute, U+00E9 ISOlat1
- 'ecirc': '\352', # latin small letter e with circumflex, U+00EA ISOlat1
- 'egrave': '\350', # latin small letter e with grave, U+00E8 ISOlat1
- 'empty': '&#8709;', # empty set = null set = diameter, U+2205 ISOamso
- 'emsp': '&#8195;', # em space, U+2003 ISOpub
- 'ensp': '&#8194;', # en space, U+2002 ISOpub
- 'epsilon': '&#949;', # greek small letter epsilon, U+03B5 ISOgrk3
- 'equiv': '&#8801;', # identical to, U+2261 ISOtech
- 'eta': '&#951;', # greek small letter eta, U+03B7 ISOgrk3
- 'eth': '\360', # latin small letter eth, U+00F0 ISOlat1
- 'euml': '\353', # latin small letter e with diaeresis, U+00EB ISOlat1
- 'euro': '&#8364;', # euro sign, U+20AC NEW
- 'exist': '&#8707;', # there exists, U+2203 ISOtech
- 'fnof': '&#402;', # latin small f with hook = function = florin, U+0192 ISOtech
- 'forall': '&#8704;', # for all, U+2200 ISOtech
- 'frac12': '\275', # vulgar fraction one half = fraction one half, U+00BD ISOnum
- 'frac14': '\274', # vulgar fraction one quarter = fraction one quarter, U+00BC ISOnum
- 'frac34': '\276', # vulgar fraction three quarters = fraction three quarters, U+00BE ISOnum
- 'frasl': '&#8260;', # fraction slash, U+2044 NEW
- 'gamma': '&#947;', # greek small letter gamma, U+03B3 ISOgrk3
- 'ge': '&#8805;', # greater-than or equal to, U+2265 ISOtech
- 'gt': '\76', # greater-than sign, U+003E ISOnum
- 'hArr': '&#8660;', # left right double arrow, U+21D4 ISOamsa
- 'harr': '&#8596;', # left right arrow, U+2194 ISOamsa
- 'hearts': '&#9829;', # black heart suit = valentine, U+2665 ISOpub
- 'hellip': '&#8230;', # horizontal ellipsis = three dot leader, U+2026 ISOpub
- 'iacute': '\355', # latin small letter i with acute, U+00ED ISOlat1
- 'icirc': '\356', # latin small letter i with circumflex, U+00EE ISOlat1
- 'iexcl': '\241', # inverted exclamation mark, U+00A1 ISOnum
- 'igrave': '\354', # latin small letter i with grave, U+00EC ISOlat1
- 'image': '&#8465;', # blackletter capital I = imaginary part, U+2111 ISOamso
- 'infin': '&#8734;', # infinity, U+221E ISOtech
- 'int': '&#8747;', # integral, U+222B ISOtech
- 'iota': '&#953;', # greek small letter iota, U+03B9 ISOgrk3
- 'iquest': '\277', # inverted question mark = turned question mark, U+00BF ISOnum
- 'isin': '&#8712;', # element of, U+2208 ISOtech
- 'iuml': '\357', # latin small letter i with diaeresis, U+00EF ISOlat1
- 'kappa': '&#954;', # greek small letter kappa, U+03BA ISOgrk3
- 'lArr': '&#8656;', # leftwards double arrow, U+21D0 ISOtech
- 'lambda': '&#955;', # greek small letter lambda, U+03BB ISOgrk3
- 'lang': '&#9001;', # left-pointing angle bracket = bra, U+2329 ISOtech
- 'laquo': '\253', # left-pointing double angle quotation mark = left pointing guillemet, U+00AB ISOnum
- 'larr': '&#8592;', # leftwards arrow, U+2190 ISOnum
- 'lceil': '&#8968;', # left ceiling = apl upstile, U+2308 ISOamsc
- 'ldquo': '&#8220;', # left double quotation mark, U+201C ISOnum
- 'le': '&#8804;', # less-than or equal to, U+2264 ISOtech
- 'lfloor': '&#8970;', # left floor = apl downstile, U+230A ISOamsc
- 'lowast': '&#8727;', # asterisk operator, U+2217 ISOtech
- 'loz': '&#9674;', # lozenge, U+25CA ISOpub
- 'lrm': '&#8206;', # left-to-right mark, U+200E NEW RFC 2070
- 'lsaquo': '&#8249;', # single left-pointing angle quotation mark, U+2039 ISO proposed
- 'lsquo': '&#8216;', # left single quotation mark, U+2018 ISOnum
- 'lt': '\74', # less-than sign, U+003C ISOnum
- 'macr': '\257', # macron = spacing macron = overline = APL overbar, U+00AF ISOdia
- 'mdash': '&#8212;', # em dash, U+2014 ISOpub
- 'micro': '\265', # micro sign, U+00B5 ISOnum
- 'middot': '\267', # middle dot = Georgian comma = Greek middle dot, U+00B7 ISOnum
- 'minus': '&#8722;', # minus sign, U+2212 ISOtech
- 'mu': '&#956;', # greek small letter mu, U+03BC ISOgrk3
- 'nabla': '&#8711;', # nabla = backward difference, U+2207 ISOtech
- 'nbsp': '\240', # no-break space = non-breaking space, U+00A0 ISOnum
- 'ndash': '&#8211;', # en dash, U+2013 ISOpub
- 'ne': '&#8800;', # not equal to, U+2260 ISOtech
- 'ni': '&#8715;', # contains as member, U+220B ISOtech
- 'not': '\254', # not sign, U+00AC ISOnum
- 'notin': '&#8713;', # not an element of, U+2209 ISOtech
- 'nsub': '&#8836;', # not a subset of, U+2284 ISOamsn
- 'ntilde': '\361', # latin small letter n with tilde, U+00F1 ISOlat1
- 'nu': '&#957;', # greek small letter nu, U+03BD ISOgrk3
- 'oacute': '\363', # latin small letter o with acute, U+00F3 ISOlat1
- 'ocirc': '\364', # latin small letter o with circumflex, U+00F4 ISOlat1
- 'oelig': '&#339;', # latin small ligature oe, U+0153 ISOlat2
- 'ograve': '\362', # latin small letter o with grave, U+00F2 ISOlat1
- 'oline': '&#8254;', # overline = spacing overscore, U+203E NEW
- 'omega': '&#969;', # greek small letter omega, U+03C9 ISOgrk3
- 'omicron': '&#959;', # greek small letter omicron, U+03BF NEW
- 'oplus': '&#8853;', # circled plus = direct sum, U+2295 ISOamsb
- 'or': '&#8744;', # logical or = vee, U+2228 ISOtech
- 'ordf': '\252', # feminine ordinal indicator, U+00AA ISOnum
- 'ordm': '\272', # masculine ordinal indicator, U+00BA ISOnum
- 'oslash': '\370', # latin small letter o with stroke, = latin small letter o slash, U+00F8 ISOlat1
- 'otilde': '\365', # latin small letter o with tilde, U+00F5 ISOlat1
- 'otimes': '&#8855;', # circled times = vector product, U+2297 ISOamsb
- 'ouml': '\366', # latin small letter o with diaeresis, U+00F6 ISOlat1
- 'para': '\266', # pilcrow sign = paragraph sign, U+00B6 ISOnum
- 'part': '&#8706;', # partial differential, U+2202 ISOtech
- 'permil': '&#8240;', # per mille sign, U+2030 ISOtech
- 'perp': '&#8869;', # up tack = orthogonal to = perpendicular, U+22A5 ISOtech
- 'phi': '&#966;', # greek small letter phi, U+03C6 ISOgrk3
- 'pi': '&#960;', # greek small letter pi, U+03C0 ISOgrk3
- 'piv': '&#982;', # greek pi symbol, U+03D6 ISOgrk3
- 'plusmn': '\261', # plus-minus sign = plus-or-minus sign, U+00B1 ISOnum
- 'pound': '\243', # pound sign, U+00A3 ISOnum
- 'prime': '&#8242;', # prime = minutes = feet, U+2032 ISOtech
- 'prod': '&#8719;', # n-ary product = product sign, U+220F ISOamsb
- 'prop': '&#8733;', # proportional to, U+221D ISOtech
- 'psi': '&#968;', # greek small letter psi, U+03C8 ISOgrk3
- 'quot': '\42', # quotation mark = APL quote, U+0022 ISOnum
- 'rArr': '&#8658;', # rightwards double arrow, U+21D2 ISOtech
- 'radic': '&#8730;', # square root = radical sign, U+221A ISOtech
- 'rang': '&#9002;', # right-pointing angle bracket = ket, U+232A ISOtech
- 'raquo': '\273', # right-pointing double angle quotation mark = right pointing guillemet, U+00BB ISOnum
- 'rarr': '&#8594;', # rightwards arrow, U+2192 ISOnum
- 'rceil': '&#8969;', # right ceiling, U+2309 ISOamsc
- 'rdquo': '&#8221;', # right double quotation mark, U+201D ISOnum
- 'real': '&#8476;', # blackletter capital R = real part symbol, U+211C ISOamso
- 'reg': '\256', # registered sign = registered trade mark sign, U+00AE ISOnum
- 'rfloor': '&#8971;', # right floor, U+230B ISOamsc
- 'rho': '&#961;', # greek small letter rho, U+03C1 ISOgrk3
- 'rlm': '&#8207;', # right-to-left mark, U+200F NEW RFC 2070
- 'rsaquo': '&#8250;', # single right-pointing angle quotation mark, U+203A ISO proposed
- 'rsquo': '&#8217;', # right single quotation mark, U+2019 ISOnum
- 'sbquo': '&#8218;', # single low-9 quotation mark, U+201A NEW
- 'scaron': '&#353;', # latin small letter s with caron, U+0161 ISOlat2
- 'sdot': '&#8901;', # dot operator, U+22C5 ISOamsb
- 'sect': '\247', # section sign, U+00A7 ISOnum
- 'shy': '\255', # soft hyphen = discretionary hyphen, U+00AD ISOnum
- 'sigma': '&#963;', # greek small letter sigma, U+03C3 ISOgrk3
- 'sigmaf': '&#962;', # greek small letter final sigma, U+03C2 ISOgrk3
- 'sim': '&#8764;', # tilde operator = varies with = similar to, U+223C ISOtech
- 'spades': '&#9824;', # black spade suit, U+2660 ISOpub
- 'sub': '&#8834;', # subset of, U+2282 ISOtech
- 'sube': '&#8838;', # subset of or equal to, U+2286 ISOtech
-    'sum': '&#8721;', # n-ary summation, U+2211 ISOamsb
- 'sup': '&#8835;', # superset of, U+2283 ISOtech
- 'sup1': '\271', # superscript one = superscript digit one, U+00B9 ISOnum
- 'sup2': '\262', # superscript two = superscript digit two = squared, U+00B2 ISOnum
- 'sup3': '\263', # superscript three = superscript digit three = cubed, U+00B3 ISOnum
- 'supe': '&#8839;', # superset of or equal to, U+2287 ISOtech
- 'szlig': '\337', # latin small letter sharp s = ess-zed, U+00DF ISOlat1
- 'tau': '&#964;', # greek small letter tau, U+03C4 ISOgrk3
- 'there4': '&#8756;', # therefore, U+2234 ISOtech
- 'theta': '&#952;', # greek small letter theta, U+03B8 ISOgrk3
- 'thetasym': '&#977;', # greek small letter theta symbol, U+03D1 NEW
- 'thinsp': '&#8201;', # thin space, U+2009 ISOpub
- 'thorn': '\376', # latin small letter thorn with, U+00FE ISOlat1
- 'tilde': '&#732;', # small tilde, U+02DC ISOdia
- 'times': '\327', # multiplication sign, U+00D7 ISOnum
- 'trade': '&#8482;', # trade mark sign, U+2122 ISOnum
- 'uArr': '&#8657;', # upwards double arrow, U+21D1 ISOamsa
- 'uacute': '\372', # latin small letter u with acute, U+00FA ISOlat1
- 'uarr': '&#8593;', # upwards arrow, U+2191 ISOnum
- 'ucirc': '\373', # latin small letter u with circumflex, U+00FB ISOlat1
- 'ugrave': '\371', # latin small letter u with grave, U+00F9 ISOlat1
- 'uml': '\250', # diaeresis = spacing diaeresis, U+00A8 ISOdia
- 'upsih': '&#978;', # greek upsilon with hook symbol, U+03D2 NEW
- 'upsilon': '&#965;', # greek small letter upsilon, U+03C5 ISOgrk3
- 'uuml': '\374', # latin small letter u with diaeresis, U+00FC ISOlat1
- 'weierp': '&#8472;', # script capital P = power set = Weierstrass p, U+2118 ISOamso
- 'xi': '&#958;', # greek small letter xi, U+03BE ISOgrk3
- 'yacute': '\375', # latin small letter y with acute, U+00FD ISOlat1
- 'yen': '\245', # yen sign = yuan sign, U+00A5 ISOnum
- 'yuml': '\377', # latin small letter y with diaeresis, U+00FF ISOlat1
- 'zeta': '&#950;', # greek small letter zeta, U+03B6 ISOgrk3
- 'zwj': '&#8205;', # zero width joiner, U+200D NEW RFC 2070
- 'zwnj': '&#8204;', # zero width non-joiner, U+200C NEW RFC 2070
-
-}
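
The table that ends above (the 8.3-named copy of htmlentitydefs) maps each entity name either to an octal Latin-1 escape or, for characters outside Latin-1, to a numeric character reference. A minimal sketch of the same lookups with the modern html package, assuming Python 3:

    # Modern equivalent of the deleted table (Python 3 assumed).
    from html import unescape
    from html.entities import name2codepoint

    print(name2codepoint['sum'])           # 8721, i.e. U+2211
    print(chr(name2codepoint['rarr']))     # the rightwards-arrow character
    print(unescape('x &rarr; &infin;'))    # expands named references in text
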
diff --git a/Lib/dos-8x3/linecach.py b/Lib/dos-8x3/linecach.py
deleted file mode 100755
index bca40b2..0000000
--- a/Lib/dos-8x3/linecach.py
+++ /dev/null
@@ -1,92 +0,0 @@
-"""Cache lines from files.
-
-This is intended to read lines from modules imported -- hence if a filename
-is not found, it will look down the module search path for a file by
-that name.
-"""
-
-import sys
-import os
-from stat import *
-
-def getline(filename, lineno):
- lines = getlines(filename)
- if 1 <= lineno <= len(lines):
- return lines[lineno-1]
- else:
- return ''
-
-
-# The cache
-
-cache = {} # The cache
-
-
-def clearcache():
- """Clear the cache entirely."""
-
- global cache
- cache = {}
-
-
-def getlines(filename):
- """Get the lines for a file from the cache.
- Update the cache if it doesn't contain an entry for this file already."""
-
- if cache.has_key(filename):
- return cache[filename][2]
- else:
- return updatecache(filename)
-
-
-def checkcache():
- """Discard cache entries that are out of date.
- (This is not checked upon each call!)"""
-
- for filename in cache.keys():
- size, mtime, lines, fullname = cache[filename]
- try:
- stat = os.stat(fullname)
- except os.error:
- del cache[filename]
- continue
- if size <> stat[ST_SIZE] or mtime <> stat[ST_MTIME]:
- del cache[filename]
-
-
-def updatecache(filename):
- """Update a cache entry and return its list of lines.
- If something's wrong, print a message, discard the cache entry,
- and return an empty list."""
-
- if cache.has_key(filename):
- del cache[filename]
- if not filename or filename[0] + filename[-1] == '<>':
- return []
- fullname = filename
- try:
- stat = os.stat(fullname)
- except os.error, msg:
- # Try looking through the module search path
- basename = os.path.split(filename)[1]
- for dirname in sys.path:
- fullname = os.path.join(dirname, basename)
- try:
- stat = os.stat(fullname)
- break
- except os.error:
- pass
- else:
- # No luck
-## print '*** Cannot stat', filename, ':', msg
- return []
- try:
- fp = open(fullname, 'r')
- lines = fp.readlines()
- fp.close()
- except IOError, msg:
-## print '*** Cannot open', fullname, ':', msg
- return []
- size, mtime = stat[ST_SIZE], stat[ST_MTIME]
- cache[filename] = size, mtime, lines, fullname
- return lines
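
The file removed above is only the 8.3-named duplicate; the standard linecache module keeps the same interface. A small sketch, assuming Python 3 and a hypothetical file name:

    # Same interface via the surviving linecache module (Python 3 assumed);
    # 'example.py' is a hypothetical path.
    import linecache

    first = linecache.getline('example.py', 1)   # '' if file or line is missing
    print(repr(first))
    linecache.checkcache('example.py')           # drop the entry if it went stale
    linecache.clearcache()                       # forget everything cached
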
diff --git a/Lib/dos-8x3/macurl2p.py b/Lib/dos-8x3/macurl2p.py
deleted file mode 100755
index c971eda..0000000
--- a/Lib/dos-8x3/macurl2p.py
+++ /dev/null
@@ -1,94 +0,0 @@
-"""Macintosh-specific module for conversion between pathnames and URLs.
-
-Do not import directly; use urllib instead."""
-
-import string
-import urllib
-import os
-
-def url2pathname(pathname):
- "Convert /-delimited pathname to mac pathname"
- #
- # XXXX The .. handling should be fixed...
- #
- tp = urllib.splittype(pathname)[0]
- if tp and tp <> 'file':
- raise RuntimeError, 'Cannot convert non-local URL to pathname'
- # Turn starting /// into /, an empty hostname means current host
- if pathname[:3] == '///':
- pathname = pathname[2:]
- elif pathname[:2] == '//':
- raise RuntimeError, 'Cannot convert non-local URL to pathname'
- components = string.split(pathname, '/')
- # Remove . and embedded ..
- i = 0
- while i < len(components):
- if components[i] == '.':
- del components[i]
- elif components[i] == '..' and i > 0 and \
- components[i-1] not in ('', '..'):
- del components[i-1:i+1]
- i = i-1
- elif components[i] == '' and i > 0 and components[i-1] <> '':
- del components[i]
- else:
- i = i+1
- if not components[0]:
- # Absolute unix path, don't start with colon
- rv = string.join(components[1:], ':')
- else:
- # relative unix path, start with colon. First replace
- # leading .. by empty strings (giving ::file)
- i = 0
- while i < len(components) and components[i] == '..':
- components[i] = ''
- i = i + 1
- rv = ':' + string.join(components, ':')
- # and finally unquote slashes and other funny characters
- return urllib.unquote(rv)
-
-def pathname2url(pathname):
- "convert mac pathname to /-delimited pathname"
- if '/' in pathname:
- raise RuntimeError, "Cannot convert pathname containing slashes"
- components = string.split(pathname, ':')
- # Remove empty first and/or last component
- if components[0] == '':
- del components[0]
- if components[-1] == '':
- del components[-1]
- # Replace empty string ('::') by .. (will result in '/../' later)
- for i in range(len(components)):
- if components[i] == '':
- components[i] = '..'
- # Truncate names longer than 31 bytes
- components = map(_pncomp2url, components)
-
- if os.path.isabs(pathname):
- return '/' + string.join(components, '/')
- else:
- return string.join(components, '/')
-
-def _pncomp2url(component):
- component = urllib.quote(component[:31], safe='') # We want to quote slashes
- return component
-
-def test():
- for url in ["index.html",
- "bar/index.html",
- "/foo/bar/index.html",
- "/foo/bar/",
- "/"]:
- print `url`, '->', `url2pathname(url)`
- for path in ["drive:",
- "drive:dir:",
- "drive:dir:file",
- "drive:file",
- "file",
- ":file",
- ":dir:",
- ":dir:file"]:
- print `path`, '->', `pathname2url(path)`
-
-if __name__ == '__main__':
- test()
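
The deleted module only ever ran under classic MacPython, where urllib dispatched to it. Purely as an illustration of the mapping its docstring and test() describe (not runnable on current systems), the expected results are:

    # Illustration only: assumes classic MacPython, where this module was
    # importable as macurl2path; expected results shown in comments.
    import macurl2path

    macurl2path.url2pathname('/foo/bar/index.html')   # -> 'foo:bar:index.html'
    macurl2path.url2pathname('bar/index.html')        # -> ':bar:index.html'
    macurl2path.pathname2url('drive:dir:file')        # -> '/drive/dir/file'
    macurl2path.pathname2url(':dir:file')             # -> 'dir/file'
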
diff --git a/Lib/dos-8x3/mimetool.py b/Lib/dos-8x3/mimetool.py
deleted file mode 100755
index da955af..0000000
--- a/Lib/dos-8x3/mimetool.py
+++ /dev/null
@@ -1,229 +0,0 @@
-"""Various tools used by MIME-reading or MIME-writing programs."""
-
-
-import os
-import rfc822
-import string
-import tempfile
-
-
-class Message(rfc822.Message):
- """A derived class of rfc822.Message that knows about MIME headers and
- contains some hooks for decoding encoded and multipart messages."""
-
- def __init__(self, fp, seekable = 1):
- rfc822.Message.__init__(self, fp, seekable)
- self.encodingheader = \
- self.getheader('content-transfer-encoding')
- self.typeheader = \
- self.getheader('content-type')
- self.parsetype()
- self.parseplist()
-
- def parsetype(self):
- str = self.typeheader
- if str == None:
- str = 'text/plain'
- if ';' in str:
- i = string.index(str, ';')
- self.plisttext = str[i:]
- str = str[:i]
- else:
- self.plisttext = ''
- fields = string.splitfields(str, '/')
- for i in range(len(fields)):
- fields[i] = string.lower(string.strip(fields[i]))
- self.type = string.joinfields(fields, '/')
- self.maintype = fields[0]
- self.subtype = string.joinfields(fields[1:], '/')
-
- def parseplist(self):
- str = self.plisttext
- self.plist = []
- while str[:1] == ';':
- str = str[1:]
- if ';' in str:
- # XXX Should parse quotes!
- end = string.index(str, ';')
- else:
- end = len(str)
- f = str[:end]
- if '=' in f:
- i = string.index(f, '=')
- f = string.lower(string.strip(f[:i])) + \
- '=' + string.strip(f[i+1:])
- self.plist.append(string.strip(f))
- str = str[end:]
-
- def getplist(self):
- return self.plist
-
- def getparam(self, name):
- name = string.lower(name) + '='
- n = len(name)
- for p in self.plist:
- if p[:n] == name:
- return rfc822.unquote(p[n:])
- return None
-
- def getparamnames(self):
- result = []
- for p in self.plist:
- i = string.find(p, '=')
- if i >= 0:
- result.append(string.lower(p[:i]))
- return result
-
- def getencoding(self):
- if self.encodingheader == None:
- return '7bit'
- return string.lower(self.encodingheader)
-
- def gettype(self):
- return self.type
-
- def getmaintype(self):
- return self.maintype
-
- def getsubtype(self):
- return self.subtype
-
-
-
-
-# Utility functions
-# -----------------
-
-
-_prefix = None
-
-def choose_boundary():
- """Return a random string usable as a multipart boundary.
- The method used is so that it is *very* unlikely that the same
- string of characters will every occur again in the Universe,
- so the caller needn't check the data it is packing for the
- occurrence of the boundary.
-
- The boundary contains dots so you have to quote it in the header."""
-
- global _prefix
- import time
- import random
- if _prefix == None:
- import socket
- import os
- hostid = socket.gethostbyname(socket.gethostname())
- try:
- uid = `os.getuid()`
- except:
- uid = '1'
- try:
- pid = `os.getpid()`
- except:
- pid = '1'
- _prefix = hostid + '.' + uid + '.' + pid
- timestamp = '%.3f' % time.time()
- seed = `random.randint(0, 32767)`
- return _prefix + '.' + timestamp + '.' + seed
-
-
-# Subroutines for decoding some common content-transfer-types
-
-def decode(input, output, encoding):
- """Decode common content-transfer-encodings (base64, quopri, uuencode)."""
- if encoding == 'base64':
- import base64
- return base64.decode(input, output)
- if encoding == 'quoted-printable':
- import quopri
- return quopri.decode(input, output)
- if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'):
- import uu
- return uu.decode(input, output)
- if encoding in ('7bit', '8bit'):
- return output.write(input.read())
- if decodetab.has_key(encoding):
- pipethrough(input, decodetab[encoding], output)
- else:
- raise ValueError, \
- 'unknown Content-Transfer-Encoding: %s' % encoding
-
-def encode(input, output, encoding):
- """Encode common content-transfer-encodings (base64, quopri, uuencode)."""
- if encoding == 'base64':
- import base64
- return base64.encode(input, output)
- if encoding == 'quoted-printable':
- import quopri
- return quopri.encode(input, output, 0)
- if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'):
- import uu
- return uu.encode(input, output)
- if encoding in ('7bit', '8bit'):
- return output.write(input.read())
- if encodetab.has_key(encoding):
- pipethrough(input, encodetab[encoding], output)
- else:
- raise ValueError, \
- 'unknown Content-Transfer-Encoding: %s' % encoding
-
-# The following is no longer used for standard encodings
-
-# XXX This requires that uudecode and mmencode are in $PATH
-
-uudecode_pipe = '''(
-TEMP=/tmp/@uu.$$
-sed "s%^begin [0-7][0-7]* .*%begin 600 $TEMP%" | uudecode
-cat $TEMP
-rm $TEMP
-)'''
-
-decodetab = {
- 'uuencode': uudecode_pipe,
- 'x-uuencode': uudecode_pipe,
- 'uue': uudecode_pipe,
- 'x-uue': uudecode_pipe,
- 'quoted-printable': 'mmencode -u -q',
- 'base64': 'mmencode -u -b',
-}
-
-encodetab = {
- 'x-uuencode': 'uuencode tempfile',
- 'uuencode': 'uuencode tempfile',
- 'x-uue': 'uuencode tempfile',
- 'uue': 'uuencode tempfile',
- 'quoted-printable': 'mmencode -q',
- 'base64': 'mmencode -b',
-}
-
-def pipeto(input, command):
- pipe = os.popen(command, 'w')
- copyliteral(input, pipe)
- pipe.close()
-
-def pipethrough(input, command, output):
- tempname = tempfile.mktemp()
- try:
- temp = open(tempname, 'w')
- except IOError:
- print '*** Cannot create temp file', `tempname`
- return
- copyliteral(input, temp)
- temp.close()
- pipe = os.popen(command + ' <' + tempname, 'r')
- copybinary(pipe, output)
- pipe.close()
- os.unlink(tempname)
-
-def copyliteral(input, output):
- while 1:
- line = input.readline()
- if not line: break
- output.write(line)
-
-def copybinary(input, output):
- BUFSIZE = 8192
- while 1:
- line = input.read(BUFSIZE)
- if not line: break
- output.write(line)
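
The decode()/encode() helpers above copy one open file to another while reversing or applying a Content-Transfer-Encoding. A Python 2-era sketch (the module predates Python 3; the file names are hypothetical):

    # Python 2-era sketch of mimetools.decode(); 'part.b64' and 'part.bin'
    # are hypothetical file names.
    import mimetools

    inp = open('part.b64', 'rb')
    out = open('part.bin', 'wb')
    mimetools.decode(inp, out, 'base64')      # also handles quoted-printable, uuencode
    inp.close()
    out.close()

    print mimetools.choose_boundary()         # host-ip.uid.pid.timestamp.random
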
diff --git a/Lib/dos-8x3/mimetype.py b/Lib/dos-8x3/mimetype.py
deleted file mode 100644
index 9dc3645..0000000
--- a/Lib/dos-8x3/mimetype.py
+++ /dev/null
@@ -1,237 +0,0 @@
-"""Guess the MIME type of a file.
-
-This module defines two useful functions:
-
-guess_type(url) -- guess the MIME type and encoding of a URL.
-
-guess_extension(type) -- guess the extension for a given MIME type.
-
-It also contains the following, for tuning the behavior:
-
-Data:
-
-knownfiles -- list of files to parse
-inited -- flag set when init() has been called
-suffixes_map -- dictionary mapping suffixes to suffixes
-encodings_map -- dictionary mapping suffixes to encodings
-types_map -- dictionary mapping suffixes to types
-
-Functions:
-
-init([files]) -- parse a list of files, default knownfiles
-read_mime_types(file) -- parse one file, return a dictionary or None
-
-"""
-
-import string
-import posixpath
-import urllib
-
-knownfiles = [
- "/usr/local/etc/httpd/conf/mime.types",
- "/usr/local/lib/netscape/mime.types",
- "/usr/local/etc/httpd/conf/mime.types", # Apache 1.2
- "/usr/local/etc/mime.types", # Apache 1.3
- ]
-
-inited = 0
-
-def guess_type(url):
- """Guess the type of a file based on its URL.
-
- Return value is a tuple (type, encoding) where type is None if the
- type can't be guessed (no or unknown suffix) or a string of the
- form type/subtype, usable for a MIME Content-type header; and
- encoding is None for no encoding or the name of the program used
- to encode (e.g. compress or gzip). The mappings are table
- driven. Encoding suffixes are case sensitive; type suffixes are
- first tried case sensitive, then case insensitive.
-
- The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped
- to ".tar.gz". (This is table-driven too, using the dictionary
- suffix_map).
-
- """
- if not inited:
- init()
- scheme, url = urllib.splittype(url)
- if scheme == 'data':
- # syntax of data URLs:
- # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data
- # mediatype := [ type "/" subtype ] *( ";" parameter )
- # data := *urlchar
- # parameter := attribute "=" value
- # type/subtype defaults to "text/plain"
- comma = string.find(url, ',')
- if comma < 0:
- # bad data URL
- return None, None
- semi = string.find(url, ';', 0, comma)
- if semi >= 0:
- type = url[:semi]
- else:
- type = url[:comma]
- if '=' in type or '/' not in type:
- type = 'text/plain'
- return type, None # never compressed, so encoding is None
- base, ext = posixpath.splitext(url)
- while suffix_map.has_key(ext):
- base, ext = posixpath.splitext(base + suffix_map[ext])
- if encodings_map.has_key(ext):
- encoding = encodings_map[ext]
- base, ext = posixpath.splitext(base)
- else:
- encoding = None
- if types_map.has_key(ext):
- return types_map[ext], encoding
- elif types_map.has_key(string.lower(ext)):
- return types_map[string.lower(ext)], encoding
- else:
- return None, encoding
-
-def guess_extension(type):
- """Guess the extension for a file based on its MIME type.
-
- Return value is a string giving a filename extension, including the
- leading dot ('.'). The extension is not guaranteed to have been
- associated with any particular data stream, but would be mapped to the
- MIME type `type' by guess_type(). If no extension can be guessed for
- `type', None is returned.
- """
- global inited
- if not inited:
- init()
- type = string.lower(type)
- for ext, stype in types_map.items():
- if type == stype:
- return ext
- return None
-
-def init(files=None):
- global inited
- for file in files or knownfiles:
- s = read_mime_types(file)
- if s:
- for key, value in s.items():
- types_map[key] = value
- inited = 1
-
-def read_mime_types(file):
- try:
- f = open(file)
- except IOError:
- return None
- map = {}
- while 1:
- line = f.readline()
- if not line: break
- words = string.split(line)
- for i in range(len(words)):
- if words[i][0] == '#':
- del words[i:]
- break
- if not words: continue
- type, suffixes = words[0], words[1:]
- for suff in suffixes:
- map['.'+suff] = type
- f.close()
- return map
-
-suffix_map = {
- '.tgz': '.tar.gz',
- '.taz': '.tar.gz',
- '.tz': '.tar.gz',
-}
-
-encodings_map = {
- '.gz': 'gzip',
- '.Z': 'compress',
- }
-
-types_map = {
- '.a': 'application/octet-stream',
- '.ai': 'application/postscript',
- '.aif': 'audio/x-aiff',
- '.aifc': 'audio/x-aiff',
- '.aiff': 'audio/x-aiff',
- '.au': 'audio/basic',
- '.avi': 'video/x-msvideo',
- '.bcpio': 'application/x-bcpio',
- '.bin': 'application/octet-stream',
- '.cdf': 'application/x-netcdf',
- '.cpio': 'application/x-cpio',
- '.csh': 'application/x-csh',
- '.dll': 'application/octet-stream',
- '.dvi': 'application/x-dvi',
- '.exe': 'application/octet-stream',
- '.eps': 'application/postscript',
- '.etx': 'text/x-setext',
- '.gif': 'image/gif',
- '.gtar': 'application/x-gtar',
- '.hdf': 'application/x-hdf',
- '.htm': 'text/html',
- '.html': 'text/html',
- '.ief': 'image/ief',
- '.jpe': 'image/jpeg',
- '.jpeg': 'image/jpeg',
- '.jpg': 'image/jpeg',
- '.js': 'application/x-javascript',
- '.latex': 'application/x-latex',
- '.man': 'application/x-troff-man',
- '.me': 'application/x-troff-me',
- '.mif': 'application/x-mif',
- '.mov': 'video/quicktime',
- '.movie': 'video/x-sgi-movie',
- '.mpe': 'video/mpeg',
- '.mpeg': 'video/mpeg',
- '.mpg': 'video/mpeg',
- '.ms': 'application/x-troff-ms',
- '.nc': 'application/x-netcdf',
- '.o': 'application/octet-stream',
- '.obj': 'application/octet-stream',
- '.oda': 'application/oda',
- '.pbm': 'image/x-portable-bitmap',
- '.pdf': 'application/pdf',
- '.pgm': 'image/x-portable-graymap',
- '.pnm': 'image/x-portable-anymap',
- '.png': 'image/png',
- '.ppm': 'image/x-portable-pixmap',
- '.py': 'text/x-python',
- '.pyc': 'application/x-python-code',
- '.ps': 'application/postscript',
- '.qt': 'video/quicktime',
- '.ras': 'image/x-cmu-raster',
- '.rgb': 'image/x-rgb',
- '.rdf': 'application/xml',
- '.roff': 'application/x-troff',
- '.rtf': 'application/rtf',
- '.rtx': 'text/richtext',
- '.sgm': 'text/x-sgml',
- '.sgml': 'text/x-sgml',
- '.sh': 'application/x-sh',
- '.shar': 'application/x-shar',
- '.snd': 'audio/basic',
- '.so': 'application/octet-stream',
- '.src': 'application/x-wais-source',
- '.sv4cpio': 'application/x-sv4cpio',
- '.sv4crc': 'application/x-sv4crc',
- '.t': 'application/x-troff',
- '.tar': 'application/x-tar',
- '.tcl': 'application/x-tcl',
- '.tex': 'application/x-tex',
- '.texi': 'application/x-texinfo',
- '.texinfo': 'application/x-texinfo',
- '.tif': 'image/tiff',
- '.tiff': 'image/tiff',
- '.tr': 'application/x-troff',
- '.tsv': 'text/tab-separated-values',
- '.txt': 'text/plain',
- '.ustar': 'application/x-ustar',
- '.wav': 'audio/x-wav',
- '.xbm': 'image/x-xbitmap',
- '.xml': 'text/xml',
- '.xsl': 'application/xml',
- '.xpm': 'image/x-xpixmap',
- '.xwd': 'image/x-xwindowdump',
- '.zip': 'application/zip',
- }
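
The non-8.3 mimetypes module is still part of the standard library; a minimal Python 3 sketch of the two entry points documented above:

    # Python 3 sketch of the documented entry points.
    import mimetypes

    print(mimetypes.guess_type('archive.tar.gz'))   # ('application/x-tar', 'gzip')
    print(mimetypes.guess_type('page.HTML'))        # ('text/html', None), via the
                                                    # case-insensitive fallback
    print(mimetypes.guess_extension('text/plain'))  # '.txt'
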
diff --git a/Lib/dos-8x3/mimewrit.py b/Lib/dos-8x3/mimewrit.py
deleted file mode 100644
index 754576b..0000000
--- a/Lib/dos-8x3/mimewrit.py
+++ /dev/null
@@ -1,128 +0,0 @@
-"""Generic MIME writer.
-
-Classes:
-
-MimeWriter - the only thing here.
-
-"""
-
-
-import string
-import mimetools
-
-
-class MimeWriter:
-
- """Generic MIME writer.
-
- Methods:
-
- __init__()
- addheader()
- flushheaders()
- startbody()
- startmultipartbody()
- nextpart()
- lastpart()
-
- A MIME writer is much more primitive than a MIME parser. It
- doesn't seek around on the output file, and it doesn't use large
- amounts of buffer space, so you have to write the parts in the
- order they should occur on the output file. It does buffer the
- headers you add, allowing you to rearrange their order.
-
- General usage is:
-
- f = <open the output file>
- w = MimeWriter(f)
- ...call w.addheader(key, value) 0 or more times...
-
- followed by either:
-
- f = w.startbody(content_type)
- ...call f.write(data) for body data...
-
- or:
-
- w.startmultipartbody(subtype)
- for each part:
- subwriter = w.nextpart()
- ...use the subwriter's methods to create the subpart...
- w.lastpart()
-
- The subwriter is another MimeWriter instance, and should be
- treated in the same way as the toplevel MimeWriter. This way,
- writing recursive body parts is easy.
-
- Warning: don't forget to call lastpart()!
-
- XXX There should be more state so calls made in the wrong order
- are detected.
-
- Some special cases:
-
- - startbody() just returns the file passed to the constructor;
- but don't use this knowledge, as it may be changed.
-
- - startmultipartbody() actually returns a file as well;
- this can be used to write the initial 'if you can read this your
- mailer is not MIME-aware' message.
-
- - If you call flushheaders(), the headers accumulated so far are
- written out (and forgotten); this is useful if you don't need a
- body part at all, e.g. for a subpart of type message/rfc822
- that's (mis)used to store some header-like information.
-
- - Passing a keyword argument 'prefix=<flag>' to addheader(),
- start*body() affects where the header is inserted; 0 means
- append at the end, 1 means insert at the start; default is
- append for addheader(), but insert for start*body(), which use
- it to determine where the Content-Type header goes.
-
- """
-
- def __init__(self, fp):
- self._fp = fp
- self._headers = []
-
- def addheader(self, key, value, prefix=0):
- lines = string.splitfields(value, "\n")
- while lines and not lines[-1]: del lines[-1]
- while lines and not lines[0]: del lines[0]
- for i in range(1, len(lines)):
- lines[i] = " " + string.strip(lines[i])
- value = string.joinfields(lines, "\n") + "\n"
- line = key + ": " + value
- if prefix:
- self._headers.insert(0, line)
- else:
- self._headers.append(line)
-
- def flushheaders(self):
- self._fp.writelines(self._headers)
- self._headers = []
-
- def startbody(self, ctype, plist=[], prefix=1):
- for name, value in plist:
- ctype = ctype + ';\n %s=\"%s\"' % (name, value)
- self.addheader("Content-Type", ctype, prefix=prefix)
- self.flushheaders()
- self._fp.write("\n")
- return self._fp
-
- def startmultipartbody(self, subtype, boundary=None, plist=[], prefix=1):
- self._boundary = boundary or mimetools.choose_boundary()
- return self.startbody("multipart/" + subtype,
- [("boundary", self._boundary)] + plist,
- prefix=prefix)
-
- def nextpart(self):
- self._fp.write("\n--" + self._boundary + "\n")
- return self.__class__(self._fp)
-
- def lastpart(self):
- self._fp.write("\n--" + self._boundary + "--\n")
-
-
-if __name__ == '__main__':
- import test.test_MimeWriter
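
The class docstring above spells out the intended calling sequence; a Python 2-era sketch of that pattern (the output file name is hypothetical):

    # Python 2-era sketch of the usage pattern from the docstring;
    # 'out.txt' is a hypothetical file.
    import MimeWriter

    fp = open('out.txt', 'w')
    w = MimeWriter.MimeWriter(fp)
    w.addheader('Subject', 'demo')
    w.startmultipartbody('mixed')
    part = w.nextpart()
    body = part.startbody('text/plain')
    body.write('hello, world\n')
    w.lastpart()                      # easy to forget, per the warning above
    fp.close()
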
diff --git a/Lib/dos-8x3/multifil.py b/Lib/dos-8x3/multifil.py
deleted file mode 100755
index e43d331..0000000
--- a/Lib/dos-8x3/multifil.py
+++ /dev/null
@@ -1,164 +0,0 @@
-"""A readline()-style interface to the parts of a multipart message.
-
-The MultiFile class makes each part of a multipart message "feel" like
-an ordinary file, as long as you use fp.readline(). Allows recursive
-use, for nested multipart messages. Probably best used together
-with module mimetools.
-
-Suggested use:
-
-real_fp = open(...)
-fp = MultiFile(real_fp)
-
-"read some lines from fp"
-fp.push(separator)
-while 1:
- "read lines from fp until it returns an empty string" (A)
- if not fp.next(): break
-fp.pop()
-"read remaining lines from fp until it returns an empty string"
-
-The latter sequence may be used recursively at (A).
-It is also allowed to use multiple push()...pop() sequences.
-
-If seekable is given as 0, the class code will not do the bookkeeping
-it normally attempts in order to make seeks relative to the beginning of the
-current file part. This may be useful when using MultiFile with a non-
-seekable stream object.
-"""
-
-import sys
-import string
-
-class Error(Exception):
- pass
-
-class MultiFile:
-
- seekable = 0
-
- def __init__(self, fp, seekable=1):
- self.fp = fp
- self.stack = [] # Grows down
- self.level = 0
- self.last = 0
- if seekable:
- self.seekable = 1
- self.start = self.fp.tell()
- self.posstack = [] # Grows down
-
- def tell(self):
- if self.level > 0:
- return self.lastpos
- return self.fp.tell() - self.start
-
- def seek(self, pos, whence=0):
- here = self.tell()
- if whence:
- if whence == 1:
- pos = pos + here
- elif whence == 2:
- if self.level > 0:
- pos = pos + self.lastpos
- else:
- raise Error, "can't use whence=2 yet"
- if not 0 <= pos <= here or \
- self.level > 0 and pos > self.lastpos:
- raise Error, 'bad MultiFile.seek() call'
- self.fp.seek(pos + self.start)
- self.level = 0
- self.last = 0
-
- def readline(self):
- if self.level > 0:
- return ''
- line = self.fp.readline()
- # Real EOF?
- if not line:
- self.level = len(self.stack)
- self.last = (self.level > 0)
- if self.last:
- raise Error, 'sudden EOF in MultiFile.readline()'
- return ''
- assert self.level == 0
- # Fast check to see if this is just data
- if self.is_data(line):
- return line
- else:
- # Ignore trailing whitespace on marker lines
- k = len(line) - 1;
- while line[k] in string.whitespace:
- k = k - 1
- marker = line[:k+1]
- # No? OK, try to match a boundary.
- # Return the line (unstripped) if we don't.
- for i in range(len(self.stack)):
- sep = self.stack[i]
- if marker == self.section_divider(sep):
- self.last = 0
- break
- elif marker == self.end_marker(sep):
- self.last = 1
- break
- else:
- return line
- # We only get here if we see a section divider or EOM line
- if self.seekable:
- self.lastpos = self.tell() - len(line)
- self.level = i+1
- if self.level > 1:
- raise Error,'Missing endmarker in MultiFile.readline()'
- return ''
-
- def readlines(self):
- list = []
- while 1:
- line = self.readline()
- if not line: break
- list.append(line)
- return list
-
- def read(self): # Note: no size argument -- read until EOF only!
- return string.joinfields(self.readlines(), '')
-
- def next(self):
- while self.readline(): pass
- if self.level > 1 or self.last:
- return 0
- self.level = 0
- self.last = 0
- if self.seekable:
- self.start = self.fp.tell()
- return 1
-
- def push(self, sep):
- if self.level > 0:
- raise Error, 'bad MultiFile.push() call'
- self.stack.insert(0, sep)
- if self.seekable:
- self.posstack.insert(0, self.start)
- self.start = self.fp.tell()
-
- def pop(self):
- if self.stack == []:
- raise Error, 'bad MultiFile.pop() call'
- if self.level <= 1:
- self.last = 0
- else:
- abslastpos = self.lastpos + self.start
- self.level = max(0, self.level - 1)
- del self.stack[0]
- if self.seekable:
- self.start = self.posstack[0]
- del self.posstack[0]
- if self.level > 0:
- self.lastpos = abslastpos - self.start
-
- def is_data(self, line):
- return line[:2] <> '--'
-
- def section_divider(self, str):
- return "--" + str
-
- def end_marker(self, str):
- return "--" + str + "--"
diff --git a/Lib/dos-8x3/nturl2pa.py b/Lib/dos-8x3/nturl2pa.py
deleted file mode 100755
index 0445b8a..0000000
--- a/Lib/dos-8x3/nturl2pa.py
+++ /dev/null
@@ -1,66 +0,0 @@
-"""Convert a NT pathname to a file URL and vice versa."""
-
-def url2pathname(url):
- r"""Convert a URL to a DOS path.
-
- ///C|/foo/bar/spam.foo
-
- becomes
-
- C:\foo\bar\spam.foo
- """
- import string, urllib
- if not '|' in url:
- # No drive specifier, just convert slashes
- if url[:4] == '////':
- # path is something like ////host/path/on/remote/host
- # convert this to \\host\path\on\remote\host
- # (notice halving of slashes at the start of the path)
- url = url[2:]
- components = string.split(url, '/')
- # make sure not to convert quoted slashes :-)
- return urllib.unquote(string.join(components, '\\'))
- comp = string.split(url, '|')
- if len(comp) != 2 or comp[0][-1] not in string.letters:
- error = 'Bad URL: ' + url
- raise IOError, error
- drive = string.upper(comp[0][-1])
- components = string.split(comp[1], '/')
- path = drive + ':'
- for comp in components:
- if comp:
- path = path + '\\' + urllib.unquote(comp)
- return path
-
-def pathname2url(p):
- r"""Convert a DOS path name to a file url.
-
- C:\foo\bar\spam.foo
-
- becomes
-
- ///C|/foo/bar/spam.foo
- """
-
- import string, urllib
- if not ':' in p:
- # No drive specifier, just convert slashes and quote the name
- if p[:2] == '\\\\':
- # path is something like \\host\path\on\remote\host
- # convert this to ////host/path/on/remote/host
- # (notice doubling of slashes at the start of the path)
- p = '\\\\' + p
- components = string.split(p, '\\')
- return urllib.quote(string.join(components, '/'))
- comp = string.split(p, ':')
- if len(comp) != 2 or len(comp[0]) > 1:
- error = 'Bad path: ' + p
- raise IOError, error
-
- drive = urllib.quote(string.upper(comp[0]))
- components = string.split(comp[1], '\\')
- path = '///' + drive + '|'
- for comp in components:
- if comp:
- path = path + '/' + urllib.quote(comp)
- return path
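
The same two conversions survive in Python 3 as the nturl2path module (urllib.request relies on it on Windows); a minimal sketch:

    # Python 3 sketch; newer releases emit 'C:' where older ones used 'C|'.
    import nturl2path

    print(nturl2path.url2pathname('///C|/foo/bar/spam.foo'))  # C:\foo\bar\spam.foo
    print(nturl2path.pathname2url(r'C:\foo\bar\spam.foo'))    # ///C|/... or ///C:/...
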
diff --git a/Lib/dos-8x3/posixfil.py b/Lib/dos-8x3/posixfil.py
deleted file mode 100755
index 23f2c85..0000000
--- a/Lib/dos-8x3/posixfil.py
+++ /dev/null
@@ -1,229 +0,0 @@
-"""Extended file operations available in POSIX.
-
-f = posixfile.open(filename, [mode, [bufsize]])
- will create a new posixfile object
-
-f = posixfile.fileopen(fileobject)
- will create a posixfile object from a builtin file object
-
-f.file()
- will return the original builtin file object
-
-f.dup()
- will return a new file object based on a new filedescriptor
-
-f.dup2(fd)
- will return a new file object based on the given filedescriptor
-
-f.flags(mode)
- will turn on the associated flag (merge)
- mode can contain the following characters:
-
- (character representing a flag)
- a append only flag
- c close on exec flag
- n no delay flag
- s synchronization flag
- (modifiers)
- ! turn flags 'off' instead of default 'on'
- = copy flags 'as is' instead of default 'merge'
- ? return a string in which the characters represent the flags
- that are set
-
- note: - the '!' and '=' modifiers are mutually exclusive.
- - the '?' modifier will return the status of the flags after they
- have been changed by other characters in the mode string
-
-f.lock(mode [, len [, start [, whence]]])
- will (un)lock a region
- mode can contain the following characters:
-
- (character representing type of lock)
- u unlock
- r read lock
- w write lock
- (modifiers)
- | wait until the lock can be granted
- ? return the first lock conflicting with the requested lock
- or 'None' if there is no conflict. The lock returned is in the
- format (mode, len, start, whence, pid) where mode is a
- character representing the type of lock ('r' or 'w')
-
- note: - the '?' modifier prevents a region from being locked; it is
- query only
-"""
-
-class _posixfile_:
- """File wrapper class that provides extra POSIX file routines."""
-
- states = ['open', 'closed']
-
- #
- # Internal routines
- #
- def __repr__(self):
- file = self._file_
- return "<%s posixfile '%s', mode '%s' at %s>" % \
- (self.states[file.closed], file.name, file.mode, \
- hex(id(self))[2:])
-
- #
- # Initialization routines
- #
- def open(self, name, mode='r', bufsize=-1):
- import __builtin__
- return self.fileopen(__builtin__.open(name, mode, bufsize))
-
- def fileopen(self, file):
- if `type(file)` != "<type 'file'>":
- raise TypeError, 'posixfile.fileopen() arg must be file object'
- self._file_ = file
- # Copy basic file methods
- for method in file.__methods__:
- setattr(self, method, getattr(file, method))
- return self
-
- #
- # New methods
- #
- def file(self):
- return self._file_
-
- def dup(self):
- import posix
-
- try: ignore = posix.fdopen
- except: raise AttributeError, 'dup() method unavailable'
-
- return posix.fdopen(posix.dup(self._file_.fileno()), self._file_.mode)
-
- def dup2(self, fd):
- import posix
-
- try: ignore = posix.fdopen
- except: raise AttributeError, 'dup() method unavailable'
-
- posix.dup2(self._file_.fileno(), fd)
- return posix.fdopen(fd, self._file_.mode)
-
- def flags(self, *which):
- import fcntl, FCNTL
-
- if which:
- if len(which) > 1:
- raise TypeError, 'Too many arguments'
- which = which[0]
- else: which = '?'
-
- l_flags = 0
- if 'n' in which: l_flags = l_flags | FCNTL.O_NDELAY
- if 'a' in which: l_flags = l_flags | FCNTL.O_APPEND
- if 's' in which: l_flags = l_flags | FCNTL.O_SYNC
-
- file = self._file_
-
- if '=' not in which:
- cur_fl = fcntl.fcntl(file.fileno(), FCNTL.F_GETFL, 0)
- if '!' in which: l_flags = cur_fl & ~ l_flags
- else: l_flags = cur_fl | l_flags
-
- l_flags = fcntl.fcntl(file.fileno(), FCNTL.F_SETFL, l_flags)
-
- if 'c' in which:
- arg = ('!' not in which) # 0 is don't, 1 is do close on exec
- l_flags = fcntl.fcntl(file.fileno(), FCNTL.F_SETFD, arg)
-
- if '?' in which:
- which = '' # Return current flags
- l_flags = fcntl.fcntl(file.fileno(), FCNTL.F_GETFL, 0)
- if FCNTL.O_APPEND & l_flags: which = which + 'a'
- if fcntl.fcntl(file.fileno(), FCNTL.F_GETFD, 0) & 1:
- which = which + 'c'
- if FCNTL.O_NDELAY & l_flags: which = which + 'n'
- if FCNTL.O_SYNC & l_flags: which = which + 's'
- return which
-
- def lock(self, how, *args):
- import struct, fcntl, FCNTL
-
- if 'w' in how: l_type = FCNTL.F_WRLCK
- elif 'r' in how: l_type = FCNTL.F_RDLCK
- elif 'u' in how: l_type = FCNTL.F_UNLCK
- else: raise TypeError, 'no type of lock specified'
-
- if '|' in how: cmd = FCNTL.F_SETLKW
- elif '?' in how: cmd = FCNTL.F_GETLK
- else: cmd = FCNTL.F_SETLK
-
- l_whence = 0
- l_start = 0
- l_len = 0
-
- if len(args) == 1:
- l_len = args[0]
- elif len(args) == 2:
- l_len, l_start = args
- elif len(args) == 3:
- l_len, l_start, l_whence = args
- elif len(args) > 3:
- raise TypeError, 'too many arguments'
-
- # Hack by davem@magnet.com to get locking to go on freebsd;
- # additions for AIX by Vladimir.Marangozov@imag.fr
- import sys, os
- if sys.platform in ('netbsd1',
- 'openbsd2',
- 'freebsd2', 'freebsd3', 'freebsd4', 'freebsd5',
- 'bsdos2', 'bsdos3', 'bsdos4'):
- flock = struct.pack('lxxxxlxxxxlhh', \
- l_start, l_len, os.getpid(), l_type, l_whence)
- elif sys.platform in ['aix3', 'aix4']:
- flock = struct.pack('hhlllii', \
- l_type, l_whence, l_start, l_len, 0, 0, 0)
- else:
- flock = struct.pack('hhllhh', \
- l_type, l_whence, l_start, l_len, 0, 0)
-
- flock = fcntl.fcntl(self._file_.fileno(), cmd, flock)
-
- if '?' in how:
- if sys.platform in ('netbsd1',
- 'openbsd2',
- 'freebsd2', 'freebsd3', 'freebsd4', 'freebsd5',
- 'bsdos2', 'bsdos3', 'bsdos4'):
- l_start, l_len, l_pid, l_type, l_whence = \
- struct.unpack('lxxxxlxxxxlhh', flock)
- elif sys.platform in ['aix3', 'aix4']:
- l_type, l_whence, l_start, l_len, l_sysid, l_pid, l_vfs = \
- struct.unpack('hhlllii', flock)
- elif sys.platform == "linux2":
- l_type, l_whence, l_start, l_len, l_pid, l_sysid = \
- struct.unpack('hhllhh', flock)
- else:
- l_type, l_whence, l_start, l_len, l_sysid, l_pid = \
- struct.unpack('hhllhh', flock)
-
- if l_type != FCNTL.F_UNLCK:
- if l_type == FCNTL.F_RDLCK:
- return 'r', l_len, l_start, l_whence, l_pid
- else:
- return 'w', l_len, l_start, l_whence, l_pid
-
-def open(name, mode='r', bufsize=-1):
- """Public routine to open a file as a posixfile object."""
- return _posixfile_().open(name, mode, bufsize)
-
-def fileopen(file):
- """Public routine to get a posixfile object from a Python file object."""
- return _posixfile_().fileopen(file)
-
-#
-# Constants
-#
-SEEK_SET = 0
-SEEK_CUR = 1
-SEEK_END = 2
-
-#
-# End of posixfile.py
-#
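
The wrapper deleted above mostly packaged fcntl record locking; the direct fcntl calls that replaced it are still available. A minimal Python 3 sketch (the file name is hypothetical):

    # Python 3 sketch of an equivalent region lock via fcntl;
    # 'data.db' is a hypothetical file.
    import fcntl

    with open('data.db', 'r+b') as f:
        fcntl.lockf(f, fcntl.LOCK_EX, 10, 0, 0)   # exclusive lock on 10 bytes at offset 0
        f.write(b'0123456789')
        fcntl.lockf(f, fcntl.LOCK_UN, 10, 0, 0)   # release the same region
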
diff --git a/Lib/dos-8x3/posixpat.py b/Lib/dos-8x3/posixpat.py
deleted file mode 100755
index 2826604..0000000
--- a/Lib/dos-8x3/posixpat.py
+++ /dev/null
@@ -1,368 +0,0 @@
-"""Common operations on Posix pathnames.
-
-Instead of importing this module directly, import os and refer to
-this module as os.path. The "os.path" name is an alias for this
-module on Posix systems; on other systems (e.g. Mac, Windows),
-os.path provides the same operations in a manner specific to that
-platform, and is an alias to another module (e.g. macpath, ntpath).
-
-Some of this can actually be useful on non-Posix systems too, e.g.
-for manipulation of the pathname component of URLs.
-"""
-
-import os
-import stat
-
-
-# Normalize the case of a pathname. Trivial in Posix, string.lower on Mac.
-# On MS-DOS this may also turn slashes into backslashes; however, other
-# normalizations (such as optimizing '../' away) are not allowed
-# (another function should be defined to do that).
-
-def normcase(s):
- """Normalize case of pathname. Has no effect under Posix"""
- return s
-
-
-# Return whether a path is absolute.
-# Trivial in Posix, harder on the Mac or MS-DOS.
-
-def isabs(s):
- """Test whether a path is absolute"""
- return s[:1] == '/'
-
-
-# Join pathnames.
-# Ignore the previous parts if a part is absolute.
-# Insert a '/' unless the first part is empty or already ends in '/'.
-
-def join(a, *p):
- """Join two or more pathname components, inserting '/' as needed"""
- path = a
- for b in p:
- if b[:1] == '/':
- path = b
- elif path == '' or path[-1:] == '/':
- path = path + b
- else:
- path = path + '/' + b
- return path
-
-
-# Split a path in head (everything up to the last '/') and tail (the
-# rest). If the path ends in '/', tail will be empty. If there is no
-# '/' in the path, head will be empty.
-# Trailing '/'es are stripped from head unless it is the root.
-
-def split(p):
- """Split a pathname. Returns tuple "(head, tail)" where "tail" is
- everything after the final slash. Either part may be empty."""
- i = p.rfind('/') + 1
- head, tail = p[:i], p[i:]
- if head and head <> '/'*len(head):
- while head[-1] == '/':
- head = head[:-1]
- return head, tail
-
-
-# Split a path in root and extension.
-# The extension is everything starting at the last dot in the last
-# pathname component; the root is everything before that.
-# It is always true that root + ext == p.
-
-def splitext(p):
- """Split the extension from a pathname. Extension is everything from the
- last dot to the end. Returns "(root, ext)", either part may be empty."""
- root, ext = '', ''
- for c in p:
- if c == '/':
- root, ext = root + ext + c, ''
- elif c == '.':
- if ext:
- root, ext = root + ext, c
- else:
- ext = c
- elif ext:
- ext = ext + c
- else:
- root = root + c
- return root, ext
-
-
-# Split a pathname into a drive specification and the rest of the
-# path. Useful on DOS/Windows/NT; on Unix, the drive is always empty.
-
-def splitdrive(p):
- """Split a pathname into drive and path. On Posix, drive is always
- empty."""
- return '', p
-
-
-# Return the tail (basename) part of a path.
-
-def basename(p):
- """Returns the final component of a pathname"""
- return split(p)[1]
-
-
-# Return the head (dirname) part of a path.
-
-def dirname(p):
- """Returns the directory component of a pathname"""
- return split(p)[0]
-
-
-# Return the longest prefix of all list elements.
-
-def commonprefix(m):
- "Given a list of pathnames, returns the longest common leading component"
- if not m: return ''
- prefix = m[0]
- for item in m:
- for i in range(len(prefix)):
- if prefix[:i+1] <> item[:i+1]:
- prefix = prefix[:i]
- if i == 0: return ''
- break
- return prefix
-
-
-# Get size, mtime, atime of files.
-
-def getsize(filename):
- """Return the size of a file, reported by os.stat()."""
- st = os.stat(filename)
- return st[stat.ST_SIZE]
-
-def getmtime(filename):
- """Return the last modification time of a file, reported by os.stat()."""
- st = os.stat(filename)
- return st[stat.ST_MTIME]
-
-def getatime(filename):
- """Return the last access time of a file, reported by os.stat()."""
- st = os.stat(filename)
- return st[stat.ST_ATIME]
-
-
-# Is a path a symbolic link?
-# This will always return false on systems where os.lstat doesn't exist.
-
-def islink(path):
- """Test whether a path is a symbolic link"""
- try:
- st = os.lstat(path)
- except (os.error, AttributeError):
- return 0
- return stat.S_ISLNK(st[stat.ST_MODE])
-
-
-# Does a path exist?
-# This is false for dangling symbolic links.
-
-def exists(path):
- """Test whether a path exists. Returns false for broken symbolic links"""
- try:
- st = os.stat(path)
- except os.error:
- return 0
- return 1
-
-
-# Is a path a directory?
-# This follows symbolic links, so both islink() and isdir() can be true
-# for the same path.
-
-def isdir(path):
- """Test whether a path is a directory"""
- try:
- st = os.stat(path)
- except os.error:
- return 0
- return stat.S_ISDIR(st[stat.ST_MODE])
-
-
-# Is a path a regular file?
-# This follows symbolic links, so both islink() and isfile() can be true
-# for the same path.
-
-def isfile(path):
- """Test whether a path is a regular file"""
- try:
- st = os.stat(path)
- except os.error:
- return 0
- return stat.S_ISREG(st[stat.ST_MODE])
-
-
-# Are two filenames really pointing to the same file?
-
-def samefile(f1, f2):
- """Test whether two pathnames reference the same actual file"""
- s1 = os.stat(f1)
- s2 = os.stat(f2)
- return samestat(s1, s2)
-
-
-# Are two open files really referencing the same file?
-# (Not necessarily the same file descriptor!)
-
-def sameopenfile(fp1, fp2):
- """Test whether two open file objects reference the same file"""
- s1 = os.fstat(fp1)
- s2 = os.fstat(fp2)
- return samestat(s1, s2)
-
-
-# Are two stat buffers (obtained from stat, fstat or lstat)
-# describing the same file?
-
-def samestat(s1, s2):
- """Test whether two stat buffers reference the same file"""
- return s1[stat.ST_INO] == s2[stat.ST_INO] and \
- s1[stat.ST_DEV] == s2[stat.ST_DEV]
-
-
-# Is a path a mount point?
-# (Does this work for all UNIXes? Is it even guaranteed to work by Posix?)
-
-def ismount(path):
- """Test whether a path is a mount point"""
- try:
- s1 = os.stat(path)
- s2 = os.stat(join(path, '..'))
- except os.error:
- return 0 # It doesn't exist -- so not a mount point :-)
- dev1 = s1[stat.ST_DEV]
- dev2 = s2[stat.ST_DEV]
- if dev1 != dev2:
- return 1 # path/.. on a different device as path
- ino1 = s1[stat.ST_INO]
- ino2 = s2[stat.ST_INO]
- if ino1 == ino2:
- return 1 # path/.. is the same i-node as path
- return 0
-
-
-# Directory tree walk.
-# For each directory under top (including top itself, but excluding
-# '.' and '..'), func(arg, dirname, filenames) is called, where
-# dirname is the name of the directory and filenames is the list
-# of files (and subdirectories etc.) in the directory.
-# The func may modify the filenames list, to implement a filter,
-# or to impose a different order of visiting.
-
-def walk(top, func, arg):
- """walk(top,func,arg) calls func(arg, d, files) for each directory "d"
- in the tree rooted at "top" (including "top" itself). "files" is a list
- of all the files and subdirs in directory "d".
- """
- try:
- names = os.listdir(top)
- except os.error:
- return
- func(arg, top, names)
- for name in names:
- name = join(top, name)
- st = os.lstat(name)
- if stat.S_ISDIR(st[stat.ST_MODE]):
- walk(name, func, arg)
-
-
-# Expand paths beginning with '~' or '~user'.
-# '~' means $HOME; '~user' means that user's home directory.
-# If the path doesn't begin with '~', or if the user or $HOME is unknown,
-# the path is returned unchanged (leaving error reporting to whatever
-# function is called with the expanded path as argument).
-# See also module 'glob' for expansion of *, ? and [...] in pathnames.
-# (A function should also be defined to do full *sh-style environment
-# variable expansion.)
-
-def expanduser(path):
- """Expand ~ and ~user constructions. If user or $HOME is unknown,
- do nothing."""
- if path[:1] <> '~':
- return path
- i, n = 1, len(path)
- while i < n and path[i] <> '/':
- i = i + 1
- if i == 1:
- if not os.environ.has_key('HOME'):
- return path
- userhome = os.environ['HOME']
- else:
- import pwd
- try:
- pwent = pwd.getpwnam(path[1:i])
- except KeyError:
- return path
- userhome = pwent[5]
- if userhome[-1:] == '/': i = i + 1
- return userhome + path[i:]
-
-
-# Expand paths containing shell variable substitutions.
-# This expands the forms $variable and ${variable} only.
-# Non-existent variables are left unchanged.
-
-_varprog = None
-
-def expandvars(path):
- """Expand shell variables of form $var and ${var}. Unknown variables
- are left unchanged."""
- global _varprog
- if '$' not in path:
- return path
- if not _varprog:
- import re
- _varprog = re.compile(r'\$(\w+|\{[^}]*\})')
- i = 0
- while 1:
- m = _varprog.search(path, i)
- if not m:
- break
- i, j = m.span(0)
- name = m.group(1)
- if name[:1] == '{' and name[-1:] == '}':
- name = name[1:-1]
- if os.environ.has_key(name):
- tail = path[j:]
- path = path[:i] + os.environ[name]
- i = len(path)
- path = path + tail
- else:
- i = j
- return path
-
-
-# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
-# It should be understood that this may change the meaning of the path
-# if it contains symbolic links!
-
-def normpath(path):
- """Normalize path, eliminating double slashes, etc."""
- if path == '':
- return '.'
- initial_slash = (path[0] == '/')
- comps = path.split('/')
- new_comps = []
- for comp in comps:
- if comp in ('', '.'):
- continue
- if (comp != '..' or (not initial_slash and not new_comps) or
- (new_comps and new_comps[-1] == '..')):
- new_comps.append(comp)
- elif new_comps:
- new_comps.pop()
- comps = new_comps
- path = '/'.join(comps)
- if initial_slash:
- path = '/' + path
- return path or '.'
-
-
-def abspath(path):
- """Return an absolute path."""
- if not isabs(path):
- path = join(os.getcwd(), path)
- return normpath(path)
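
These helpers live on unchanged as posixpath (and as os.path on Unix); a small Python 3 sketch of the behaviour documented above:

    # Python 3 sketch of a few of the documented helpers.
    import posixpath

    print(posixpath.splitext('/tmp/archive.tar.gz'))   # ('/tmp/archive.tar', '.gz')
    print(posixpath.normpath('A//B/./C/../D'))         # 'A/B/D'
    print(posixpath.ismount('/'))                      # True on a Unix system
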
diff --git a/Lib/dos-8x3/py_compi.py b/Lib/dos-8x3/py_compi.py
deleted file mode 100755
index b453109..0000000
--- a/Lib/dos-8x3/py_compi.py
+++ /dev/null
@@ -1,80 +0,0 @@
-"""Routine to "compile" a .py file to a .pyc (or .pyo) file.
-
-This module has intimate knowledge of the format of .pyc files.
-"""
-
-import imp
-MAGIC = imp.get_magic()
-
-def wr_long(f, x):
- """Internal; write a 32-bit int to a file in little-endian order."""
- f.write(chr( x & 0xff))
- f.write(chr((x >> 8) & 0xff))
- f.write(chr((x >> 16) & 0xff))
- f.write(chr((x >> 24) & 0xff))
-
-def compile(file, cfile=None, dfile=None):
- """Byte-compile one Python source file to Python bytecode.
-
- Arguments:
-
- file: source filename
- cfile: target filename; defaults to source with 'c' or 'o' appended
- ('c' normally, 'o' in optimizing mode, giving .pyc or .pyo)
- dfile: purported filename; defaults to source (this is the filename
- that will show up in error messages)
-
- Note that it isn't necessary to byte-compile Python modules for
- execution efficiency -- Python itself byte-compiles a module when
- it is loaded, and if it can, writes out the bytecode to the
- corresponding .pyc (or .pyo) file.
-
- However, if a Python installation is shared between users, it is a
- good idea to byte-compile all modules upon installation, since
- other users may not be able to write in the source directories,
- and thus they won't be able to write the .pyc/.pyo file, and then
- they would be byte-compiling every module each time it is loaded.
- This can slow down program start-up considerably.
-
- See compileall.py for a script/module that uses this module to
- byte-compile all installed files (or all files in selected
- directories).
-
- """
- import os, marshal, __builtin__
- f = open(file)
- try:
- timestamp = long(os.fstat(f.fileno())[8])
- except AttributeError:
- timestamp = long(os.stat(file)[8])
- codestring = f.read()
- # If parsing from a string, line breaks are \n (see parsetok.c:tok_nextc)
- # Replace will return original string if pattern is not found, so
- # we don't need to check whether it is found first.
- codestring = codestring.replace("\r\n","\n")
- codestring = codestring.replace("\r","\n")
- f.close()
- if codestring and codestring[-1] != '\n':
- codestring = codestring + '\n'
- try:
- codeobject = __builtin__.compile(codestring, dfile or file, 'exec')
- except SyntaxError, detail:
- import traceback, sys, string
- lines = traceback.format_exception_only(SyntaxError, detail)
- for line in lines:
- sys.stderr.write(string.replace(line, 'File "<string>"',
- 'File "%s"' % (dfile or file)))
- return
- if not cfile:
- cfile = file + (__debug__ and 'c' or 'o')
- fc = open(cfile, 'wb')
- fc.write('\0\0\0\0')
- wr_long(fc, timestamp)
- marshal.dump(codeobject, fc)
- fc.flush()
- fc.seek(0, 0)
- fc.write(MAGIC)
- fc.close()
- if os.name == 'mac':
- import macfs
- macfs.FSSpec(cfile).SetCreatorType('Pyth', 'PYC ')
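
py_compile also still ships under its full name; a minimal Python 3 sketch ('script.py' is a hypothetical source file):

    # Python 3 sketch; 'script.py' is hypothetical.
    import py_compile

    pyc_path = py_compile.compile('script.py')            # writes into __pycache__/
    print(pyc_path)
    py_compile.compile('script.py', cfile='script.pyc',   # explicit target, plus the
                       dfile='shown-in-tracebacks.py')    # purported source name
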
diff --git a/Lib/dos-8x3/queue.py b/Lib/dos-8x3/queue.py
deleted file mode 100755
index 9d5f799..0000000
--- a/Lib/dos-8x3/queue.py
+++ /dev/null
@@ -1,138 +0,0 @@
-"""A multi-producer, multi-consumer queue."""
-
-# define this exception to be compatible with Python 1.5's class
-# exceptions, but also when -X option is used.
-try:
- class Empty(Exception):
- pass
- class Full(Exception):
- pass
-except TypeError:
- # string based exceptions
- # exception raised by get(block=0)/get_nowait()
- Empty = 'Queue.Empty'
- # exception raised by put(block=0)/put_nowait()
- Full = 'Queue.Full'
-
-class Queue:
- def __init__(self, maxsize=0):
- """Initialize a queue object with a given maximum size.
-
- If maxsize is <= 0, the queue size is infinite.
- """
- import thread
- self._init(maxsize)
- self.mutex = thread.allocate_lock()
- self.esema = thread.allocate_lock()
- self.esema.acquire()
- self.fsema = thread.allocate_lock()
-
- def qsize(self):
- """Return the approximate size of the queue (not reliable!)."""
- self.mutex.acquire()
- n = self._qsize()
- self.mutex.release()
- return n
-
- def empty(self):
- """Return 1 if the queue is empty, 0 otherwise (not reliable!)."""
- self.mutex.acquire()
- n = self._empty()
- self.mutex.release()
- return n
-
- def full(self):
- """Return 1 if the queue is full, 0 otherwise (not reliable!)."""
- self.mutex.acquire()
- n = self._full()
- self.mutex.release()
- return n
-
- def put(self, item, block=1):
- """Put an item into the queue.
-
- If optional arg 'block' is 1 (the default), block if
- necessary until a free slot is available. Otherwise (block
- is 0), put an item on the queue if a free slot is immediately
- available, else raise the Full exception.
- """
- if block:
- self.fsema.acquire()
- elif not self.fsema.acquire(0):
- raise Full
- self.mutex.acquire()
- was_empty = self._empty()
- self._put(item)
- if was_empty:
- self.esema.release()
- if not self._full():
- self.fsema.release()
- self.mutex.release()
-
- def put_nowait(self, item):
- """Put an item into the queue without blocking.
-
- Only enqueue the item if a free slot is immediately available.
- Otherwise raise the Full exception.
- """
- return self.put(item, 0)
-
- def get(self, block=1):
- """Remove and return an item from the queue.
-
- If optional arg 'block' is 1 (the default), block if
- necessary until an item is available. Otherwise (block is 0),
- return an item if one is immediately available, else raise the
- Empty exception.
- """
- if block:
- self.esema.acquire()
- elif not self.esema.acquire(0):
- raise Empty
- self.mutex.acquire()
- was_full = self._full()
- item = self._get()
- if was_full:
- self.fsema.release()
- if not self._empty():
- self.esema.release()
- self.mutex.release()
- return item
-
- def get_nowait(self):
- """Remove and return an item from the queue without blocking.
-
- Only get an item if one is immediately available. Otherwise
- raise the Empty exception.
- """
- return self.get(0)
-
- # Override these methods to implement other queue organizations
- # (e.g. stack or priority queue).
- # These will only be called with appropriate locks held
-
- # Initialize the queue representation
- def _init(self, maxsize):
- self.maxsize = maxsize
- self.queue = []
-
- def _qsize(self):
- return len(self.queue)
-
- # Check whether the queue is empty
- def _empty(self):
- return not self.queue
-
- # Check whether the queue is full
- def _full(self):
- return self.maxsize > 0 and len(self.queue) == self.maxsize
-
- # Put a new item in the queue
- def _put(self, item):
- self.queue.append(item)
-
- # Get an item from the queue
- def _get(self):
- item = self.queue[0]
- del self.queue[0]
- return item
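
The class above survives as queue.Queue; a minimal Python 3 producer/consumer sketch:

    # Python 3 sketch: one consumer thread, one producer, a bounded queue.
    import queue
    import threading

    q = queue.Queue(maxsize=4)          # maxsize <= 0 would mean unbounded

    def worker():
        while True:
            item = q.get()              # blocks until an item is available
            if item is None:            # sentinel: stop
                break
            print('got', item)

    t = threading.Thread(target=worker)
    t.start()
    for i in range(10):
        q.put(i)                        # blocks while the queue is full
    q.put(None)
    t.join()
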
diff --git a/Lib/dos-8x3/reconver.py b/Lib/dos-8x3/reconver.py
deleted file mode 100644
index 7c2d376..0000000
--- a/Lib/dos-8x3/reconver.py
+++ /dev/null
@@ -1,186 +0,0 @@
-#! /usr/bin/env python1.5
-
-r"""Convert old ("regex") regular expressions to new syntax ("re").
-
-When imported as a module, there are two functions, with their own
-strings:
-
- convert(s, syntax=None) -- convert a regex regular expression to re syntax
-
- quote(s) -- return a quoted string literal
-
-When used as a script, read a Python string literal (or any other
-expression evaluating to a string) from stdin, and write the
-translated expression to stdout as a string literal. Unless stdout is
-a tty, no trailing \n is written to stdout. This is done so that it
-can be used with Emacs C-U M-| (shell-command-on-region with argument
-which filters the region through the shell command).
-
-No attempt has been made at coding for performance.
-
-Translation table...
-
- \( ( (unless RE_NO_BK_PARENS set)
- \) ) (unless RE_NO_BK_PARENS set)
- \| | (unless RE_NO_BK_VBAR set)
- \< \b (not quite the same, but alla...)
- \> \b (not quite the same, but alla...)
- \` \A
- \' \Z
-
-Not translated...
-
- .
- ^
- $
- *
- + (unless RE_BK_PLUS_QM set, then to \+)
- ? (unless RE_BK_PLUS_QM set, then to \?)
- \
- \b
- \B
- \w
- \W
- \1 ... \9
-
-Special cases...
-
- Non-printable characters are always replaced by their 3-digit
- escape code (except \t, \n, \r, which use mnemonic escapes)
-
- Newline is turned into | when RE_NEWLINE_OR is set
-
-XXX To be done...
-
- [...] (different treatment of backslashed items?)
- [^...] (different treatment of backslashed items?)
- ^ $ * + ? (in some error contexts these are probably treated differently)
- \vDD \DD (in the regex docs but only works when RE_ANSI_HEX set)
-
-"""
-
-
-import regex
-from regex_syntax import * # RE_*
-
-# Default translation table
-mastertable = {
- r'\<': r'\b',
- r'\>': r'\b',
- r'\`': r'\A',
- r'\'': r'\Z',
- r'\(': '(',
- r'\)': ')',
- r'\|': '|',
- '(': r'\(',
- ')': r'\)',
- '|': r'\|',
- '\t': r'\t',
- '\n': r'\n',
- '\r': r'\r',
-}
-
-
-def convert(s, syntax=None):
- """Convert a regex regular expression to re syntax.
-
- The first argument is the regular expression, as a string object,
- just like it would be passed to regex.compile(). (I.e., pass the
- actual string object -- string quotes must already have been
- removed and the standard escape processing has already been done,
- e.g. by eval().)
-
- The optional second argument is the regex syntax variant to be
- used. This is an integer mask as passed to regex.set_syntax();
- the flag bits are defined in regex_syntax. When not specified, or
- when None is given, the current regex syntax mask (as retrieved by
- regex.get_syntax()) is used -- which is 0 by default.
-
- The return value is a regular expression, as a string object that
- could be passed to re.compile(). (I.e., no string quotes have
- been added -- use quote() below, or repr().)
-
- The conversion is not always guaranteed to be correct. More
- syntactical analysis should be performed to detect borderline
- cases and decide what to do with them. For example, 'x*?' is not
- translated correctly.
-
- """
- table = mastertable.copy()
- if syntax is None:
- syntax = regex.get_syntax()
- if syntax & RE_NO_BK_PARENS:
- del table[r'\('], table[r'\)']
- del table['('], table[')']
- if syntax & RE_NO_BK_VBAR:
- del table[r'\|']
- del table['|']
- if syntax & RE_BK_PLUS_QM:
- table['+'] = r'\+'
- table['?'] = r'\?'
- table[r'\+'] = '+'
- table[r'\?'] = '?'
- if syntax & RE_NEWLINE_OR:
- table['\n'] = '|'
- res = ""
-
- i = 0
- end = len(s)
- while i < end:
- c = s[i]
- i = i+1
- if c == '\\':
- c = s[i]
- i = i+1
- key = '\\' + c
- key = table.get(key, key)
- res = res + key
- else:
- c = table.get(c, c)
- res = res + c
- return res
-
-
-def quote(s, quote=None):
- """Convert a string object to a quoted string literal.
-
- This is similar to repr() but will return a "raw" string (r'...'
- or r"...") when the string contains backslashes, instead of
- doubling all backslashes. The resulting string does *not* always
- evaluate to the same string as the original; however it will do
- just the right thing when passed into re.compile().
-
- The optional second argument forces the string quote; it must be
- a single character which is a valid Python string quote.
-
- """
- if quote is None:
- q = "'"
- altq = "'"
- if q in s and altq not in s:
- q = altq
- else:
- assert quote in ('"', "'")
- q = quote
- res = q
- for c in s:
- if c == q: c = '\\' + c
- elif c < ' ' or c > '~': c = "\\%03o" % ord(c)
- res = res + c
- res = res + q
- if '\\' in res:
- res = 'r' + res
- return res
-
-
-def main():
- """Main program -- called when run as a script."""
- import sys
- s = eval(sys.stdin.read())
- sys.stdout.write(quote(convert(s)))
- if sys.stdout.isatty():
- sys.stdout.write("\n")
-
-
-if __name__ == '__main__':
- main()
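
With the default (Emacs-style) syntax mask, the translation table above turns backslashed grouping and alternation into their re equivalents. A Python 1.5-era sketch of the two functions:

    # Python 1.5-era sketch of the converter's behaviour.
    import reconvert

    print reconvert.convert(r'\(spam\|eggs\)+')   # -> (spam|eggs)+
    print reconvert.quote('a\\b')                 # -> r'a\b'
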
diff --git a/Lib/dos-8x3/regex_sy.py b/Lib/dos-8x3/regex_sy.py
deleted file mode 100755
index aab7e7a..0000000
--- a/Lib/dos-8x3/regex_sy.py
+++ /dev/null
@@ -1,53 +0,0 @@
-"""Constants for selecting regexp syntaxes for the obsolete regex module.
-
-This module is only for backward compatibility. "regex" has now
-been replaced by the new regular expression module, "re".
-
-These bits are passed to regex.set_syntax() to choose among
-alternative regexp syntaxes.
-"""
-
-# 1 means plain parentheses serve as grouping, and backslash
-# parentheses are needed for literal searching.
-# 0 means backslash-parentheses are grouping, and plain parentheses
-# are for literal searching.
-RE_NO_BK_PARENS = 1
-
-# 1 means plain | serves as the "or"-operator, and \| is a literal.
-# 0 means \| serves as the "or"-operator, and | is a literal.
-RE_NO_BK_VBAR = 2
-
-# 0 means plain + or ? serves as an operator, and \+, \? are literals.
-# 1 means \+, \? are operators and plain +, ? are literals.
-RE_BK_PLUS_QM = 4
-
-# 1 means | binds tighter than ^ or $.
-# 0 means the contrary.
-RE_TIGHT_VBAR = 8
-
-# 1 means treat \n as an OR operator
-# 0 means treat it as a normal character
-RE_NEWLINE_OR = 16
-
-# 0 means that special characters (such as *, ^, and $) always have
-# their special meaning regardless of the surrounding context.
-# 1 means that special characters may act as normal characters in some
-# contexts. Specifically, this applies to:
-# ^ - only special at the beginning, or after ( or |
-# $ - only special at the end, or before ) or |
-# *, +, ? - only special when not after the beginning, (, or |
-RE_CONTEXT_INDEP_OPS = 32
-
-# ANSI sequences (\n etc) and \xhh
-RE_ANSI_HEX = 64
-
-# No GNU extensions
-RE_NO_GNU_EXTENSIONS = 128
-
-# Now define combinations of bits for the standard possibilities.
-RE_SYNTAX_AWK = (RE_NO_BK_PARENS | RE_NO_BK_VBAR | RE_CONTEXT_INDEP_OPS)
-RE_SYNTAX_EGREP = (RE_SYNTAX_AWK | RE_NEWLINE_OR)
-RE_SYNTAX_GREP = (RE_BK_PLUS_QM | RE_NEWLINE_OR)
-RE_SYNTAX_EMACS = 0
-
-# (Python's obsolete "regexp" module used a syntax similar to awk.)
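The masks above are single bits meant to be combined with bitwise OR, so e.g. RE_SYNTAX_AWK is 1 | 2 | 32 == 35. A hedged sketch of installing one of them, assuming the obsolete regex module and this file's canonical name, regex_syntax, are still available:

    import regex
    from regex_syntax import RE_SYNTAX_AWK

    prev = regex.set_syntax(RE_SYNTAX_AWK)    # returns the previously active mask
    try:
        # under awk syntax, plain parentheses and | act as operators
        print regex.match("(Python|Perl)", "Perl") >= 0
    finally:
        regex.set_syntax(prev)                # restore whatever was in effect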
diff --git a/Lib/dos-8x3/regex_te.py b/Lib/dos-8x3/regex_te.py
deleted file mode 100644
index dcb980a..0000000
--- a/Lib/dos-8x3/regex_te.py
+++ /dev/null
@@ -1,289 +0,0 @@
-
-# Regex test suite and benchmark suite v1.5a2
-# Due to the use of r"aw" strings, this file will
-# only work with Python 1.5 or higher.
-
-# The 3 possible outcomes for each pattern
-[SUCCEED, FAIL, SYNTAX_ERROR] = range(3)
-
-# Benchmark suite (needs expansion)
-#
-# The benchmark suite does not test correctness, just speed. The
-# first element of each tuple is the regex pattern; the second is a
-# string to match it against. The benchmarking code will embed the
-# second string inside several sizes of padding, to test how regex
-# matching performs on large strings.
-
-benchmarks = [
- ('Python', 'Python'), # Simple text literal
- ('.*Python', 'Python'), # Bad text literal
- ('.*Python.*', 'Python'), # Worse text literal
- ('.*\\(Python\\)', 'Python'), # Bad text literal with grouping
-
- ('(Python\\|Perl\\|Tcl', 'Perl'), # Alternation
- ('\\(Python\\|Perl\\|Tcl\\)', 'Perl'), # Grouped alternation
- ('\\(Python\\)\\1', 'PythonPython'), # Backreference
-# ('\\([0a-z][a-z]*,\\)+', 'a5,b7,c9,'), # Disable the fastmap optimization
- ('\\([a-z][a-z0-9]*,\\)+', 'a5,b7,c9,') # A few sets
-]
-
-# Test suite (for verifying correctness)
-#
-# The test suite is a list of 5- or 3-tuples. The 5 parts of a
-# complete tuple are:
-# element 0: a string containing the pattern
-# 1: the string to match against the pattern
-# 2: the expected result (SUCCEED, FAIL, SYNTAX_ERROR)
-# 3: a string that will be eval()'ed to produce a test string.
-# This is an arbitrary Python expression; the available
-# variables are "found" (the whole match), and "g1", "g2", ...
-# up to "g10" contain the contents of each group, or the
-# string 'None' if the group wasn't given a value.
-# 4: The expected result of evaluating the expression.
-# If the two don't match, an error is reported.
-#
-# If the regex isn't expected to work, the latter two elements can be omitted.
-
-tests = [
-('abc', 'abc', SUCCEED,
- 'found', 'abc'),
-('abc', 'xbc', FAIL),
-('abc', 'axc', FAIL),
-('abc', 'abx', FAIL),
-('abc', 'xabcy', SUCCEED,
- 'found', 'abc'),
-('abc', 'ababc', SUCCEED,
- 'found', 'abc'),
-('ab*c', 'abc', SUCCEED,
- 'found', 'abc'),
-('ab*bc', 'abc', SUCCEED,
- 'found', 'abc'),
-('ab*bc', 'abbc', SUCCEED,
- 'found', 'abbc'),
-('ab*bc', 'abbbbc', SUCCEED,
- 'found', 'abbbbc'),
-('ab+bc', 'abbc', SUCCEED,
- 'found', 'abbc'),
-('ab+bc', 'abc', FAIL),
-('ab+bc', 'abq', FAIL),
-('ab+bc', 'abbbbc', SUCCEED,
- 'found', 'abbbbc'),
-('ab?bc', 'abbc', SUCCEED,
- 'found', 'abbc'),
-('ab?bc', 'abc', SUCCEED,
- 'found', 'abc'),
-('ab?bc', 'abbbbc', FAIL),
-('ab?c', 'abc', SUCCEED,
- 'found', 'abc'),
-('^abc$', 'abc', SUCCEED,
- 'found', 'abc'),
-('^abc$', 'abcc', FAIL),
-('^abc', 'abcc', SUCCEED,
- 'found', 'abc'),
-('^abc$', 'aabc', FAIL),
-('abc$', 'aabc', SUCCEED,
- 'found', 'abc'),
-('^', 'abc', SUCCEED,
- 'found+"-"', '-'),
-('$', 'abc', SUCCEED,
- 'found+"-"', '-'),
-('a.c', 'abc', SUCCEED,
- 'found', 'abc'),
-('a.c', 'axc', SUCCEED,
- 'found', 'axc'),
-('a.*c', 'axyzc', SUCCEED,
- 'found', 'axyzc'),
-('a.*c', 'axyzd', FAIL),
-('a[bc]d', 'abc', FAIL),
-('a[bc]d', 'abd', SUCCEED,
- 'found', 'abd'),
-('a[b-d]e', 'abd', FAIL),
-('a[b-d]e', 'ace', SUCCEED,
- 'found', 'ace'),
-('a[b-d]', 'aac', SUCCEED,
- 'found', 'ac'),
-('a[-b]', 'a-', SUCCEED,
- 'found', 'a-'),
-('a[b-]', 'a-', SUCCEED,
- 'found', 'a-'),
-('a[]b', '-', SYNTAX_ERROR),
-('a[', '-', SYNTAX_ERROR),
-('a\\', '-', SYNTAX_ERROR),
-('abc\\)', '-', SYNTAX_ERROR),
-('\\(abc', '-', SYNTAX_ERROR),
-('a]', 'a]', SUCCEED,
- 'found', 'a]'),
-('a[]]b', 'a]b', SUCCEED,
- 'found', 'a]b'),
-('a[^bc]d', 'aed', SUCCEED,
- 'found', 'aed'),
-('a[^bc]d', 'abd', FAIL),
-('a[^-b]c', 'adc', SUCCEED,
- 'found', 'adc'),
-('a[^-b]c', 'a-c', FAIL),
-('a[^]b]c', 'a]c', FAIL),
-('a[^]b]c', 'adc', SUCCEED,
- 'found', 'adc'),
-('\\ba\\b', 'a-', SUCCEED,
- '"-"', '-'),
-('\\ba\\b', '-a', SUCCEED,
- '"-"', '-'),
-('\\ba\\b', '-a-', SUCCEED,
- '"-"', '-'),
-('\\by\\b', 'xy', FAIL),
-('\\by\\b', 'yz', FAIL),
-('\\by\\b', 'xyz', FAIL),
-('ab\\|cd', 'abc', SUCCEED,
- 'found', 'ab'),
-('ab\\|cd', 'abcd', SUCCEED,
- 'found', 'ab'),
-('\\(\\)ef', 'def', SUCCEED,
- 'found+"-"+g1', 'ef-'),
-('$b', 'b', FAIL),
-('a(b', 'a(b', SUCCEED,
- 'found+"-"+g1', 'a(b-None'),
-('a(*b', 'ab', SUCCEED,
- 'found', 'ab'),
-('a(*b', 'a((b', SUCCEED,
- 'found', 'a((b'),
-('a\\\\b', 'a\\b', SUCCEED,
- 'found', 'a\\b'),
-('\\(\\(a\\)\\)', 'abc', SUCCEED,
- 'found+"-"+g1+"-"+g2', 'a-a-a'),
-('\\(a\\)b\\(c\\)', 'abc', SUCCEED,
- 'found+"-"+g1+"-"+g2', 'abc-a-c'),
-('a+b+c', 'aabbabc', SUCCEED,
- 'found', 'abc'),
-('\\(a+\\|b\\)*', 'ab', SUCCEED,
- 'found+"-"+g1', 'ab-b'),
-('\\(a+\\|b\\)+', 'ab', SUCCEED,
- 'found+"-"+g1', 'ab-b'),
-('\\(a+\\|b\\)?', 'ab', SUCCEED,
- 'found+"-"+g1', 'a-a'),
-('\\)\\(', '-', SYNTAX_ERROR),
-('[^ab]*', 'cde', SUCCEED,
- 'found', 'cde'),
-('abc', '', FAIL),
-('a*', '', SUCCEED,
- 'found', ''),
-('a\\|b\\|c\\|d\\|e', 'e', SUCCEED,
- 'found', 'e'),
-('\\(a\\|b\\|c\\|d\\|e\\)f', 'ef', SUCCEED,
- 'found+"-"+g1', 'ef-e'),
-('abcd*efg', 'abcdefg', SUCCEED,
- 'found', 'abcdefg'),
-('ab*', 'xabyabbbz', SUCCEED,
- 'found', 'ab'),
-('ab*', 'xayabbbz', SUCCEED,
- 'found', 'a'),
-('\\(ab\\|cd\\)e', 'abcde', SUCCEED,
- 'found+"-"+g1', 'cde-cd'),
-('[abhgefdc]ij', 'hij', SUCCEED,
- 'found', 'hij'),
-('^\\(ab\\|cd\\)e', 'abcde', FAIL,
- 'xg1y', 'xy'),
-('\\(abc\\|\\)ef', 'abcdef', SUCCEED,
- 'found+"-"+g1', 'ef-'),
-('\\(a\\|b\\)c*d', 'abcd', SUCCEED,
- 'found+"-"+g1', 'bcd-b'),
-('\\(ab\\|ab*\\)bc', 'abc', SUCCEED,
- 'found+"-"+g1', 'abc-a'),
-('a\\([bc]*\\)c*', 'abc', SUCCEED,
- 'found+"-"+g1', 'abc-bc'),
-('a\\([bc]*\\)\\(c*d\\)', 'abcd', SUCCEED,
- 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
-('a\\([bc]+\\)\\(c*d\\)', 'abcd', SUCCEED,
- 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
-('a\\([bc]*\\)\\(c+d\\)', 'abcd', SUCCEED,
- 'found+"-"+g1+"-"+g2', 'abcd-b-cd'),
-('a[bcd]*dcdcde', 'adcdcde', SUCCEED,
- 'found', 'adcdcde'),
-('a[bcd]+dcdcde', 'adcdcde', FAIL),
-('\\(ab\\|a\\)b*c', 'abc', SUCCEED,
- 'found+"-"+g1', 'abc-ab'),
-('\\(\\(a\\)\\(b\\)c\\)\\(d\\)', 'abcd', SUCCEED,
- 'g1+"-"+g2+"-"+g3+"-"+g4', 'abc-a-b-d'),
-('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', SUCCEED,
- 'found', 'alpha'),
-('^a\\(bc+\\|b[eh]\\)g\\|.h$', 'abh', SUCCEED,
- 'found+"-"+g1', 'bh-None'),
-('\\(bc+d$\\|ef*g.\\|h?i\\(j\\|k\\)\\)', 'effgz', SUCCEED,
- 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
-('\\(bc+d$\\|ef*g.\\|h?i\\(j\\|k\\)\\)', 'ij', SUCCEED,
- 'found+"-"+g1+"-"+g2', 'ij-ij-j'),
-('\\(bc+d$\\|ef*g.\\|h?i\\(j\\|k\\)\\)', 'effg', FAIL),
-('\\(bc+d$\\|ef*g.\\|h?i\\(j\\|k\\)\\)', 'bcdd', FAIL),
-('\\(bc+d$\\|ef*g.\\|h?i\\(j\\|k\\)\\)', 'reffgz', SUCCEED,
- 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
-('\\(\\(\\(\\(\\(\\(\\(\\(\\(a\\)\\)\\)\\)\\)\\)\\)\\)\\)', 'a', SUCCEED,
- 'found', 'a'),
-('multiple words of text', 'uh-uh', FAIL),
-('multiple words', 'multiple words, yeah', SUCCEED,
- 'found', 'multiple words'),
-('\\(.*\\)c\\(.*\\)', 'abcde', SUCCEED,
- 'found+"-"+g1+"-"+g2', 'abcde-ab-de'),
-('(\\(.*\\), \\(.*\\))', '(a, b)', SUCCEED,
- 'g2+"-"+g1', 'b-a'),
-('[k]', 'ab', FAIL),
-('a[-]?c', 'ac', SUCCEED,
- 'found', 'ac'),
-('\\(abc\\)\\1', 'abcabc', SUCCEED,
- 'g1', 'abc'),
-('\\([a-c]*\\)\\1', 'abcabc', SUCCEED,
- 'g1', 'abc'),
-('^\\(.+\\)?B', 'AB', SUCCEED,
- 'g1', 'A'),
-('\\(a+\\).\\1$', 'aaaaa', SUCCEED,
- 'found+"-"+g1', 'aaaaa-aa'),
-('^\\(a+\\).\\1$', 'aaaa', FAIL),
-('\\(abc\\)\\1', 'abcabc', SUCCEED,
- 'found+"-"+g1', 'abcabc-abc'),
-('\\([a-c]+\\)\\1', 'abcabc', SUCCEED,
- 'found+"-"+g1', 'abcabc-abc'),
-('\\(a\\)\\1', 'aa', SUCCEED,
- 'found+"-"+g1', 'aa-a'),
-('\\(a+\\)\\1', 'aa', SUCCEED,
- 'found+"-"+g1', 'aa-a'),
-('\\(a+\\)+\\1', 'aa', SUCCEED,
- 'found+"-"+g1', 'aa-a'),
-('\\(a\\).+\\1', 'aba', SUCCEED,
- 'found+"-"+g1', 'aba-a'),
-('\\(a\\)ba*\\1', 'aba', SUCCEED,
- 'found+"-"+g1', 'aba-a'),
-('\\(aa\\|a\\)a\\1$', 'aaa', SUCCEED,
- 'found+"-"+g1', 'aaa-a'),
-('\\(a\\|aa\\)a\\1$', 'aaa', SUCCEED,
- 'found+"-"+g1', 'aaa-a'),
-('\\(a+\\)a\\1$', 'aaa', SUCCEED,
- 'found+"-"+g1', 'aaa-a'),
-('\\([abc]*\\)\\1', 'abcabc', SUCCEED,
- 'found+"-"+g1', 'abcabc-abc'),
-('\\(a\\)\\(b\\)c\\|ab', 'ab', SUCCEED,
- 'found+"-"+g1+"-"+g2', 'ab-None-None'),
-('\\(a\\)+x', 'aaax', SUCCEED,
- 'found+"-"+g1', 'aaax-a'),
-('\\([ac]\\)+x', 'aacx', SUCCEED,
- 'found+"-"+g1', 'aacx-c'),
-('\\([^/]*/\\)*sub1/', 'd:msgs/tdir/sub1/trial/away.cpp', SUCCEED,
- 'found+"-"+g1', 'd:msgs/tdir/sub1/-tdir/'),
-('\\([^.]*\\)\\.\\([^:]*\\):[T ]+\\(.*\\)', 'track1.title:TBlah blah blah', SUCCEED,
- 'found+"-"+g1+"-"+g2+"-"+g3', 'track1.title:TBlah blah blah-track1-title-Blah blah blah'),
-('\\([^N]*N\\)+', 'abNNxyzN', SUCCEED,
- 'found+"-"+g1', 'abNNxyzN-xyzN'),
-('\\([^N]*N\\)+', 'abNNxyz', SUCCEED,
- 'found+"-"+g1', 'abNN-N'),
-('\\([abc]*\\)x', 'abcx', SUCCEED,
- 'found+"-"+g1', 'abcx-abc'),
-('\\([abc]*\\)x', 'abc', FAIL),
-('\\([xyz]*\\)x', 'abcx', SUCCEED,
- 'found+"-"+g1', 'x-'),
-('\\(a\\)+b\\|aac', 'aac', SUCCEED,
- 'found+"-"+g1', 'aac-None'),
-('\<a', 'a', SUCCEED, 'found', 'a'),
-('\<a', '!', FAIL),
-('a\<b', 'ab', FAIL),
-('a\>', 'ab', FAIL),
-('a\>', 'a!', SUCCEED, 'found', 'a'),
-('a\>', 'a', SUCCEED, 'found', 'a'),
-]
-
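A sketch of a driver for the tuple format documented above; it only checks the SUCCEED/FAIL/SYNTAX_ERROR outcome and skips the eval()-based verification step (elements 3 and 4). The function name is illustrative and the obsolete regex module is assumed:

    import regex

    SUCCEED, FAIL, SYNTAX_ERROR = range(3)    # same convention as the file above

    def check_outcomes(tests):
        for t in tests:
            pattern, s, outcome = t[0], t[1], t[2]
            try:
                pos = regex.search(pattern, s)    # -1 means no match
            except regex.error:
                got = SYNTAX_ERROR
            else:
                if pos >= 0:
                    got = SUCCEED
                else:
                    got = FAIL
            if got != outcome:
                print "unexpected outcome for", repr(pattern), "against", repr(s)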
diff --git a/Lib/dos-8x3/rlcomple.py b/Lib/dos-8x3/rlcomple.py
deleted file mode 100644
index 8cd21ed..0000000
--- a/Lib/dos-8x3/rlcomple.py
+++ /dev/null
@@ -1,120 +0,0 @@
-"""Word completion for GNU readline 2.0.
-
-This requires the latest extension to the readline module.  The
-completer completes keywords, built-ins and globals in __main__;
-when completing NAME.NAME..., it evaluates (!) the expression up to
-the last dot and completes its attributes.
-
-It's very cool to do "import string", type "string.", hit the
-completion key (twice), and see the list of names defined by the
-string module!
-
-Tip: to use the tab key as the completion key, call
-
- readline.parse_and_bind("tab: complete")
-
-Notes:
-
-- Exceptions raised by the completer function are *ignored* (and
-generally cause the completion to fail). This is a feature -- since
-readline sets the tty device in raw (or cbreak) mode, printing a
-traceback wouldn't work well without some complicated hoopla to save,
-reset and restore the tty state.
-
-- The evaluation of the NAME.NAME... form may cause arbitrary
-application defined code to be executed if an object with a
-__getattr__ hook is found. Since it is the responsibility of the
-application (or the user) to enable this feature, I consider this an
-acceptable risk. More complicated expressions (e.g. function calls or
-indexing operations) are *not* evaluated.
-
-- GNU readline is also used by the built-in functions input() and
-raw_input(), and thus these also benefit/suffer from the completer
-features. Clearly an interactive application can benefit by
-specifying its own completer function and using raw_input() for all
-its input.
-
-- When the original stdin is not a tty device, GNU readline is never
-used, and this module (and the readline module) are silently inactive.
-
-"""
-
-import readline
-import __builtin__
-import __main__
-
-class Completer:
-
- def complete(self, text, state):
- """Return the next possible completion for 'text'.
-
- This is called successively with state == 0, 1, 2, ... until it
- returns None. The completion should begin with 'text'.
-
- """
- if state == 0:
- if "." in text:
- self.matches = self.attr_matches(text)
- else:
- self.matches = self.global_matches(text)
- try:
- return self.matches[state]
- except IndexError:
- return None
-
- def global_matches(self, text):
- """Compute matches when text is a simple name.
-
- Return a list of all keywords, built-in functions and names
-        currently defined in __main__ that match.
-
- """
- import keyword
- matches = []
- n = len(text)
- for list in [keyword.kwlist,
- __builtin__.__dict__.keys(),
- __main__.__dict__.keys()]:
- for word in list:
- if word[:n] == text and word != "__builtins__":
- matches.append(word)
- return matches
-
- def attr_matches(self, text):
- """Compute matches when text contains a dot.
-
- Assuming the text is of the form NAME.NAME....[NAME], and is
- evaluatable in the globals of __main__, it will be evaluated
- and its attributes (as revealed by dir()) are used as possible
-        completions. (For class instances, class members are also
- considered.)
-
- WARNING: this can still invoke arbitrary C code, if an object
- with a __getattr__ hook is evaluated.
-
- """
- import re
- m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
- if not m:
- return
- expr, attr = m.group(1, 3)
- object = eval(expr, __main__.__dict__)
- words = dir(object)
- if hasattr(object,'__class__'):
- words.append('__class__')
- words = words + get_class_members(object.__class__)
- matches = []
- n = len(attr)
- for word in words:
- if word[:n] == attr and word != "__builtins__":
- matches.append("%s.%s" % (expr, word))
- return matches
-
-def get_class_members(klass):
- ret = dir(klass)
- if hasattr(klass,'__bases__'):
- for base in klass.__bases__:
- ret = ret + get_class_members(base)
- return ret
-
-readline.set_completer(Completer().complete)
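Typical interactive setup for the completer installed by the line above, using the canonical (non-8.3) module names; placing this in a PYTHONSTARTUP file enables tab completion in every interactive session:

    import readline
    import rlcompleter          # importing the module registers the completer
    readline.parse_and_bind("tab: complete")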
diff --git a/Lib/dos-8x3/robotpar.py b/Lib/dos-8x3/robotpar.py
deleted file mode 100644
index 3f4396b..0000000
--- a/Lib/dos-8x3/robotpar.py
+++ /dev/null
@@ -1,97 +0,0 @@
-"""
-
-Robots.txt file parser class. Accepts a list of lines or robots.txt URL as
-input, builds a set of rules from that list, then answers questions about
-fetchability of other URLs.
-
-"""
-
-class RobotFileParser:
-
- def __init__(self):
- self.rules = {}
- self.debug = 0
- self.url = ''
- self.last_checked = 0
-
- def mtime(self):
- return self.last_checked
-
- def modified(self):
- import time
- self.last_checked = time.time()
-
- def set_url(self, url):
- self.url = url
-
- def read(self):
- import urllib
- self.parse(urllib.urlopen(self.url).readlines())
-
- def parse(self, lines):
-        """parse the input lines from a robots.txt file"""
- import string, re
- active = []
- for line in lines:
- if self.debug: print '>', line,
- # blank line terminates current record
- if not line[:-1]:
- active = []
- continue
- # remove optional comment and strip line
- line = string.strip(line[:string.find(line, '#')])
- if not line:
- continue
- line = re.split(' *: *', line)
- if len(line) == 2:
- line[0] = string.lower(line[0])
- if line[0] == 'user-agent':
- # this record applies to this user agent
- if self.debug: print '>> user-agent:', line[1]
- active.append(line[1])
- if not self.rules.has_key(line[1]):
- self.rules[line[1]] = []
- elif line[0] == 'disallow':
- if line[1]:
- if self.debug: print '>> disallow:', line[1]
- for agent in active:
- self.rules[agent].append(re.compile(line[1]))
- else:
- pass
- for agent in active:
- if self.debug: print '>> allow', agent
- self.rules[agent] = []
- else:
- if self.debug: print '>> unknown:', line
-
- self.modified()
-
- # returns true if agent is allowed to fetch url
- def can_fetch(self, useragent, url):
- """using the parsed robots.txt decide if useragent can fetch url"""
- import urlparse
- ag = useragent
- if not self.rules.has_key(ag): ag = '*'
- if not self.rules.has_key(ag):
- if self.debug: print '>> allowing', url, 'fetch by', useragent
- return 1
- path = urlparse.urlparse(url)[2]
- for rule in self.rules[ag]:
- if rule.match(path) is not None:
- if self.debug: print '>> disallowing', url, 'fetch by', useragent
- return 0
- if self.debug: print '>> allowing', url, 'fetch by', useragent
- return 1
-
-def _test():
- rp = RobotFileParser()
- rp.debug = 1
- rp.set_url('http://www.musi-cal.com/robots.txt')
- rp.read()
- print rp.rules
- print rp.can_fetch('*', 'http://www.musi-cal.com.com/')
- print rp.can_fetch('Musi-Cal-Robot',
- 'http://www.musi-cal.com/cgi-bin/event-search?city=San+Francisco')
-
-if __name__ == "__main__":
- _test()
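Besides reading from a URL as in _test(), the class docstring says the parser accepts a list of lines directly. A hedged sketch of that mode, with made-up robots.txt content and the canonical module name robotparser assumed:

    from robotparser import RobotFileParser

    rp = RobotFileParser()
    rp.parse([
        "User-agent: *\n",
        "Disallow: /private\n",
    ])
    print rp.can_fetch("*", "http://example.com/private/index.html")  # blocked by the rule above
    print rp.can_fetch("*", "http://example.com/index.html")          # no rule matches, allowed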
diff --git a/Lib/dos-8x3/simpleht.py b/Lib/dos-8x3/simpleht.py
deleted file mode 100755
index 4cfedbc..0000000
--- a/Lib/dos-8x3/simpleht.py
+++ /dev/null
@@ -1,198 +0,0 @@
-"""Simple HTTP Server.
-
-This module builds on BaseHTTPServer by implementing the standard GET
-and HEAD requests in a fairly straightforward manner.
-
-"""
-
-
-__version__ = "0.5"
-
-
-import os
-import string
-import posixpath
-import BaseHTTPServer
-import urllib
-import cgi
-import shutil
-from StringIO import StringIO
-
-
-class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
-
- """Simple HTTP request handler with GET and HEAD commands.
-
- This serves files from the current directory and any of its
- subdirectories. It assumes that all files are plain text files
- unless they have the extension ".html" in which case it assumes
- they are HTML files.
-
- The GET and HEAD requests are identical except that the HEAD
- request omits the actual contents of the file.
-
- """
-
- server_version = "SimpleHTTP/" + __version__
-
- def do_GET(self):
- """Serve a GET request."""
- f = self.send_head()
- if f:
- self.copyfile(f, self.wfile)
- f.close()
-
- def do_HEAD(self):
- """Serve a HEAD request."""
- f = self.send_head()
- if f:
- f.close()
-
- def send_head(self):
- """Common code for GET and HEAD commands.
-
- This sends the response code and MIME headers.
-
- Return value is either a file object (which has to be copied
- to the outputfile by the caller unless the command was HEAD,
- and must be closed by the caller under all circumstances), or
- None, in which case the caller has nothing further to do.
-
- """
- path = self.translate_path(self.path)
- f = None
- if os.path.isdir(path):
- for index in "index.html", "index.htm":
- index = os.path.join(path, index)
- if os.path.exists(index):
- path = index
- break
- else:
- return self.list_directory(path)
- ctype = self.guess_type(path)
- if ctype.startswith('text/'):
- mode = 'r'
- else:
- mode = 'rb'
- try:
- f = open(path, mode)
- except IOError:
- self.send_error(404, "File not found")
- return None
- self.send_response(200)
- self.send_header("Content-type", ctype)
- self.end_headers()
- return f
-
- def list_directory(self, path):
- """Helper to produce a directory listing (absent index.html).
-
- Return value is either a file object, or None (indicating an
- error). In either case, the headers are sent, making the
- interface the same as for send_head().
-
- """
- try:
- list = os.listdir(path)
- except os.error:
-            self.send_error(404, "No permission to list directory")
- return None
- list.sort(lambda a, b: cmp(a.lower(), b.lower()))
- f = StringIO()
- f.write("<title>Directory listing for %s</title>\n" % self.path)
- f.write("<h2>Directory listing for %s</h2>\n" % self.path)
- f.write("<hr>\n<ul>\n")
- for name in list:
- fullname = os.path.join(path, name)
- displayname = linkname = name = cgi.escape(name)
- # Append / for directories or @ for symbolic links
- if os.path.isdir(fullname):
- displayname = name + "/"
- linkname = name + "/"
- if os.path.islink(fullname):
- displayname = name + "@"
- # Note: a link to a directory displays with @ and links with /
- f.write('<li><a href="%s">%s</a>\n' % (linkname, displayname))
- f.write("</ul>\n<hr>\n")
- f.seek(0)
- self.send_response(200)
- self.send_header("Content-type", "text/html")
- self.end_headers()
- return f
-
- def translate_path(self, path):
- """Translate a /-separated PATH to the local filename syntax.
-
- Components that mean special things to the local file system
- (e.g. drive or directory names) are ignored. (XXX They should
- probably be diagnosed.)
-
- """
- path = posixpath.normpath(urllib.unquote(path))
- words = string.splitfields(path, '/')
- words = filter(None, words)
- path = os.getcwd()
- for word in words:
- drive, word = os.path.splitdrive(word)
- head, word = os.path.split(word)
- if word in (os.curdir, os.pardir): continue
- path = os.path.join(path, word)
- return path
-
- def copyfile(self, source, outputfile):
- """Copy all data between two file objects.
-
- The SOURCE argument is a file object open for reading
- (or anything with a read() method) and the DESTINATION
- argument is a file object open for writing (or
- anything with a write() method).
-
- The only reason for overriding this would be to change
- the block size or perhaps to replace newlines by CRLF
-        -- note however that the default server uses this
- to copy binary data as well.
-
- """
- shutil.copyfileobj(source, outputfile)
-
- def guess_type(self, path):
- """Guess the type of a file.
-
- Argument is a PATH (a filename).
-
- Return value is a string of the form type/subtype,
- usable for a MIME Content-type header.
-
- The default implementation looks the file's extension
- up in the table self.extensions_map, using text/plain
- as a default; however it would be permissible (if
- slow) to look inside the data to make a better guess.
-
- """
-
- base, ext = posixpath.splitext(path)
- if self.extensions_map.has_key(ext):
- return self.extensions_map[ext]
- ext = string.lower(ext)
- if self.extensions_map.has_key(ext):
- return self.extensions_map[ext]
- else:
- return self.extensions_map['']
-
- extensions_map = {
- '': 'text/plain', # Default, *must* be present
- '.html': 'text/html',
- '.htm': 'text/html',
- '.gif': 'image/gif',
- '.jpg': 'image/jpeg',
- '.jpeg': 'image/jpeg',
- }
-
-
-def test(HandlerClass = SimpleHTTPRequestHandler,
- ServerClass = BaseHTTPServer.HTTPServer):
- BaseHTTPServer.test(HandlerClass, ServerClass)
-
-
-if __name__ == '__main__':
- test()
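A hedged sketch of customizing the handler deleted above: copy the class-level extensions_map, add one MIME type, and serve the current directory. The canonical module names SimpleHTTPServer and BaseHTTPServer are assumed; the port and class name are arbitrary:

    import BaseHTTPServer
    import SimpleHTTPServer

    class PNGHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
        # guess_type() falls back to the '' entry (text/plain) for anything
        # not listed, so only the new extension needs to be added
        extensions_map = SimpleHTTPServer.SimpleHTTPRequestHandler.extensions_map.copy()
        extensions_map[".png"] = "image/png"

    httpd = BaseHTTPServer.HTTPServer(("", 8000), PNGHandler)
    httpd.serve_forever()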
diff --git a/Lib/dos-8x3/socketse.py b/Lib/dos-8x3/socketse.py
deleted file mode 100755
index a263f8e..0000000
--- a/Lib/dos-8x3/socketse.py
+++ /dev/null
@@ -1,447 +0,0 @@
-"""Generic socket server classes.
-
-This module tries to capture the various aspects of defining a server:
-
-- address family:
- - AF_INET: IP (Internet Protocol) sockets (default)
- - AF_UNIX: Unix domain sockets
-        - others, e.g. AF_DECNET are conceivable (see <socket.h>)
-- socket type:
- - SOCK_STREAM (reliable stream, e.g. TCP)
- - SOCK_DGRAM (datagrams, e.g. UDP)
-- client address verification before further looking at the request
- (This is actually a hook for any processing that needs to look
- at the request before anything else, e.g. logging)
-- how to handle multiple requests:
- - synchronous (one request is handled at a time)
- - forking (each request is handled by a new process)
- - threading (each request is handled by a new thread)
-
-The classes in this module favor the server type that is simplest to
-write: a synchronous TCP/IP server. This is bad class design, but
-saves some typing. (There's also the issue that a deep class hierarchy
-slows down method lookups.)
-
-There are four classes in an inheritance diagram that represent
-synchronous servers of four types:
-
- +-----------+ +------------------+
- | TCPServer |------->| UnixStreamServer |
- +-----------+ +------------------+
- |
- v
- +-----------+ +--------------------+
- | UDPServer |------->| UnixDatagramServer |
- +-----------+ +--------------------+
-
-Note that UnixDatagramServer derives from UDPServer, not from
-UnixStreamServer -- the only difference between an IP and a Unix
-stream server is the address family, which is simply repeated in both
-unix server classes.
-
-Forking and threading versions of each type of server can be created
-using the ForkingMixIn and ThreadingMixIn mix-in classes. For
-instance, a threading UDP server class is created as follows:
-
- class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
-
-The Mix-in class must come first, since it overrides a method defined
-in UDPServer!
-
-To implement a service, you must derive a class from
-BaseRequestHandler and redefine its handle() method. You can then run
-various versions of the service by combining one of the server classes
-with your request handler class.
-
-The request handler class must be different for datagram or stream
-services. This can be hidden by using the mix-in request handler
-classes StreamRequestHandler or DatagramRequestHandler.
-
-Of course, you still have to use your head!
-
-For instance, it makes no sense to use a forking server if the service
-contains state in memory that can be modified by requests (since the
-modifications in the child process would never reach the initial state
-kept in the parent process and passed to each child). In this case,
-you can use a threading server, but you will probably have to use
-locks to avoid two requests that come in nearly simultaneously from
-applying conflicting changes to the server state.
-
-On the other hand, if you are building e.g. an HTTP server, where all
-data is stored externally (e.g. in the file system), a synchronous
-class will essentially render the service "deaf" while one request is
-being handled -- which may be for a very long time if a client is slow
-to read all the data it has requested. Here a threading or forking
-server is appropriate.
-
-In some cases, it may be appropriate to process part of a request
-synchronously, but to finish processing in a forked child depending on
-the request data. This can be implemented by using a synchronous
-server and doing an explicit fork in the request handler class's
-handle() method.
-
-Another approach to handling multiple simultaneous requests in an
-environment that supports neither threads nor fork (or where these are
-too expensive or inappropriate for the service) is to maintain an
-explicit table of partially finished requests and to use select() to
-decide which request to work on next (or whether to handle a new
-incoming request). This is particularly important for stream services
-where each client can potentially be connected for a long time (if
-threads or subprocesses can't be used).
-
-Future work:
-- Standard classes for Sun RPC (which uses either UDP or TCP)
-- Standard mix-in classes to implement various authentication
- and encryption schemes
-- Standard framework for select-based multiplexing
-
-XXX Open problems:
-- What to do with out-of-band data?
-
-"""
-
-
-__version__ = "0.2"
-
-
-import socket
-import sys
-import os
-
-
-class TCPServer:
-
- """Base class for various socket-based server classes.
-
- Defaults to synchronous IP stream (i.e., TCP).
-
- Methods for the caller:
-
- - __init__(server_address, RequestHandlerClass)
- - serve_forever()
- - handle_request() # if you don't use serve_forever()
- - fileno() -> int # for select()
-
- Methods that may be overridden:
-
- - server_bind()
- - server_activate()
- - get_request() -> request, client_address
- - verify_request(request, client_address)
- - process_request(request, client_address)
- - handle_error()
-
- Methods for derived classes:
-
- - finish_request(request, client_address)
-
- Class variables that may be overridden by derived classes or
- instances:
-
- - address_family
- - socket_type
- - request_queue_size (only for stream sockets)
- - reuse_address
-
- Instance variables:
-
- - server_address
- - RequestHandlerClass
- - socket
-
- """
-
- address_family = socket.AF_INET
-
- socket_type = socket.SOCK_STREAM
-
- request_queue_size = 5
-
- allow_reuse_address = 0
-
- def __init__(self, server_address, RequestHandlerClass):
- """Constructor. May be extended, do not override."""
- self.server_address = server_address
- self.RequestHandlerClass = RequestHandlerClass
- self.socket = socket.socket(self.address_family,
- self.socket_type)
- self.server_bind()
- self.server_activate()
-
- def server_bind(self):
- """Called by constructor to bind the socket.
-
- May be overridden.
-
- """
- if self.allow_reuse_address:
- self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- self.socket.bind(self.server_address)
-
- def server_activate(self):
- """Called by constructor to activate the server.
-
- May be overridden.
-
- """
- self.socket.listen(self.request_queue_size)
-
- def fileno(self):
- """Return socket file number.
-
- Interface required by select().
-
- """
- return self.socket.fileno()
-
- def serve_forever(self):
- """Handle one request at a time until doomsday."""
- while 1:
- self.handle_request()
-
- # The distinction between handling, getting, processing and
- # finishing a request is fairly arbitrary. Remember:
- #
- # - handle_request() is the top-level call. It calls
- # get_request(), verify_request() and process_request()
- # - get_request() is different for stream or datagram sockets
- # - process_request() is the place that may fork a new process
- # or create a new thread to finish the request
- # - finish_request() instantiates the request handler class;
- # this constructor will handle the request all by itself
-
- def handle_request(self):
- """Handle one request, possibly blocking."""
- try:
- request, client_address = self.get_request()
- except socket.error:
- return
- if self.verify_request(request, client_address):
- try:
- self.process_request(request, client_address)
- except:
- self.handle_error(request, client_address)
-
- def get_request(self):
- """Get the request and client address from the socket.
-
- May be overridden.
-
- """
- return self.socket.accept()
-
- def verify_request(self, request, client_address):
- """Verify the request. May be overridden.
-
- Return true if we should proceed with this request.
-
- """
- return 1
-
- def process_request(self, request, client_address):
- """Call finish_request.
-
- Overridden by ForkingMixIn and ThreadingMixIn.
-
- """
- self.finish_request(request, client_address)
-
- def finish_request(self, request, client_address):
- """Finish one request by instantiating RequestHandlerClass."""
- self.RequestHandlerClass(request, client_address, self)
-
- def handle_error(self, request, client_address):
- """Handle an error gracefully. May be overridden.
-
- The default is to print a traceback and continue.
-
- """
- print '-'*40
- print 'Exception happened during processing of request from',
- print client_address
- import traceback
- traceback.print_exc()
- print '-'*40
-
-
-class UDPServer(TCPServer):
-
- """UDP server class."""
-
- socket_type = socket.SOCK_DGRAM
-
- max_packet_size = 8192
-
- def get_request(self):
- data, client_addr = self.socket.recvfrom(self.max_packet_size)
- return (data, self.socket), client_addr
-
- def server_activate(self):
- # No need to call listen() for UDP.
- pass
-
-
-class ForkingMixIn:
-
- """Mix-in class to handle each request in a new process."""
-
- active_children = None
- max_children = 40
-
- def collect_children(self):
-        """Internal routine to wait for dead children."""
- while self.active_children:
- if len(self.active_children) < self.max_children:
- options = os.WNOHANG
- else:
- # If the maximum number of children are already
- # running, block while waiting for a child to exit
- options = 0
- try:
- pid, status = os.waitpid(0, options)
- except os.error:
- pid = None
- if not pid: break
- self.active_children.remove(pid)
-
- def process_request(self, request, client_address):
- """Fork a new subprocess to process the request."""
- self.collect_children()
- pid = os.fork()
- if pid:
- # Parent process
- if self.active_children is None:
- self.active_children = []
- self.active_children.append(pid)
- return
- else:
- # Child process.
- # This must never return, hence os._exit()!
- try:
- self.socket.close()
- self.finish_request(request, client_address)
- os._exit(0)
- except:
- try:
- self.handle_error(request,
- client_address)
- finally:
- os._exit(1)
-
-
-class ThreadingMixIn:
- """Mix-in class to handle each request in a new thread."""
-
- def process_request(self, request, client_address):
- """Start a new thread to process the request."""
- import threading
- t = threading.Thread(target = self.finish_request,
- args = (request, client_address))
- t.start()
-
-
-class ForkingUDPServer(ForkingMixIn, UDPServer): pass
-class ForkingTCPServer(ForkingMixIn, TCPServer): pass
-
-class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
-class ThreadingTCPServer(ThreadingMixIn, TCPServer): pass
-
-if hasattr(socket, 'AF_UNIX'):
-
- class UnixStreamServer(TCPServer):
- address_family = socket.AF_UNIX
-
- class UnixDatagramServer(UDPServer):
- address_family = socket.AF_UNIX
-
- class ThreadingUnixStreamServer(ThreadingMixIn, UnixStreamServer): pass
-
- class ThreadingUnixDatagramServer(ThreadingMixIn, UnixDatagramServer): pass
-
-class BaseRequestHandler:
-
- """Base class for request handler classes.
-
- This class is instantiated for each request to be handled. The
- constructor sets the instance variables request, client_address
- and server, and then calls the handle() method. To implement a
- specific service, all you need to do is to derive a class which
- defines a handle() method.
-
- The handle() method can find the request as self.request, the
- client address as self.client_address, and the server (in case it
- needs access to per-server information) as self.server. Since a
- separate instance is created for each request, the handle() method
-    can define arbitrary other instance variables.
-
- """
-
- def __init__(self, request, client_address, server):
- self.request = request
- self.client_address = client_address
- self.server = server
- try:
- self.setup()
- self.handle()
- self.finish()
- finally:
- sys.exc_traceback = None # Help garbage collection
-
- def setup(self):
- pass
-
- def __del__(self):
- pass
-
- def handle(self):
- pass
-
- def finish(self):
- pass
-
-
-# The following two classes make it possible to use the same service
-# class for stream or datagram servers.
-# Each class sets up these instance variables:
-# - rfile: a file object from which the request is read
-# - wfile: a file object to which the reply is written
-# When the handle() method returns, wfile is flushed properly
-
-
-class StreamRequestHandler(BaseRequestHandler):
-
- """Define self.rfile and self.wfile for stream sockets."""
-
- # Default buffer sizes for rfile, wfile.
- # We default rfile to buffered because otherwise it could be
- # really slow for large data (a getc() call per byte); we make
- # wfile unbuffered because (a) often after a write() we want to
- # read and we need to flush the line; (b) big writes to unbuffered
- # files are typically optimized by stdio even when big reads
- # aren't.
- rbufsize = -1
- wbufsize = 0
-
- def setup(self):
- self.connection = self.request
- self.rfile = self.connection.makefile('rb', self.rbufsize)
- self.wfile = self.connection.makefile('wb', self.wbufsize)
-
- def finish(self):
- self.wfile.flush()
- self.wfile.close()
- self.rfile.close()
-
-
-class DatagramRequestHandler(BaseRequestHandler):
-
- """Define self.rfile and self.wfile for datagram sockets."""
-
- def setup(self):
- import StringIO
- self.packet, self.socket = self.request
- self.rfile = StringIO.StringIO(self.packet)
- self.wfile = StringIO.StringIO(self.packet)
-
- def finish(self):
- self.socket.sendto(self.wfile.getvalue(), self.client_address)
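A minimal line-echo service built the way the module docstring describes, i.e. a handler class combined with one of the server classes; the class name and port are illustrative and the canonical module name SocketServer is assumed:

    import SocketServer

    class EchoHandler(SocketServer.StreamRequestHandler):
        def handle(self):
            # rfile/wfile were set up by StreamRequestHandler.setup()
            line = self.rfile.readline()
            self.wfile.write(line)

    server = SocketServer.ThreadingTCPServer(("", 9999), EchoHandler)
    server.serve_forever()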
diff --git a/Lib/dos-8x3/sre_comp.py b/Lib/dos-8x3/sre_comp.py
deleted file mode 100644
index dc508e5..0000000
--- a/Lib/dos-8x3/sre_comp.py
+++ /dev/null
@@ -1,381 +0,0 @@
-#
-# Secret Labs' Regular Expression Engine
-#
-# convert template to internal format
-#
-# Copyright (c) 1997-2000 by Secret Labs AB. All rights reserved.
-#
-# See the sre.py file for information on usage and redistribution.
-#
-
-import _sre
-
-from sre_constants import *
-
-MAXCODE = 65535
-
-def _compile(code, pattern, flags):
- # internal: compile a (sub)pattern
- emit = code.append
- for op, av in pattern:
- if op in (LITERAL, NOT_LITERAL):
- if flags & SRE_FLAG_IGNORECASE:
- emit(OPCODES[OP_IGNORE[op]])
- else:
- emit(OPCODES[op])
- emit(av)
- elif op is IN:
- if flags & SRE_FLAG_IGNORECASE:
- emit(OPCODES[OP_IGNORE[op]])
- def fixup(literal, flags=flags):
- return _sre.getlower(literal, flags)
- else:
- emit(OPCODES[op])
- fixup = lambda x: x
- skip = len(code); emit(0)
- _compile_charset(av, flags, code, fixup)
- code[skip] = len(code) - skip
- elif op is ANY:
- if flags & SRE_FLAG_DOTALL:
- emit(OPCODES[ANY_ALL])
- else:
- emit(OPCODES[ANY])
- elif op in (REPEAT, MIN_REPEAT, MAX_REPEAT):
- if flags & SRE_FLAG_TEMPLATE:
- raise error, "internal: unsupported template operator"
- emit(OPCODES[REPEAT])
- skip = len(code); emit(0)
- emit(av[0])
- emit(av[1])
- _compile(code, av[2], flags)
- emit(OPCODES[SUCCESS])
- code[skip] = len(code) - skip
- elif _simple(av) and op == MAX_REPEAT:
- emit(OPCODES[REPEAT_ONE])
- skip = len(code); emit(0)
- emit(av[0])
- emit(av[1])
- _compile(code, av[2], flags)
- emit(OPCODES[SUCCESS])
- code[skip] = len(code) - skip
- else:
- emit(OPCODES[REPEAT])
- skip = len(code); emit(0)
- emit(av[0])
- emit(av[1])
- _compile(code, av[2], flags)
- code[skip] = len(code) - skip
- if op == MAX_REPEAT:
- emit(OPCODES[MAX_UNTIL])
- else:
- emit(OPCODES[MIN_UNTIL])
- elif op is SUBPATTERN:
- if av[0]:
- emit(OPCODES[MARK])
- emit((av[0]-1)*2)
- # _compile_info(code, av[1], flags)
- _compile(code, av[1], flags)
- if av[0]:
- emit(OPCODES[MARK])
- emit((av[0]-1)*2+1)
- elif op in (SUCCESS, FAILURE):
- emit(OPCODES[op])
- elif op in (ASSERT, ASSERT_NOT):
- emit(OPCODES[op])
- skip = len(code); emit(0)
- if av[0] >= 0:
- emit(0) # look ahead
- else:
- lo, hi = av[1].getwidth()
- if lo != hi:
- raise error, "look-behind requires fixed-width pattern"
- emit(lo) # look behind
- _compile(code, av[1], flags)
- emit(OPCODES[SUCCESS])
- code[skip] = len(code) - skip
- elif op is CALL:
- emit(OPCODES[op])
- skip = len(code); emit(0)
- _compile(code, av, flags)
- emit(OPCODES[SUCCESS])
- code[skip] = len(code) - skip
- elif op is AT:
- emit(OPCODES[op])
- if flags & SRE_FLAG_MULTILINE:
- emit(ATCODES[AT_MULTILINE.get(av, av)])
- else:
- emit(ATCODES[av])
- elif op is BRANCH:
- emit(OPCODES[op])
- tail = []
- for av in av[1]:
- skip = len(code); emit(0)
- # _compile_info(code, av, flags)
- _compile(code, av, flags)
- emit(OPCODES[JUMP])
- tail.append(len(code)); emit(0)
- code[skip] = len(code) - skip
- emit(0) # end of branch
- for tail in tail:
- code[tail] = len(code) - tail
- elif op is CATEGORY:
- emit(OPCODES[op])
- if flags & SRE_FLAG_LOCALE:
- emit(CHCODES[CH_LOCALE[av]])
- elif flags & SRE_FLAG_UNICODE:
- emit(CHCODES[CH_UNICODE[av]])
- else:
- emit(CHCODES[av])
- elif op is GROUPREF:
- if flags & SRE_FLAG_IGNORECASE:
- emit(OPCODES[OP_IGNORE[op]])
- else:
- emit(OPCODES[op])
- emit(av-1)
- else:
- raise ValueError, ("unsupported operand type", op)
-
-def _compile_charset(charset, flags, code, fixup=None):
- # compile charset subprogram
- emit = code.append
- if not fixup:
- fixup = lambda x: x
- for op, av in _optimize_charset(charset, fixup):
- emit(OPCODES[op])
- if op is NEGATE:
- pass
- elif op is LITERAL:
- emit(fixup(av))
- elif op is RANGE:
- emit(fixup(av[0]))
- emit(fixup(av[1]))
- elif op is CHARSET:
- code.extend(av)
- elif op is CATEGORY:
- if flags & SRE_FLAG_LOCALE:
- emit(CHCODES[CH_LOCALE[av]])
- elif flags & SRE_FLAG_UNICODE:
- emit(CHCODES[CH_UNICODE[av]])
- else:
- emit(CHCODES[av])
- else:
- raise error, "internal: unsupported set operator"
- emit(OPCODES[FAILURE])
-
-def _optimize_charset(charset, fixup):
- # internal: optimize character set
- out = []
- charmap = [0]*256
- try:
- for op, av in charset:
- if op is NEGATE:
- out.append((op, av))
- elif op is LITERAL:
- charmap[fixup(av)] = 1
- elif op is RANGE:
- for i in range(fixup(av[0]), fixup(av[1])+1):
- charmap[i] = 1
- elif op is CATEGORY:
- # FIXME: could append to charmap tail
- return charset # cannot compress
- except IndexError:
- # character set contains unicode characters
- return charset
- # compress character map
- i = p = n = 0
- runs = []
- for c in charmap:
- if c:
- if n == 0:
- p = i
- n = n + 1
- elif n:
- runs.append((p, n))
- n = 0
- i = i + 1
- if n:
- runs.append((p, n))
- if len(runs) <= 2:
- # use literal/range
- for p, n in runs:
- if n == 1:
- out.append((LITERAL, p))
- else:
- out.append((RANGE, (p, p+n-1)))
- if len(out) < len(charset):
- return out
- else:
- # use bitmap
- data = []
- m = 1; v = 0
- for c in charmap:
- if c:
- v = v + m
- m = m << 1
- if m > MAXCODE:
- data.append(v)
- m = 1; v = 0
- out.append((CHARSET, data))
- return out
- return charset
-
-def _simple(av):
- # check if av is a "simple" operator
- lo, hi = av[2].getwidth()
- if lo == 0 and hi == MAXREPEAT:
- raise error, "nothing to repeat"
- return lo == hi == 1 and av[2][0][0] != SUBPATTERN
-
-def _compile_info(code, pattern, flags):
- # internal: compile an info block. in the current version,
- # this contains min/max pattern width, and an optional literal
- # prefix or a character map
- lo, hi = pattern.getwidth()
- if lo == 0:
- return # not worth it
- # look for a literal prefix
- prefix = []
- prefix_skip = 0
- charset = [] # not used
- if not (flags & SRE_FLAG_IGNORECASE):
- # look for literal prefix
- for op, av in pattern.data:
- if op is LITERAL:
- if len(prefix) == prefix_skip:
- prefix_skip = prefix_skip + 1
- prefix.append(av)
- elif op is SUBPATTERN and len(av[1]) == 1:
- op, av = av[1][0]
- if op is LITERAL:
- prefix.append(av)
- else:
- break
- else:
- break
- # if no prefix, look for charset prefix
- if not prefix and pattern.data:
- op, av = pattern.data[0]
- if op is SUBPATTERN and av[1]:
- op, av = av[1][0]
- if op is LITERAL:
- charset.append((op, av))
- elif op is BRANCH:
- c = []
- for p in av[1]:
- if not p:
- break
- op, av = p[0]
- if op is LITERAL:
- c.append((op, av))
- else:
- break
- else:
- charset = c
- elif op is BRANCH:
- c = []
- for p in av[1]:
- if not p:
- break
- op, av = p[0]
- if op is LITERAL:
- c.append((op, av))
- else:
- break
- else:
- charset = c
- elif op is IN:
- charset = av
-## if prefix:
-## print "*** PREFIX", prefix, prefix_skip
-## if charset:
-## print "*** CHARSET", charset
- # add an info block
- emit = code.append
- emit(OPCODES[INFO])
- skip = len(code); emit(0)
- # literal flag
- mask = 0
- if prefix:
- mask = SRE_INFO_PREFIX
- if len(prefix) == prefix_skip == len(pattern.data):
- mask = mask + SRE_INFO_LITERAL
- elif charset:
- mask = mask + SRE_INFO_CHARSET
- emit(mask)
- # pattern length
- if lo < MAXCODE:
- emit(lo)
- else:
- emit(MAXCODE)
- prefix = prefix[:MAXCODE]
- if hi < MAXCODE:
- emit(hi)
- else:
- emit(0)
- # add literal prefix
- if prefix:
- emit(len(prefix)) # length
- emit(prefix_skip) # skip
- code.extend(prefix)
- # generate overlap table
- table = [-1] + ([0]*len(prefix))
- for i in range(len(prefix)):
- table[i+1] = table[i]+1
- while table[i+1] > 0 and prefix[i] != prefix[table[i+1]-1]:
- table[i+1] = table[table[i+1]-1]+1
- code.extend(table[1:]) # don't store first entry
- elif charset:
- _compile_charset(charset, 0, code)
- code[skip] = len(code) - skip
-
-STRING_TYPES = [type("")]
-
-try:
- STRING_TYPES.append(type(unicode("")))
-except NameError:
- pass
-
-def _code(p, flags):
-
- flags = p.pattern.flags | flags
- code = []
-
- # compile info block
- _compile_info(code, p, flags)
-
- # compile the pattern
- _compile(code, p.data, flags)
-
- code.append(OPCODES[SUCCESS])
-
- return code
-
-def compile(p, flags=0):
- # internal: convert pattern list to internal format
-
- if type(p) in STRING_TYPES:
- import sre_parse
- pattern = p
- p = sre_parse.parse(p, flags)
- else:
- pattern = None
-
- code = _code(p, flags)
-
- # print code
-
- # FIXME: <fl> get rid of this limitation!
- assert p.pattern.groups <= 100,\
- "sorry, but this version only supports 100 named groups"
-
- # map in either direction
- groupindex = p.pattern.groupdict
- indexgroup = [None] * p.pattern.groups
- for k, i in groupindex.items():
- indexgroup[i] = k
-
- return _sre.compile(
- pattern, flags, code,
- p.pattern.groups-1,
- groupindex, indexgroup
- )
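A rough peek at the parse/compile pipeline implemented above, assuming the canonical module names sre_parse and sre_compile; the printed tree shape is only indicative:

    import sre_parse
    import sre_compile

    tree = sre_parse.parse("a(b|c)*d")
    print tree                # (op, argument) tuples, e.g. ('literal', 97), ...
    prog = sre_compile.compile("a(b|c)*d")
    print prog.match("abccbd") is not None    # the compiled object matches like re's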
diff --git a/Lib/dos-8x3/sre_cons.py b/Lib/dos-8x3/sre_cons.py
deleted file mode 100644
index ea649c0..0000000
--- a/Lib/dos-8x3/sre_cons.py
+++ /dev/null
@@ -1,228 +0,0 @@
-#
-# Secret Labs' Regular Expression Engine
-#
-# various symbols used by the regular expression engine.
-# run this script to update the _sre include files!
-#
-# Copyright (c) 1998-2000 by Secret Labs AB. All rights reserved.
-#
-# See the sre.py file for information on usage and redistribution.
-#
-
-MAXREPEAT = 65535
-
-# should this really be here?
-
-class error(Exception):
- pass
-
-# operators
-
-FAILURE = "failure"
-SUCCESS = "success"
-
-ANY = "any"
-ANY_ALL = "any_all"
-ASSERT = "assert"
-ASSERT_NOT = "assert_not"
-AT = "at"
-BRANCH = "branch"
-CALL = "call"
-CATEGORY = "category"
-CHARSET = "charset"
-GROUPREF = "groupref"
-GROUPREF_IGNORE = "groupref_ignore"
-IN = "in"
-IN_IGNORE = "in_ignore"
-INFO = "info"
-JUMP = "jump"
-LITERAL = "literal"
-LITERAL_IGNORE = "literal_ignore"
-MARK = "mark"
-MAX_REPEAT = "max_repeat"
-MAX_UNTIL = "max_until"
-MIN_REPEAT = "min_repeat"
-MIN_UNTIL = "min_until"
-NEGATE = "negate"
-NOT_LITERAL = "not_literal"
-NOT_LITERAL_IGNORE = "not_literal_ignore"
-RANGE = "range"
-REPEAT = "repeat"
-REPEAT_ONE = "repeat_one"
-SUBPATTERN = "subpattern"
-
-# positions
-AT_BEGINNING = "at_beginning"
-AT_BEGINNING_LINE = "at_beginning_line"
-AT_BOUNDARY = "at_boundary"
-AT_NON_BOUNDARY = "at_non_boundary"
-AT_END = "at_end"
-AT_END_LINE = "at_end_line"
-
-# categories
-CATEGORY_DIGIT = "category_digit"
-CATEGORY_NOT_DIGIT = "category_not_digit"
-CATEGORY_SPACE = "category_space"
-CATEGORY_NOT_SPACE = "category_not_space"
-CATEGORY_WORD = "category_word"
-CATEGORY_NOT_WORD = "category_not_word"
-CATEGORY_LINEBREAK = "category_linebreak"
-CATEGORY_NOT_LINEBREAK = "category_not_linebreak"
-CATEGORY_LOC_WORD = "category_loc_word"
-CATEGORY_LOC_NOT_WORD = "category_loc_not_word"
-CATEGORY_UNI_DIGIT = "category_uni_digit"
-CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit"
-CATEGORY_UNI_SPACE = "category_uni_space"
-CATEGORY_UNI_NOT_SPACE = "category_uni_not_space"
-CATEGORY_UNI_WORD = "category_uni_word"
-CATEGORY_UNI_NOT_WORD = "category_uni_not_word"
-CATEGORY_UNI_LINEBREAK = "category_uni_linebreak"
-CATEGORY_UNI_NOT_LINEBREAK = "category_uni_not_linebreak"
-
-OPCODES = [
-
- # failure=0 success=1 (just because it looks better that way :-)
- FAILURE, SUCCESS,
-
- ANY, ANY_ALL,
- ASSERT, ASSERT_NOT,
- AT,
- BRANCH,
- CALL,
- CATEGORY,
- CHARSET,
- GROUPREF, GROUPREF_IGNORE,
- IN, IN_IGNORE,
- INFO,
- JUMP,
- LITERAL, LITERAL_IGNORE,
- MARK,
- MAX_UNTIL,
- MIN_UNTIL,
- NOT_LITERAL, NOT_LITERAL_IGNORE,
- NEGATE,
- RANGE,
- REPEAT,
- REPEAT_ONE,
- SUBPATTERN
-
-]
-
-ATCODES = [
- AT_BEGINNING, AT_BEGINNING_LINE, AT_BOUNDARY,
- AT_NON_BOUNDARY, AT_END, AT_END_LINE
-]
-
-CHCODES = [
- CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE,
- CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD,
- CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD,
- CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT,
- CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD,
- CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK,
- CATEGORY_UNI_NOT_LINEBREAK
-]
-
-def makedict(list):
- d = {}
- i = 0
- for item in list:
- d[item] = i
- i = i + 1
- return d
-
-OPCODES = makedict(OPCODES)
-ATCODES = makedict(ATCODES)
-CHCODES = makedict(CHCODES)
-
-# replacement operations for "ignore case" mode
-OP_IGNORE = {
- GROUPREF: GROUPREF_IGNORE,
- IN: IN_IGNORE,
- LITERAL: LITERAL_IGNORE,
- NOT_LITERAL: NOT_LITERAL_IGNORE
-}
-
-AT_MULTILINE = {
- AT_BEGINNING: AT_BEGINNING_LINE,
- AT_END: AT_END_LINE
-}
-
-CH_LOCALE = {
- CATEGORY_DIGIT: CATEGORY_DIGIT,
- CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
- CATEGORY_SPACE: CATEGORY_SPACE,
- CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
- CATEGORY_WORD: CATEGORY_LOC_WORD,
- CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
- CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
- CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
-}
-
-CH_UNICODE = {
- CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
- CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
- CATEGORY_SPACE: CATEGORY_UNI_SPACE,
- CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
- CATEGORY_WORD: CATEGORY_UNI_WORD,
- CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
- CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
- CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
-}
-
-# flags
-SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking)
-SRE_FLAG_IGNORECASE = 2 # case insensitive
-SRE_FLAG_LOCALE = 4 # honour system locale
-SRE_FLAG_MULTILINE = 8 # treat target as multiline string
-SRE_FLAG_DOTALL = 16 # treat target as a single string
-SRE_FLAG_UNICODE = 32 # use unicode locale
-SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
-
-# flags for INFO primitive
-SRE_INFO_PREFIX = 1 # has prefix
-SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix)
-SRE_INFO_CHARSET = 4 # pattern starts with character from given set
-
-if __name__ == "__main__":
- import string
- def dump(f, d, prefix):
- items = d.items()
- items.sort(lambda a, b: cmp(a[1], b[1]))
- for k, v in items:
- f.write("#define %s_%s %s\n" % (prefix, string.upper(k), v))
- f = open("sre_constants.h", "w")
- f.write("""\
-/*
- * Secret Labs' Regular Expression Engine
- *
- * regular expression matching engine
- *
- * NOTE: This file is generated by sre_constants.py. If you need
- * to change anything in here, edit sre_constants.py and run it.
- *
- * Copyright (c) 1997-2000 by Secret Labs AB. All rights reserved.
- *
- * See the _sre.c file for information on usage and redistribution.
- */
-
-""")
-
- dump(f, OPCODES, "SRE_OP")
- dump(f, ATCODES, "SRE")
- dump(f, CHCODES, "SRE")
-
- f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE)
- f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE)
- f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE)
- f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE)
- f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL)
- f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE)
- f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE)
-
- f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX)
- f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL)
- f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET)
-
- f.close()
- print "done"
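After makedict() has run at import time, OPCODES, ATCODES and CHCODES map each symbolic name to its position in the corresponding list, which is the integer that both the engine and the generated sre_constants.h use. A small check, assuming the canonical module name sre_constants:

    import sre_constants

    print sre_constants.OPCODES[sre_constants.FAILURE]         # 0, first opcode
    print sre_constants.OPCODES[sre_constants.SUCCESS]         # 1
    print sre_constants.CHCODES[sre_constants.CATEGORY_DIGIT]  # 0, first category code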
diff --git a/Lib/dos-8x3/sre_pars.py b/Lib/dos-8x3/sre_pars.py
deleted file mode 100644
index 7c36d4f..0000000
--- a/Lib/dos-8x3/sre_pars.py
+++ /dev/null
@@ -1,682 +0,0 @@
-#
-# Secret Labs' Regular Expression Engine
-#
-# convert re-style regular expression to sre pattern
-#
-# Copyright (c) 1998-2000 by Secret Labs AB. All rights reserved.
-#
-# See the sre.py file for information on usage and redistribution.
-#
-
-import string, sys
-
-from sre_constants import *
-
-SPECIAL_CHARS = ".\\[{()*+?^$|"
-REPEAT_CHARS = "*+?{"
-
-DIGITS = tuple("0123456789")
-
-OCTDIGITS = tuple("01234567")
-HEXDIGITS = tuple("0123456789abcdefABCDEF")
-
-WHITESPACE = tuple(" \t\n\r\v\f")
-
-ESCAPES = {
- r"\a": (LITERAL, 7),
- r"\b": (LITERAL, 8),
- r"\f": (LITERAL, 12),
- r"\n": (LITERAL, 10),
- r"\r": (LITERAL, 13),
- r"\t": (LITERAL, 9),
- r"\v": (LITERAL, 11),
- r"\\": (LITERAL, ord("\\"))
-}
-
-CATEGORIES = {
- r"\A": (AT, AT_BEGINNING), # start of string
- r"\b": (AT, AT_BOUNDARY),
- r"\B": (AT, AT_NON_BOUNDARY),
- r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
- r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
- r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
- r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
- r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
- r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
- r"\Z": (AT, AT_END), # end of string
-}
-
-FLAGS = {
- # standard flags
- "i": SRE_FLAG_IGNORECASE,
- "L": SRE_FLAG_LOCALE,
- "m": SRE_FLAG_MULTILINE,
- "s": SRE_FLAG_DOTALL,
- "x": SRE_FLAG_VERBOSE,
- # extensions
- "t": SRE_FLAG_TEMPLATE,
- "u": SRE_FLAG_UNICODE,
-}
-
-class Pattern:
- # master pattern object. keeps track of global attributes
- def __init__(self):
- self.flags = 0
- self.groups = 1
- self.groupdict = {}
- def getgroup(self, name=None):
- gid = self.groups
- self.groups = gid + 1
- if name:
- self.groupdict[name] = gid
- return gid
-
-class SubPattern:
- # a subpattern, in intermediate form
- def __init__(self, pattern, data=None):
- self.pattern = pattern
- if not data:
- data = []
- self.data = data
- self.width = None
- def dump(self, level=0):
- nl = 1
- for op, av in self.data:
- print level*" " + op,; nl = 0
- if op == "in":
- # member sublanguage
- print; nl = 1
- for op, a in av:
- print (level+1)*" " + op, a
- elif op == "branch":
- print; nl = 1
- i = 0
- for a in av[1]:
- if i > 0:
- print level*" " + "or"
- a.dump(level+1); nl = 1
- i = i + 1
- elif type(av) in (type(()), type([])):
- for a in av:
- if isinstance(a, SubPattern):
- if not nl: print
- a.dump(level+1); nl = 1
- else:
- print a, ; nl = 0
- else:
- print av, ; nl = 0
- if not nl: print
- def __repr__(self):
- return repr(self.data)
- def __len__(self):
- return len(self.data)
- def __delitem__(self, index):
- del self.data[index]
- def __getitem__(self, index):
- return self.data[index]
- def __setitem__(self, index, code):
- self.data[index] = code
- def __getslice__(self, start, stop):
- return SubPattern(self.pattern, self.data[start:stop])
- def insert(self, index, code):
- self.data.insert(index, code)
- def append(self, code):
- self.data.append(code)
- def getwidth(self):
- # determine the width (min, max) for this subpattern
- if self.width:
- return self.width
- lo = hi = 0L
- for op, av in self.data:
- if op is BRANCH:
- i = sys.maxint
- j = 0
- for av in av[1]:
- l, h = av.getwidth()
- i = min(i, l)
- j = max(j, h)
- lo = lo + i
- hi = hi + j
- elif op is CALL:
- i, j = av.getwidth()
- lo = lo + i
- hi = hi + j
- elif op is SUBPATTERN:
- i, j = av[1].getwidth()
- lo = lo + i
- hi = hi + j
- elif op in (MIN_REPEAT, MAX_REPEAT):
- i, j = av[2].getwidth()
- lo = lo + long(i) * av[0]
- hi = hi + long(j) * av[1]
- elif op in (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY):
- lo = lo + 1
- hi = hi + 1
- elif op == SUCCESS:
- break
- self.width = int(min(lo, sys.maxint)), int(min(hi, sys.maxint))
- return self.width
-
-class Tokenizer:
- def __init__(self, string):
- self.string = string
- self.index = 0
- self.__next()
- def __next(self):
- if self.index >= len(self.string):
- self.next = None
- return
- char = self.string[self.index]
- if char[0] == "\\":
- try:
- c = self.string[self.index + 1]
- except IndexError:
- raise error, "bogus escape"
- char = char + c
- self.index = self.index + len(char)
- self.next = char
- def match(self, char, skip=1):
- if char == self.next:
- if skip:
- self.__next()
- return 1
- return 0
- def get(self):
- this = self.next
- self.__next()
- return this
- def tell(self):
- return self.index, self.next
- def seek(self, index):
- self.index, self.next = index
-
-def isident(char):
- return "a" <= char <= "z" or "A" <= char <= "Z" or char == "_"
-
-def isdigit(char):
- return "0" <= char <= "9"
-
-def isname(name):
- # check that group name is a valid string
- if not isident(name[0]):
- return 0
- for char in name:
- if not isident(char) and not isdigit(char):
- return 0
- return 1
-
-def _group(escape, groups):
- # check if the escape string represents a valid group
- try:
- gid = int(escape[1:])
- if gid and gid < groups:
- return gid
- except ValueError:
- pass
- return None # not a valid group
-
-def _class_escape(source, escape):
- # handle escape code inside character class
- code = ESCAPES.get(escape)
- if code:
- return code
- code = CATEGORIES.get(escape)
- if code:
- return code
- try:
- if escape[1:2] == "x":
- # hexadecimal escape (exactly two digits)
- while source.next in HEXDIGITS and len(escape) < 4:
- escape = escape + source.get()
- escape = escape[2:]
- if len(escape) != 2:
- raise error, "bogus escape: %s" % repr("\\" + escape)
- return LITERAL, int(escape, 16) & 0xff
- elif str(escape[1:2]) in OCTDIGITS:
- # octal escape (up to three digits)
- while source.next in OCTDIGITS and len(escape) < 5:
- escape = escape + source.get()
- escape = escape[1:]
- return LITERAL, int(escape, 8) & 0xff
- if len(escape) == 2:
- return LITERAL, ord(escape[1])
- except ValueError:
- pass
- raise error, "bogus escape: %s" % repr(escape)
-
-def _escape(source, escape, state):
- # handle escape code in expression
- code = CATEGORIES.get(escape)
- if code:
- return code
- code = ESCAPES.get(escape)
- if code:
- return code
- try:
- if escape[1:2] == "x":
- # hexadecimal escape
- while source.next in HEXDIGITS and len(escape) < 4:
- escape = escape + source.get()
- if len(escape) != 4:
- raise ValueError
- return LITERAL, int(escape[2:], 16) & 0xff
- elif escape[1:2] == "0":
- # octal escape
- while source.next in OCTDIGITS and len(escape) < 4:
- escape = escape + source.get()
- return LITERAL, int(escape[1:], 8) & 0xff
- elif escape[1:2] in DIGITS:
- # octal escape *or* decimal group reference (sigh)
- here = source.tell()
- if source.next in DIGITS:
- escape = escape + source.get()
- if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
- source.next in OCTDIGITS):
- # got three octal digits; this is an octal escape
- escape = escape + source.get()
- return LITERAL, int(escape[1:], 8) & 0xff
- # got at least one decimal digit; this is a group reference
- group = _group(escape, state.groups)
- if group:
- return GROUPREF, group
- raise ValueError
- if len(escape) == 2:
- return LITERAL, ord(escape[1])
- except ValueError:
- pass
- raise error, "bogus escape: %s" % repr(escape)
-
-def _parse_sub(source, state, nested=1):
- # parse an alternation: a|b|c
-
- items = []
- while 1:
- items.append(_parse(source, state))
- if source.match("|"):
- continue
- if not nested:
- break
- if not source.next or source.match(")", 0):
- break
- else:
- raise error, "pattern not properly closed"
-
- if len(items) == 1:
- return items[0]
-
- subpattern = SubPattern(state)
-
- # check if all items share a common prefix
- while 1:
- prefix = None
- for item in items:
- if not item:
- break
- if prefix is None:
- prefix = item[0]
- elif item[0] != prefix:
- break
- else:
- # all subitems start with a common "prefix".
- # move it out of the branch
- for item in items:
- del item[0]
- subpattern.append(prefix)
- continue # check next one
- break
-
- # check if the branch can be replaced by a character set
- for item in items:
- if len(item) != 1 or item[0][0] != LITERAL:
- break
- else:
- # we can store this as a character set instead of a
- # branch (the compiler may optimize this even more)
- set = []
- for item in items:
- set.append(item[0])
- subpattern.append((IN, set))
- return subpattern
-
- subpattern.append((BRANCH, (None, items)))
- return subpattern
-
-def _parse(source, state):
- # parse a simple pattern
-
- subpattern = SubPattern(state)
-
- while 1:
-
- if source.next in ("|", ")"):
- break # end of subpattern
- this = source.get()
- if this is None:
- break # end of pattern
-
- if state.flags & SRE_FLAG_VERBOSE:
- # skip whitespace and comments
- if this in WHITESPACE:
- continue
- if this == "#":
- while 1:
- this = source.get()
- if this in (None, "\n"):
- break
- continue
-
- if this and this[0] not in SPECIAL_CHARS:
- subpattern.append((LITERAL, ord(this)))
-
- elif this == "[":
- # character set
- set = []
-## if source.match(":"):
-## pass # handle character classes
- if source.match("^"):
- set.append((NEGATE, None))
- # check remaining characters
- start = set[:]
- while 1:
- this = source.get()
- if this == "]" and set != start:
- break
- elif this and this[0] == "\\":
- code1 = _class_escape(source, this)
- elif this:
- code1 = LITERAL, ord(this)
- else:
- raise error, "unexpected end of regular expression"
- if source.match("-"):
- # potential range
- this = source.get()
- if this == "]":
- if code1[0] is IN:
- code1 = code1[1][0]
- set.append(code1)
- set.append((LITERAL, ord("-")))
- break
- else:
- if this[0] == "\\":
- code2 = _class_escape(source, this)
- else:
- code2 = LITERAL, ord(this)
- if code1[0] != LITERAL or code2[0] != LITERAL:
- raise error, "illegal range"
- lo = code1[1]
- hi = code2[1]
- if hi < lo:
- raise error, "illegal range"
- set.append((RANGE, (lo, hi)))
- else:
- if code1[0] is IN:
- code1 = code1[1][0]
- set.append(code1)
-
- # FIXME: <fl> move set optimization to compiler!
- if len(set)==1 and set[0][0] is LITERAL:
- subpattern.append(set[0]) # optimization
- elif len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL:
- subpattern.append((NOT_LITERAL, set[1][1])) # optimization
- else:
- # FIXME: <fl> add charmap optimization
- subpattern.append((IN, set))
-
- elif this and this[0] in REPEAT_CHARS:
- # repeat previous item
- if this == "?":
- min, max = 0, 1
- elif this == "*":
- min, max = 0, MAXREPEAT
- elif this == "+":
- min, max = 1, MAXREPEAT
- elif this == "{":
- here = source.tell()
- min, max = 0, MAXREPEAT
- lo = hi = ""
- while source.next in DIGITS:
- lo = lo + source.get()
- if source.match(","):
- while source.next in DIGITS:
- hi = hi + source.get()
- else:
- hi = lo
- if not source.match("}"):
- subpattern.append((LITERAL, ord(this)))
- source.seek(here)
- continue
- if lo:
- min = int(lo)
- if hi:
- max = int(hi)
- # FIXME: <fl> check that hi >= lo!
- else:
- raise error, "not supported"
- # figure out which item to repeat
- if subpattern:
- item = subpattern[-1:]
- else:
- raise error, "nothing to repeat"
- if source.match("?"):
- subpattern[-1] = (MIN_REPEAT, (min, max, item))
- else:
- subpattern[-1] = (MAX_REPEAT, (min, max, item))
-
- elif this == ".":
- subpattern.append((ANY, None))
-
- elif this == "(":
- group = 1
- name = None
- if source.match("?"):
- group = 0
- # options
- if source.match("P"):
- # python extensions
- if source.match("<"):
- # named group: skip forward to end of name
- name = ""
- while 1:
- char = source.get()
- if char is None:
- raise error, "unterminated name"
- if char == ">":
- break
- name = name + char
- group = 1
- if not isname(name):
- raise error, "illegal character in group name"
- elif source.match("="):
- # named backreference
- name = ""
- while 1:
- char = source.get()
- if char is None:
- raise error, "unterminated name"
- if char == ")":
- break
- name = name + char
- if not isname(name):
- raise error, "illegal character in group name"
- gid = state.groupdict.get(name)
- if gid is None:
- raise error, "unknown group name"
- subpattern.append((GROUPREF, gid))
- continue
- else:
- char = source.get()
- if char is None:
- raise error, "unexpected end of pattern"
- raise error, "unknown specifier: ?P%s" % char
- elif source.match(":"):
- # non-capturing group
- group = 2
- elif source.match("#"):
- # comment
- while 1:
- if source.next is None or source.next == ")":
- break
- source.get()
- if not source.match(")"):
- raise error, "unbalanced parenthesis"
- continue
- elif source.next in ("=", "!", "<"):
- # lookahead assertions
- char = source.get()
- dir = 1
- if char == "<":
- if source.next not in ("=", "!"):
- raise error, "syntax error"
- dir = -1 # lookbehind
- char = source.get()
- p = _parse_sub(source, state)
- if not source.match(")"):
- raise error, "unbalanced parenthesis"
- if char == "=":
- subpattern.append((ASSERT, (dir, p)))
- else:
- subpattern.append((ASSERT_NOT, (dir, p)))
- continue
- else:
- # flags
- while FLAGS.has_key(source.next):
- state.flags = state.flags | FLAGS[source.get()]
- if group:
- # parse group contents
- if group == 2:
- # anonymous group
- group = None
- else:
- group = state.getgroup(name)
- p = _parse_sub(source, state)
- if not source.match(")"):
- raise error, "unbalanced parenthesis"
- subpattern.append((SUBPATTERN, (group, p)))
- else:
- while 1:
- char = source.get()
- if char is None or char == ")":
- break
- raise error, "unknown extension"
-
- elif this == "^":
- subpattern.append((AT, AT_BEGINNING))
-
- elif this == "$":
- subpattern.append((AT, AT_END))
-
- elif this and this[0] == "\\":
- code = _escape(source, this, state)
- subpattern.append(code)
-
- else:
- raise error, "parser error"
-
- return subpattern
-
-def parse(str, flags=0, pattern=None):
- # parse 're' pattern into list of (opcode, argument) tuples
-
- source = Tokenizer(str)
-
- if pattern is None:
- pattern = Pattern()
- pattern.flags = flags
-
- p = _parse_sub(source, pattern, 0)
-
- tail = source.get()
- if tail == ")":
- raise error, "unbalanced parenthesis"
- elif tail:
- raise error, "bogus characters at end of regular expression"
-
- # p.dump()
-
- if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE:
- # the VERBOSE flag was switched on inside the pattern. to be
- # on the safe side, we'll parse the whole thing again...
- return parse(str, p.pattern.flags)
-
- return p
-
-def parse_template(source, pattern):
- # parse 're' replacement string into list of literals and
- # group references
- s = Tokenizer(source)
- p = []
- a = p.append
- while 1:
- this = s.get()
- if this is None:
- break # end of replacement string
- if this and this[0] == "\\":
- # group
- if this == "\\g":
- name = ""
- if s.match("<"):
- while 1:
- char = s.get()
- if char is None:
- raise error, "unterminated group name"
- if char == ">":
- break
- name = name + char
- if not name:
- raise error, "bad group name"
- try:
- index = int(name)
- except ValueError:
- if not isname(name):
- raise error, "illegal character in group name"
- try:
- index = pattern.groupindex[name]
- except KeyError:
- raise IndexError, "unknown group name"
- a((MARK, index))
- elif len(this) > 1 and this[1] in DIGITS:
- code = None
- while 1:
- group = _group(this, pattern.groups+1)
- if group:
- if (s.next not in DIGITS or
- not _group(this + s.next, pattern.groups+1)):
- code = MARK, int(group)
- break
- elif s.next in OCTDIGITS:
- this = this + s.get()
- else:
- break
- if not code:
- this = this[1:]
- code = LITERAL, int(this[-6:], 8) & 0xff
- a(code)
- else:
- try:
- a(ESCAPES[this])
- except KeyError:
- for c in this:
- a((LITERAL, ord(c)))
- else:
- a((LITERAL, ord(this)))
- return p
-
-def expand_template(template, match):
- # FIXME: <fl> this is sooooo slow. drop in the slicelist
- # code instead
- p = []
- a = p.append
- sep = match.string[:0]
- if type(sep) is type(""):
- char = chr
- else:
- char = unichr
- for c, s in template:
- if c is LITERAL:
- a(char(s))
- elif c is MARK:
- s = match.group(s)
- if s is None:
- raise error, "empty group"
- a(s)
- return string.join(p, sep)
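As a rough illustration of the parser above (a sketch only, assuming Python 2.x and that the module is importable under its standard name sre_parse, with the Pattern class defined earlier in the same file), parse() turns a pattern string into a SubPattern holding (opcode, argument) tuples:

    import sre_parse
    p = sre_parse.parse("a|b|c")
    print p             # the (opcode, argument) list; per _parse_sub above,
                        # a branch of single literals is stored as one IN set
    print p.getwidth()  # (1, 1) -- each alternative matches exactly one character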
diff --git a/Lib/dos-8x3/statcach.py b/Lib/dos-8x3/statcach.py
deleted file mode 100755
index b5147c2..0000000
--- a/Lib/dos-8x3/statcach.py
+++ /dev/null
@@ -1,75 +0,0 @@
-"""Maintain a cache of stat() information on files.
-
-There are functions to reset the cache or to selectively remove items.
-"""
-
-import os
-from stat import *
-
-# The cache.
-# Keys are pathnames, values are `os.stat' outcomes.
-#
-cache = {}
-
-
-def stat(path):
- """Stat a file, possibly out of the cache."""
- if cache.has_key(path):
- return cache[path]
- cache[path] = ret = os.stat(path)
- return ret
-
-
-def reset():
- """Reset the cache completely."""
- global cache
- cache = {}
-
-
-def forget(path):
- """Remove a given item from the cache, if it exists."""
- if cache.has_key(path):
- del cache[path]
-
-
-def forget_prefix(prefix):
- """Remove all pathnames with a given prefix."""
- n = len(prefix)
- for path in cache.keys():
- if path[:n] == prefix:
- del cache[path]
-
-
-def forget_dir(prefix):
- """Forget about a directory and all entries in it, but not about
- entries in subdirectories."""
- if prefix[-1:] == '/' and prefix <> '/':
- prefix = prefix[:-1]
- forget(prefix)
- if prefix[-1:] <> '/':
- prefix = prefix + '/'
- n = len(prefix)
- for path in cache.keys():
- if path[:n] == prefix:
- rest = path[n:]
- if rest[-1:] == '/': rest = rest[:-1]
- if '/' not in rest:
- del cache[path]
-
-
-def forget_except_prefix(prefix):
- """Remove all pathnames except with a given prefix.
- Normally used with prefix = '/' after a chdir()."""
- n = len(prefix)
- for path in cache.keys():
- if path[:n] <> prefix:
- del cache[path]
-
-
-def isdir(path):
- """Check for directory."""
- try:
- st = stat(path)
- except os.error:
- return 0
- return S_ISDIR(st[ST_MODE])
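A minimal usage sketch for the cache above (Python 2.x, assuming the module is importable under its standard-library name statcache; the paths are illustrative only):

    import statcache
    st = statcache.stat('/etc/passwd')   # first call hits os.stat() and caches the result
    st = statcache.stat('/etc/passwd')   # second call is answered from the cache
    print statcache.isdir('/etc')        # 1 for a directory, 0 otherwise
    statcache.forget('/etc/passwd')      # drop a single entry
    statcache.reset()                    # or empty the cache completely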
diff --git a/Lib/dos-8x3/string_t.py b/Lib/dos-8x3/string_t.py
deleted file mode 100644
index d4041be..0000000
--- a/Lib/dos-8x3/string_t.py
+++ /dev/null
@@ -1,202 +0,0 @@
-"""Common tests shared by test_string and test_userstring"""
-
-import string
-
-transtable = '\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`xyzdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377'
-
-from UserList import UserList
-
-class Sequence:
- def __init__(self): self.seq = 'wxyz'
- def __len__(self): return len(self.seq)
- def __getitem__(self, i): return self.seq[i]
-
-class BadSeq1(Sequence):
- def __init__(self): self.seq = [7, 'hello', 123L]
-
-class BadSeq2(Sequence):
- def __init__(self): self.seq = ['a', 'b', 'c']
- def __len__(self): return 8
-
-def run_module_tests(test):
- """Run all tests that exercise a function in the string module"""
-
- test('atoi', " 1 ", 1)
- test('atoi', " 1x", ValueError)
- test('atoi', " x1 ", ValueError)
- test('atol', " 1 ", 1L)
- test('atol', " 1x ", ValueError)
- test('atol', " x1 ", ValueError)
- test('atof', " 1 ", 1.0)
- test('atof', " 1x ", ValueError)
- test('atof', " x1 ", ValueError)
-
- test('maketrans', 'abc', transtable, 'xyz')
- test('maketrans', 'abc', ValueError, 'xyzq')
-
- # join now works with any sequence type
- test('join', ['a', 'b', 'c', 'd'], 'a b c d')
- test('join', ('a', 'b', 'c', 'd'), 'abcd', '')
- test('join', Sequence(), 'w x y z')
- test('join', 7, TypeError)
-
- test('join', BadSeq1(), TypeError)
- test('join', BadSeq2(), 'a b c')
-
- # try a few long ones
- print string.join(['x' * 100] * 100, ':')
- print string.join(('x' * 100,) * 100, ':')
-
-
-def run_method_tests(test):
- """Run all tests that exercise a method of a string object"""
-
- test('capitalize', ' hello ', ' hello ')
- test('capitalize', 'hello ', 'Hello ')
- test('find', 'abcdefghiabc', 0, 'abc')
- test('find', 'abcdefghiabc', 9, 'abc', 1)
- test('find', 'abcdefghiabc', -1, 'def', 4)
- test('rfind', 'abcdefghiabc', 9, 'abc')
- test('lower', 'HeLLo', 'hello')
- test('lower', 'hello', 'hello')
- test('upper', 'HeLLo', 'HELLO')
- test('upper', 'HELLO', 'HELLO')
-
- test('title', ' hello ', ' Hello ')
- test('title', 'hello ', 'Hello ')
- test('title', "fOrMaT thIs aS titLe String", 'Format This As Title String')
- test('title', "fOrMaT,thIs-aS*titLe;String", 'Format,This-As*Title;String')
- test('title', "getInt", 'Getint')
-
- test('expandtabs', 'abc\rab\tdef\ng\thi', 'abc\rab def\ng hi')
- test('expandtabs', 'abc\rab\tdef\ng\thi', 'abc\rab def\ng hi', 8)
- test('expandtabs', 'abc\rab\tdef\ng\thi', 'abc\rab def\ng hi', 4)
- test('expandtabs', 'abc\r\nab\tdef\ng\thi', 'abc\r\nab def\ng hi', 4)
-
- test('islower', 'a', 1)
- test('islower', 'A', 0)
- test('islower', '\n', 0)
- test('islower', 'abc', 1)
- test('islower', 'aBc', 0)
- test('islower', 'abc\n', 1)
-
- test('isupper', 'a', 0)
- test('isupper', 'A', 1)
- test('isupper', '\n', 0)
- test('isupper', 'ABC', 1)
- test('isupper', 'AbC', 0)
- test('isupper', 'ABC\n', 1)
-
- test('istitle', 'a', 0)
- test('istitle', 'A', 1)
- test('istitle', '\n', 0)
- test('istitle', 'A Titlecased Line', 1)
- test('istitle', 'A\nTitlecased Line', 1)
- test('istitle', 'A Titlecased, Line', 1)
- test('istitle', 'Not a capitalized String', 0)
- test('istitle', 'Not\ta Titlecase String', 0)
- test('istitle', 'Not--a Titlecase String', 0)
-
- test('isalpha', 'a', 1)
- test('isalpha', 'A', 1)
- test('isalpha', '\n', 0)
- test('isalpha', 'abc', 1)
- test('isalpha', 'aBc123', 0)
- test('isalpha', 'abc\n', 0)
-
- test('isalnum', 'a', 1)
- test('isalnum', 'A', 1)
- test('isalnum', '\n', 0)
- test('isalnum', '123abc456', 1)
- test('isalnum', 'a1b3c', 1)
- test('isalnum', 'aBc000 ', 0)
- test('isalnum', 'abc\n', 0)
-
- # join now works with any sequence type
- test('join', ' ', 'a b c d', ['a', 'b', 'c', 'd'])
- test('join', '', 'abcd', ('a', 'b', 'c', 'd'))
- test('join', ' ', 'w x y z', Sequence())
- test('join', 'a', 'abc', ('abc',))
- test('join', 'a', 'z', UserList(['z']))
- test('join', u'.', u'a.b.c', ['a', 'b', 'c'])
- test('join', '.', u'a.b.c', [u'a', 'b', 'c'])
- test('join', '.', u'a.b.c', ['a', u'b', 'c'])
- test('join', '.', u'a.b.c', ['a', 'b', u'c'])
- test('join', '.', TypeError, ['a', u'b', 3])
- for i in [5, 25, 125]:
- test('join', '-', ((('a' * i) + '-') * i)[:-1],
- ['a' * i] * i)
-
- test('join', ' ', TypeError, BadSeq1())
- test('join', ' ', 'a b c', BadSeq2())
-
- test('splitlines', "abc\ndef\n\rghi", ['abc', 'def', '', 'ghi'])
- test('splitlines', "abc\ndef\n\r\nghi", ['abc', 'def', '', 'ghi'])
- test('splitlines', "abc\ndef\r\nghi", ['abc', 'def', 'ghi'])
- test('splitlines', "abc\ndef\r\nghi\n", ['abc', 'def', 'ghi'])
- test('splitlines', "abc\ndef\r\nghi\n\r", ['abc', 'def', 'ghi', ''])
- test('splitlines', "\nabc\ndef\r\nghi\n\r", ['', 'abc', 'def', 'ghi', ''])
- test('splitlines', "\nabc\ndef\r\nghi\n\r", ['\n', 'abc\n', 'def\r\n', 'ghi\n', '\r'], 1)
-
- test('split', 'this is the split function',
- ['this', 'is', 'the', 'split', 'function'])
- test('split', 'a|b|c|d', ['a', 'b', 'c', 'd'], '|')
- test('split', 'a|b|c|d', ['a', 'b', 'c|d'], '|', 2)
- test('split', 'a b c d', ['a', 'b c d'], None, 1)
- test('split', 'a b c d', ['a', 'b', 'c d'], None, 2)
- test('split', 'a b c d', ['a', 'b', 'c', 'd'], None, 3)
- test('split', 'a b c d', ['a', 'b', 'c', 'd'], None, 4)
- test('split', 'a b c d', ['a b c d'], None, 0)
- test('split', 'a b c d', ['a', 'b', 'c d'], None, 2)
- test('split', 'a b c d ', ['a', 'b', 'c', 'd'])
-
- test('strip', ' hello ', 'hello')
- test('lstrip', ' hello ', 'hello ')
- test('rstrip', ' hello ', ' hello')
- test('strip', 'hello', 'hello')
-
- test('swapcase', 'HeLLo cOmpUteRs', 'hEllO CoMPuTErS')
- test('translate', 'xyzabcdef', 'xyzxyz', transtable, 'def')
-
- table = string.maketrans('a', 'A')
- test('translate', 'abc', 'Abc', table)
- test('translate', 'xyz', 'xyz', table)
-
- test('replace', 'one!two!three!', 'one@two!three!', '!', '@', 1)
- test('replace', 'one!two!three!', 'onetwothree', '!', '')
- test('replace', 'one!two!three!', 'one@two@three!', '!', '@', 2)
- test('replace', 'one!two!three!', 'one@two@three@', '!', '@', 3)
- test('replace', 'one!two!three!', 'one@two@three@', '!', '@', 4)
- test('replace', 'one!two!three!', 'one!two!three!', '!', '@', 0)
- test('replace', 'one!two!three!', 'one@two@three@', '!', '@')
- test('replace', 'one!two!three!', 'one!two!three!', 'x', '@')
- test('replace', 'one!two!three!', 'one!two!three!', 'x', '@', 2)
-
- test('startswith', 'hello', 1, 'he')
- test('startswith', 'hello', 1, 'hello')
- test('startswith', 'hello', 0, 'hello world')
- test('startswith', 'hello', 1, '')
- test('startswith', 'hello', 0, 'ello')
- test('startswith', 'hello', 1, 'ello', 1)
- test('startswith', 'hello', 1, 'o', 4)
- test('startswith', 'hello', 0, 'o', 5)
- test('startswith', 'hello', 1, '', 5)
- test('startswith', 'hello', 0, 'lo', 6)
- test('startswith', 'helloworld', 1, 'lowo', 3)
- test('startswith', 'helloworld', 1, 'lowo', 3, 7)
- test('startswith', 'helloworld', 0, 'lowo', 3, 6)
-
- test('endswith', 'hello', 1, 'lo')
- test('endswith', 'hello', 0, 'he')
- test('endswith', 'hello', 1, '')
- test('endswith', 'hello', 0, 'hello world')
- test('endswith', 'helloworld', 0, 'worl')
- test('endswith', 'helloworld', 1, 'worl', 3, 9)
- test('endswith', 'helloworld', 1, 'world', 3, 12)
- test('endswith', 'helloworld', 1, 'lowo', 1, 7)
- test('endswith', 'helloworld', 1, 'lowo', 2, 7)
- test('endswith', 'helloworld', 1, 'lowo', 3, 7)
- test('endswith', 'helloworld', 0, 'lowo', 4, 7)
- test('endswith', 'helloworld', 0, 'lowo', 3, 8)
- test('endswith', 'ab', 0, 'ab', 0, 1)
- test('endswith', 'ab', 0, 'ab', 0, 0)
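The test callable these helpers expect is supplied by the drivers (test_string.py and test_userstring.py, not part of this diff). A rough sketch of the method-test convention implied by the calls above -- method name, then the object it is called on, then the expected result (or exception class), then any extra arguments; the module-test driver looks the name up in the string module instead:

    import sys

    def test(name, input, output, *args):
        try:
            value = apply(getattr(input, name), args)
        except:
            value = sys.exc_type   # an exception class may be the expected output
        if value != output:
            print 'failed:', name, `input`, args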
diff --git a/Lib/dos-8x3/stringio.py b/Lib/dos-8x3/stringio.py
deleted file mode 100755
index 6952b1a..0000000
--- a/Lib/dos-8x3/stringio.py
+++ /dev/null
@@ -1,193 +0,0 @@
-"""File-like objects that read from or write to a string buffer.
-
-This implements (nearly) all stdio methods.
-
-f = StringIO() # ready for writing
-f = StringIO(buf) # ready for reading
-f.close() # explicitly release resources held
-flag = f.isatty() # always false
-pos = f.tell() # get current position
-f.seek(pos) # set current position
-f.seek(pos, mode) # mode 0: absolute; 1: relative; 2: relative to EOF
-buf = f.read() # read until EOF
-buf = f.read(n) # read up to n bytes
-buf = f.readline() # read until end of line ('\n') or EOF
-list = f.readlines()# list of f.readline() results until EOF
-f.truncate([size]) # truncate file to at most size (default: current pos)
-f.write(buf) # write at current position
-f.writelines(list) # for line in list: f.write(line)
-f.getvalue() # return whole file's contents as a string
-
-Notes:
-- Using a real file is often faster (but less convenient).
-- There's also a much faster implementation in C, called cStringIO, but
- it's not subclassable.
-- fileno() is left unimplemented so that code which uses it triggers
- an exception early.
-- Seeking far beyond EOF and then writing will insert real null
- bytes that occupy space in the buffer.
-- There's a simple test set (see end of this file).
-"""
-
-import errno
-import string
-
-class StringIO:
- def __init__(self, buf = ''):
- self.buf = buf
- self.len = len(buf)
- self.buflist = []
- self.pos = 0
- self.closed = 0
- self.softspace = 0
- def close(self):
- if not self.closed:
- self.closed = 1
- del self.buf, self.pos
- def isatty(self):
- if self.closed:
- raise ValueError, "I/O operation on closed file"
- return 0
- def seek(self, pos, mode = 0):
- if self.closed:
- raise ValueError, "I/O operation on closed file"
- if self.buflist:
- self.buf = self.buf + string.joinfields(self.buflist, '')
- self.buflist = []
- if mode == 1:
- pos = pos + self.pos
- elif mode == 2:
- pos = pos + self.len
- self.pos = max(0, pos)
- def tell(self):
- if self.closed:
- raise ValueError, "I/O operation on closed file"
- return self.pos
- def read(self, n = -1):
- if self.closed:
- raise ValueError, "I/O operation on closed file"
- if self.buflist:
- self.buf = self.buf + string.joinfields(self.buflist, '')
- self.buflist = []
- if n < 0:
- newpos = self.len
- else:
- newpos = min(self.pos+n, self.len)
- r = self.buf[self.pos:newpos]
- self.pos = newpos
- return r
- def readline(self, length=None):
- if self.closed:
- raise ValueError, "I/O operation on closed file"
- if self.buflist:
- self.buf = self.buf + string.joinfields(self.buflist, '')
- self.buflist = []
- i = string.find(self.buf, '\n', self.pos)
- if i < 0:
- newpos = self.len
- else:
- newpos = i+1
- if length is not None:
- if self.pos + length < newpos:
- newpos = self.pos + length
- r = self.buf[self.pos:newpos]
- self.pos = newpos
- return r
- def readlines(self, sizehint = 0):
- total = 0
- lines = []
- line = self.readline()
- while line:
- lines.append(line)
- total += len(line)
- if 0 < sizehint <= total:
- break
- line = self.readline()
- return lines
- def truncate(self, size=None):
- if self.closed:
- raise ValueError, "I/O operation on closed file"
- if size is None:
- size = self.pos
- elif size < 0:
- raise IOError(errno.EINVAL,
- "Negative size not allowed")
- elif size < self.pos:
- self.pos = size
- self.buf = self.getvalue()[:size]
- def write(self, s):
- if self.closed:
- raise ValueError, "I/O operation on closed file"
- if not s: return
- if self.pos > self.len:
- self.buflist.append('\0'*(self.pos - self.len))
- self.len = self.pos
- newpos = self.pos + len(s)
- if self.pos < self.len:
- if self.buflist:
- self.buf = self.buf + string.joinfields(self.buflist, '')
- self.buflist = []
- self.buflist = [self.buf[:self.pos], s, self.buf[newpos:]]
- self.buf = ''
- if newpos > self.len:
- self.len = newpos
- else:
- self.buflist.append(s)
- self.len = newpos
- self.pos = newpos
- def writelines(self, list):
- self.write(string.joinfields(list, ''))
- def flush(self):
- if self.closed:
- raise ValueError, "I/O operation on closed file"
- def getvalue(self):
- if self.buflist:
- self.buf = self.buf + string.joinfields(self.buflist, '')
- self.buflist = []
- return self.buf
-
-
-# A little test suite
-
-def test():
- import sys
- if sys.argv[1:]:
- file = sys.argv[1]
- else:
- file = '/etc/passwd'
- lines = open(file, 'r').readlines()
- text = open(file, 'r').read()
- f = StringIO()
- for line in lines[:-2]:
- f.write(line)
- f.writelines(lines[-2:])
- if f.getvalue() != text:
- raise RuntimeError, 'write failed'
- length = f.tell()
- print 'File length =', length
- f.seek(len(lines[0]))
- f.write(lines[1])
- f.seek(0)
- print 'First line =', `f.readline()`
- here = f.tell()
- line = f.readline()
- print 'Second line =', `line`
- f.seek(-len(line), 1)
- line2 = f.read(len(line))
- if line != line2:
- raise RuntimeError, 'bad result after seek back'
- f.seek(len(line2), 1)
- list = f.readlines()
- line = list[-1]
- f.seek(f.tell() - len(line))
- line2 = f.read()
- if line != line2:
- raise RuntimeError, 'bad result after seek back from EOF'
- print 'Read', len(list), 'more lines'
- print 'File length =', f.tell()
- if f.tell() != length:
- raise RuntimeError, 'bad length'
- f.close()
-
-if __name__ == '__main__':
- test()
diff --git a/Lib/dos-8x3/stringol.py b/Lib/dos-8x3/stringol.py
deleted file mode 100644
index c3e6f6f..0000000
--- a/Lib/dos-8x3/stringol.py
+++ /dev/null
@@ -1,431 +0,0 @@
-# module 'string' -- A collection of string operations
-
-# Warning: most of the code you see here isn't normally used nowadays. With
-# Python 1.6, many of these functions are implemented as methods on the
-# standard string object. They used to be implemented by a built-in module
-# called strop, but strop is now obsolete itself.
-
-"""Common string manipulations.
-
-Public module variables:
-
-whitespace -- a string containing all characters considered whitespace
-lowercase -- a string containing all characters considered lowercase letters
-uppercase -- a string containing all characters considered uppercase letters
-letters -- a string containing all characters considered letters
-digits -- a string containing all characters considered decimal digits
-hexdigits -- a string containing all characters considered hexadecimal digits
-octdigits -- a string containing all characters considered octal digits
-
-"""
-
-# Some strings for ctype-style character classification
-whitespace = ' \t\n\r\v\f'
-lowercase = 'abcdefghijklmnopqrstuvwxyz'
-uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
-letters = lowercase + uppercase
-digits = '0123456789'
-hexdigits = digits + 'abcdef' + 'ABCDEF'
-octdigits = '01234567'
-
-# Case conversion helpers
-_idmap = ''
-for i in range(256): _idmap = _idmap + chr(i)
-del i
-
-# Backward compatible names for exceptions
-index_error = ValueError
-atoi_error = ValueError
-atof_error = ValueError
-atol_error = ValueError
-
-# convert UPPER CASE letters to lower case
-def lower(s):
- """lower(s) -> string
-
- Return a copy of the string s converted to lowercase.
-
- """
- return s.lower()
-
-# Convert lower case letters to UPPER CASE
-def upper(s):
- """upper(s) -> string
-
- Return a copy of the string s converted to uppercase.
-
- """
- return s.upper()
-
-# Swap lower case letters and UPPER CASE
-def swapcase(s):
- """swapcase(s) -> string
-
- Return a copy of the string s with upper case characters
- converted to lowercase and vice versa.
-
- """
- return s.swapcase()
-
-# Strip leading and trailing tabs and spaces
-def strip(s):
- """strip(s) -> string
-
- Return a copy of the string s with leading and trailing
- whitespace removed.
-
- """
- return s.strip()
-
-# Strip leading tabs and spaces
-def lstrip(s):
- """lstrip(s) -> string
-
- Return a copy of the string s with leading whitespace removed.
-
- """
- return s.lstrip()
-
-# Strip trailing tabs and spaces
-def rstrip(s):
- """rstrip(s) -> string
-
- Return a copy of the string s with trailing whitespace
- removed.
-
- """
- return s.rstrip()
-
-
-# Split a string into a list of space/tab-separated words
-# NB: split(s) is NOT the same as splitfields(s, ' ')!
-def split(s, sep=None, maxsplit=0):
- """split(str [,sep [,maxsplit]]) -> list of strings
-
- Return a list of the words in the string s, using sep as the
- delimiter string. If maxsplit is nonzero, splits into at most
- maxsplit words. If sep is not specified, any whitespace string
- is a separator. Maxsplit defaults to 0.
-
- (split and splitfields are synonymous)
-
- """
- return s.split(sep, maxsplit)
-splitfields = split
-
-# Join fields with optional separator
-def join(words, sep = ' '):
- """join(list [,sep]) -> string
-
- Return a string composed of the words in list, with
- intervening occurrences of sep. The default separator is a
- single space.
-
- (joinfields and join are synonymous)
-
- """
- return sep.join(words)
-joinfields = join
-
-# for a little bit of speed
-_apply = apply
-
-# Find substring, raise exception if not found
-def index(s, *args):
- """index(s, sub [,start [,end]]) -> int
-
- Like find but raises ValueError when the substring is not found.
-
- """
- return _apply(s.index, args)
-
-# Find last substring, raise exception if not found
-def rindex(s, *args):
- """rindex(s, sub [,start [,end]]) -> int
-
- Like rfind but raises ValueError when the substring is not found.
-
- """
- return _apply(s.rindex, args)
-
-# Count non-overlapping occurrences of substring
-def count(s, *args):
- """count(s, sub[, start[,end]]) -> int
-
- Return the number of occurrences of substring sub in string
- s[start:end]. Optional arguments start and end are
- interpreted as in slice notation.
-
- """
- return _apply(s.count, args)
-
-# Find substring, return -1 if not found
-def find(s, *args):
- """find(s, sub [,start [,end]]) -> in
-
- Return the lowest index in s where substring sub is found,
- such that sub is contained within s[start:end]. Optional
- arguments start and end are interpreted as in slice notation.
-
- Return -1 on failure.
-
- """
- return _apply(s.find, args)
-
-# Find last substring, return -1 if not found
-def rfind(s, *args):
- """rfind(s, sub [,start [,end]]) -> int
-
- Return the highest index in s where substring sub is found,
- such that sub is contained within s[start:end]. Optional
- arguments start and end are interpreted as in slice notation.
-
- Return -1 on failure.
-
- """
- return _apply(s.rfind, args)
-
-# for a bit of speed
-_float = float
-_int = int
-_long = long
-_StringType = type('')
-
-# Convert string to float
-def atof(s):
- """atof(s) -> float
-
- Return the floating point number represented by the string s.
-
- """
- if type(s) == _StringType:
- return _float(s)
- else:
- raise TypeError('argument 1: expected string, %s found' %
- type(s).__name__)
-
-# Convert string to integer
-def atoi(*args):
- """atoi(s [,base]) -> int
-
- Return the integer represented by the string s in the given
- base, which defaults to 10. The string s must consist of one
- or more digits, possibly preceded by a sign. If base is 0, it
- is chosen from the leading characters of s, 0 for octal, 0x or
- 0X for hexadecimal. If base is 16, a preceding 0x or 0X is
- accepted.
-
- """
- try:
- s = args[0]
- except IndexError:
- raise TypeError('function requires at least 1 argument: %d given' %
- len(args))
- # Don't catch type error resulting from too many arguments to int(). The
- # error message isn't compatible but the error type is, and this function
- # is complicated enough already.
- if type(s) == _StringType:
- return _apply(_int, args)
- else:
- raise TypeError('argument 1: expected string, %s found' %
- type(s).__name__)
-
-
-# Convert string to long integer
-def atol(*args):
- """atol(s [,base]) -> long
-
- Return the long integer represented by the string s in the
- given base, which defaults to 10. The string s must consist
- of one or more digits, possibly preceded by a sign. If base
- is 0, it is chosen from the leading characters of s, 0 for
- octal, 0x or 0X for hexadecimal. If base is 16, a preceding
- 0x or 0X is accepted. A trailing L or l is not accepted,
- unless base is 0.
-
- """
- try:
- s = args[0]
- except IndexError:
- raise TypeError('function requires at least 1 argument: %d given' %
- len(args))
- # Don't catch type error resulting from too many arguments to long(). The
- # error message isn't compatible but the error type is, and this function
- # is complicated enough already.
- if type(s) == _StringType:
- return _apply(_long, args)
- else:
- raise TypeError('argument 1: expected string, %s found' %
- type(s).__name__)
-
-
-# Left-justify a string
-def ljust(s, width):
- """ljust(s, width) -> string
-
- Return a left-justified version of s, in a field of the
- specified width, padded with spaces as needed. The string is
- never truncated.
-
- """
- n = width - len(s)
- if n <= 0: return s
- return s + ' '*n
-
-# Right-justify a string
-def rjust(s, width):
- """rjust(s, width) -> string
-
- Return a right-justified version of s, in a field of the
- specified width, padded with spaces as needed. The string is
- never truncated.
-
- """
- n = width - len(s)
- if n <= 0: return s
- return ' '*n + s
-
-# Center a string
-def center(s, width):
- """center(s, width) -> string
-
- Return a centered version of s, in a field of the specified
- width, padded with spaces as needed. The string is never
- truncated.
-
- """
- n = width - len(s)
- if n <= 0: return s
- half = n/2
- if n%2 and width%2:
- # This ensures that center(center(s, i), j) = center(s, j)
- half = half+1
- return ' '*half + s + ' '*(n-half)
-
-# Zero-fill a number, e.g., (12, 3) --> '012' and (-3, 3) --> '-03'
-# Decadent feature: the argument may be a string or a number
-# (Use of this is deprecated; it should be a string as with ljust c.s.)
-def zfill(x, width):
- """zfill(x, width) -> string
-
- Pad a numeric string x with zeros on the left, to fill a field
- of the specified width. The string x is never truncated.
-
- """
- if type(x) == type(''): s = x
- else: s = `x`
- n = len(s)
- if n >= width: return s
- sign = ''
- if s[0] in ('-', '+'):
- sign, s = s[0], s[1:]
- return sign + '0'*(width-n) + s
-
-# Expand tabs in a string.
-# Doesn't take non-printing chars into account, but does understand \n.
-def expandtabs(s, tabsize=8):
- """expandtabs(s [,tabsize]) -> string
-
- Return a copy of the string s with all tab characters replaced
- by the appropriate number of spaces, depending on the current
- column, and the tabsize (default 8).
-
- """
- res = line = ''
- for c in s:
- if c == '\t':
- c = ' '*(tabsize - len(line) % tabsize)
- line = line + c
- if c == '\n':
- res = res + line
- line = ''
- return res + line
-
-# Character translation through look-up table.
-def translate(s, table, deletions=""):
- """translate(s,table [,deletechars]) -> string
-
- Return a copy of the string s, where all characters occurring
- in the optional argument deletechars are removed, and the
- remaining characters have been mapped through the given
- translation table, which must be a string of length 256.
-
- """
- return s.translate(table, deletions)
-
-# Capitalize a string, e.g. "aBc dEf" -> "Abc def".
-def capitalize(s):
- """capitalize(s) -> string
-
- Return a copy of the string s with only its first character
- capitalized.
-
- """
- return s.capitalize()
-
-# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def".
-# See also regsub.capwords().
-def capwords(s, sep=None):
- """capwords(s, [sep]) -> string
-
- Split the argument into words using split, capitalize each
- word using capitalize, and join the capitalized words using
- join. Note that this replaces runs of whitespace characters by
- a single space.
-
- """
- return join(map(capitalize, s.split(sep)), sep or ' ')
-
-# Construct a translation string
-_idmapL = None
-def maketrans(fromstr, tostr):
- """maketrans(frm, to) -> string
-
- Return a translation table (a string 256 bytes long)
- suitable for use in string.translate. The strings frm and to
- must be of the same length.
-
- """
- if len(fromstr) != len(tostr):
- raise ValueError, "maketrans arguments must have same length"
- global _idmapL
- if not _idmapL:
- _idmapL = map(None, _idmap)
- L = _idmapL[:]
- fromstr = map(ord, fromstr)
- for i in range(len(fromstr)):
- L[fromstr[i]] = tostr[i]
- return joinfields(L, "")
-
-# Substring replacement (global)
-def replace(s, old, new, maxsplit=0):
- """replace (str, old, new[, maxsplit]) -> string
-
- Return a copy of string str with all occurrences of substring
- old replaced by new. If the optional argument maxsplit is
- given, only the first maxsplit occurrences are replaced.
-
- """
- return s.replace(old, new, maxsplit)
-
-
-# XXX: transitional
-#
-# If string objects do not have methods, then we need to use the old string.py
-# library, which uses strop for many more things than just the few outlined
-# below.
-try:
- ''.upper
-except AttributeError:
- from stringold import *
-
-# Try importing optional built-in module "strop" -- if it exists,
-# it redefines some string operations that are 100-1000 times faster.
-# It also defines values for whitespace, lowercase and uppercase
-# that match <ctype.h>'s definitions.
-
-try:
- from strop import maketrans, lowercase, uppercase, whitespace
- letters = lowercase + uppercase
-except ImportError:
- pass # Use the original versions
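A brief sketch of the wrapper functions above (Python 2.x, assuming the module is importable under its usual name string):

    import string
    print string.split('a b  c')              # ['a', 'b', 'c']
    print string.join(['a', 'b', 'c'], '-')   # a-b-c
    print string.capwords('hello  world')     # Hello World
    table = string.maketrans('abc', 'xyz')
    print string.translate('aabbcc', table)   # xxyyzz
    print string.zfill(-3, 5)                 # -0003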
diff --git a/Lib/dos-8x3/telnetli.py b/Lib/dos-8x3/telnetli.py
deleted file mode 100644
index dfd549e..0000000
--- a/Lib/dos-8x3/telnetli.py
+++ /dev/null
@@ -1,503 +0,0 @@
-"""TELNET client class.
-
-Based on RFC 854: TELNET Protocol Specification, by J. Postel and
-J. Reynolds
-
-Example:
-
->>> from telnetlib import Telnet
->>> tn = Telnet('www.python.org', 79) # connect to finger port
->>> tn.write('guido\r\n')
->>> print tn.read_all()
-Login Name TTY Idle When Where
-guido Guido van Rossum pts/2 <Dec 2 11:10> snag.cnri.reston..
-
->>>
-
-Note that read_some() won't read until eof -- it just reads some data
--- but it guarantees to read at least one byte unless EOF is hit.
-
-It is possible to pass a Telnet object to select.select() in order to
-wait until more data is available. Note that in this case,
-read_eager() may return '' even if there was data on the socket,
-because the protocol negotiation may have eaten the data. This is why
-EOFError is needed in some cases to distinguish between "no data" and
-"connection closed" (since the socket also appears ready for reading
-when it is closed).
-
-Bugs:
-- may hang when connection is slow in the middle of an IAC sequence
-
-To do:
-- option negotiation
-- timeout should be intrinsic to the connection object instead of an
- option on one of the read calls only
-
-"""
-
-
-# Imported modules
-import sys
-import socket
-import select
-import string
-
-# Tunable parameters
-DEBUGLEVEL = 0
-
-# Telnet protocol defaults
-TELNET_PORT = 23
-
-# Telnet protocol characters (don't change)
-IAC = chr(255) # "Interpret As Command"
-DONT = chr(254)
-DO = chr(253)
-WONT = chr(252)
-WILL = chr(251)
-theNULL = chr(0)
-
-
-class Telnet:
-
- """Telnet interface class.
-
- An instance of this class represents a connection to a telnet
- server. The instance is initially not connected; the open()
- method must be used to establish a connection. Alternatively, the
- host name and optional port number can be passed to the
- constructor, too.
-
- Don't try to reopen an already connected instance.
-
- This class has many read_*() methods. Note that some of them
- raise EOFError when the end of the connection is read, because
- they can return an empty string for other reasons. See the
- individual doc strings.
-
- read_until(expected, [timeout])
- Read until the expected string has been seen, or a timeout is
- hit (default is no timeout); may block.
-
- read_all()
- Read all data until EOF; may block.
-
- read_some()
- Read at least one byte or EOF; may block.
-
- read_very_eager()
- Read all data available already queued or on the socket,
- without blocking.
-
- read_eager()
- Read either data already queued or some data available on the
- socket, without blocking.
-
- read_lazy()
- Read all data in the raw queue (processing it first), without
- doing any socket I/O.
-
- read_very_lazy()
- Reads all data in the cooked queue, without doing any socket
- I/O.
-
- """
-
- def __init__(self, host=None, port=0):
- """Constructor.
-
- When called without arguments, create an unconnected instance.
- With a hostname argument, it connects the instance; a port
- number is optional.
-
- """
- self.debuglevel = DEBUGLEVEL
- self.host = host
- self.port = port
- self.sock = None
- self.rawq = ''
- self.irawq = 0
- self.cookedq = ''
- self.eof = 0
- if host:
- self.open(host, port)
-
- def open(self, host, port=0):
- """Connect to a host.
-
- The optional second argument is the port number, which
- defaults to the standard telnet port (23).
-
- Don't try to reopen an already connected instance.
-
- """
- self.eof = 0
- if not port:
- port = TELNET_PORT
- self.host = host
- self.port = port
- self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- self.sock.connect((self.host, self.port))
-
- def __del__(self):
- """Destructor -- close the connection."""
- self.close()
-
- def msg(self, msg, *args):
- """Print a debug message, when the debug level is > 0.
-
- If extra arguments are present, they are substituted in the
- message using the standard string formatting operator.
-
- """
- if self.debuglevel > 0:
- print 'Telnet(%s,%d):' % (self.host, self.port),
- if args:
- print msg % args
- else:
- print msg
-
- def set_debuglevel(self, debuglevel):
- """Set the debug level.
-
- The higher it is, the more debug output you get (on sys.stdout).
-
- """
- self.debuglevel = debuglevel
-
- def close(self):
- """Close the connection."""
- if self.sock:
- self.sock.close()
- self.sock = 0
- self.eof = 1
-
- def get_socket(self):
- """Return the socket object used internally."""
- return self.sock
-
- def fileno(self):
- """Return the fileno() of the socket object used internally."""
- return self.sock.fileno()
-
- def write(self, buffer):
- """Write a string to the socket, doubling any IAC characters.
-
- Can block if the connection is blocked. May raise
- socket.error if the connection is closed.
-
- """
- if IAC in buffer:
- buffer = string.replace(buffer, IAC, IAC+IAC)
- self.msg("send %s", `buffer`)
- self.sock.send(buffer)
-
- def read_until(self, match, timeout=None):
- """Read until a given string is encountered or until timeout.
-
- When no match is found, return whatever is available instead,
- possibly the empty string. Raise EOFError if the connection
- is closed and no cooked data is available.
-
- """
- n = len(match)
- self.process_rawq()
- i = string.find(self.cookedq, match)
- if i >= 0:
- i = i+n
- buf = self.cookedq[:i]
- self.cookedq = self.cookedq[i:]
- return buf
- s_reply = ([self], [], [])
- s_args = s_reply
- if timeout is not None:
- s_args = s_args + (timeout,)
- while not self.eof and apply(select.select, s_args) == s_reply:
- i = max(0, len(self.cookedq)-n)
- self.fill_rawq()
- self.process_rawq()
- i = string.find(self.cookedq, match, i)
- if i >= 0:
- i = i+n
- buf = self.cookedq[:i]
- self.cookedq = self.cookedq[i:]
- return buf
- return self.read_very_lazy()
-
- def read_all(self):
- """Read all data until EOF; block until connection closed."""
- self.process_rawq()
- while not self.eof:
- self.fill_rawq()
- self.process_rawq()
- buf = self.cookedq
- self.cookedq = ''
- return buf
-
- def read_some(self):
- """Read at least one byte of cooked data unless EOF is hit.
-
- Return '' if EOF is hit. Block if no data is immediately
- available.
-
- """
- self.process_rawq()
- while not self.cookedq and not self.eof:
- self.fill_rawq()
- self.process_rawq()
- buf = self.cookedq
- self.cookedq = ''
- return buf
-
- def read_very_eager(self):
- """Read everything that's possible without blocking in I/O (eager).
-
- Raise EOFError if connection closed and no cooked data
- available. Return '' if no cooked data available otherwise.
- Don't block unless in the midst of an IAC sequence.
-
- """
- self.process_rawq()
- while not self.eof and self.sock_avail():
- self.fill_rawq()
- self.process_rawq()
- return self.read_very_lazy()
-
- def read_eager(self):
- """Read readily available data.
-
- Raise EOFError if connection closed and no cooked data
- available. Return '' if no cooked data available otherwise.
- Don't block unless in the midst of an IAC sequence.
-
- """
- self.process_rawq()
- while not self.cookedq and not self.eof and self.sock_avail():
- self.fill_rawq()
- self.process_rawq()
- return self.read_very_lazy()
-
- def read_lazy(self):
- """Process and return data that's already in the queues (lazy).
-
- Raise EOFError if connection closed and no data available.
- Return '' if no cooked data available otherwise. Don't block
- unless in the midst of an IAC sequence.
-
- """
- self.process_rawq()
- return self.read_very_lazy()
-
- def read_very_lazy(self):
- """Return any data available in the cooked queue (very lazy).
-
- Raise EOFError if connection closed and no data available.
- Return '' if no cooked data available otherwise. Don't block.
-
- """
- buf = self.cookedq
- self.cookedq = ''
- if not buf and self.eof and not self.rawq:
- raise EOFError, 'telnet connection closed'
- return buf
-
- def process_rawq(self):
- """Transfer from raw queue to cooked queue.
-
- Set self.eof when connection is closed. Don't block unless in
- the midst of an IAC sequence.
-
- """
- buf = ''
- try:
- while self.rawq:
- c = self.rawq_getchar()
- if c == theNULL:
- continue
- if c == "\021":
- continue
- if c != IAC:
- buf = buf + c
- continue
- c = self.rawq_getchar()
- if c == IAC:
- buf = buf + c
- elif c in (DO, DONT):
- opt = self.rawq_getchar()
- self.msg('IAC %s %d', c == DO and 'DO' or 'DONT', ord(opt))
- self.sock.send(IAC + WONT + opt)
- elif c in (WILL, WONT):
- opt = self.rawq_getchar()
- self.msg('IAC %s %d',
- c == WILL and 'WILL' or 'WONT', ord(opt))
- self.sock.send(IAC + DONT + opt)
- else:
- self.msg('IAC %s not recognized' % `c`)
- except EOFError: # raised by self.rawq_getchar()
- pass
- self.cookedq = self.cookedq + buf
-
- def rawq_getchar(self):
- """Get next char from raw queue.
-
- Block if no data is immediately available. Raise EOFError
- when connection is closed.
-
- """
- if not self.rawq:
- self.fill_rawq()
- if self.eof:
- raise EOFError
- c = self.rawq[self.irawq]
- self.irawq = self.irawq + 1
- if self.irawq >= len(self.rawq):
- self.rawq = ''
- self.irawq = 0
- return c
-
- def fill_rawq(self):
- """Fill raw queue from exactly one recv() system call.
-
- Block if no data is immediately available. Set self.eof when
- connection is closed.
-
- """
- if self.irawq >= len(self.rawq):
- self.rawq = ''
- self.irawq = 0
- # The buffer size should be fairly small so as to avoid quadratic
- # behavior in process_rawq() above
- buf = self.sock.recv(50)
- self.msg("recv %s", `buf`)
- self.eof = (not buf)
- self.rawq = self.rawq + buf
-
- def sock_avail(self):
- """Test whether data is available on the socket."""
- return select.select([self], [], [], 0) == ([self], [], [])
-
- def interact(self):
- """Interaction function, emulates a very dumb telnet client."""
- if sys.platform == "win32":
- self.mt_interact()
- return
- while 1:
- rfd, wfd, xfd = select.select([self, sys.stdin], [], [])
- if self in rfd:
- try:
- text = self.read_eager()
- except EOFError:
- print '*** Connection closed by remote host ***'
- break
- if text:
- sys.stdout.write(text)
- sys.stdout.flush()
- if sys.stdin in rfd:
- line = sys.stdin.readline()
- if not line:
- break
- self.write(line)
-
- def mt_interact(self):
- """Multithreaded version of interact()."""
- import thread
- thread.start_new_thread(self.listener, ())
- while 1:
- line = sys.stdin.readline()
- if not line:
- break
- self.write(line)
-
- def listener(self):
- """Helper for mt_interact() -- this executes in the other thread."""
- while 1:
- try:
- data = self.read_eager()
- except EOFError:
- print '*** Connection closed by remote host ***'
- return
- if data:
- sys.stdout.write(data)
- else:
- sys.stdout.flush()
-
- def expect(self, list, timeout=None):
- """Read until one from a list of a regular expressions matches.
-
- The first argument is a list of regular expressions, either
- compiled (re.RegexObject instances) or uncompiled (strings).
- The optional second argument is a timeout, in seconds; default
- is no timeout.
-
- Return a tuple of three items: the index in the list of the
- first regular expression that matches; the match object
- returned; and the text read up till and including the match.
-
- If EOF is read and no text was read, raise EOFError.
- Otherwise, when nothing matches, return (-1, None, text) where
- text is the text received so far (may be the empty string if a
- timeout happened).
-
- If a regular expression ends with a greedy match (e.g. '.*')
- or if more than one expression can match the same input, the
- results are non-deterministic, and may depend on the I/O timing.
-
- """
- re = None
- list = list[:]
- indices = range(len(list))
- for i in indices:
- if not hasattr(list[i], "search"):
- if not re: import re
- list[i] = re.compile(list[i])
- while 1:
- self.process_rawq()
- for i in indices:
- m = list[i].search(self.cookedq)
- if m:
- e = m.end()
- text = self.cookedq[:e]
- self.cookedq = self.cookedq[e:]
- return (i, m, text)
- if self.eof:
- break
- if timeout is not None:
- r, w, x = select.select([self.fileno()], [], [], timeout)
- if not r:
- break
- self.fill_rawq()
- text = self.read_very_lazy()
- if not text and self.eof:
- raise EOFError
- return (-1, None, text)
-
-
-def test():
- """Test program for telnetlib.
-
- Usage: python telnetlib.py [-d] ... [host [port]]
-
- Default host is localhost; default port is 23.
-
- """
- debuglevel = 0
- while sys.argv[1:] and sys.argv[1] == '-d':
- debuglevel = debuglevel+1
- del sys.argv[1]
- host = 'localhost'
- if sys.argv[1:]:
- host = sys.argv[1]
- port = 0
- if sys.argv[2:]:
- portstr = sys.argv[2]
- try:
- port = int(portstr)
- except ValueError:
- port = socket.getservbyname(portstr, 'tcp')
- tn = Telnet()
- tn.set_debuglevel(debuglevel)
- tn.open(host, port)
- tn.interact()
- tn.close()
-
-if __name__ == '__main__':
- test()
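Beyond the finger example in the module docstring, a hedged sketch of the read_until()/expect() interface described above (Python 2.x; the host name and prompt strings are illustrative only):

    from telnetlib import Telnet
    tn = Telnet('localhost')                      # default port is TELNET_PORT (23)
    print tn.read_until('login: ', 5)             # wait at most 5 seconds for the prompt
    tn.write('guido\r\n')
    idx, m, text = tn.expect(['Password: ', 'incorrect'], 5)
    tn.close()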
diff --git a/Lib/dos-8x3/test_arr.py b/Lib/dos-8x3/test_arr.py
deleted file mode 100644
index fb451a9..0000000
--- a/Lib/dos-8x3/test_arr.py
+++ /dev/null
@@ -1,188 +0,0 @@
-#! /usr/bin/env python
-"""Test the arraymodule.
- Roger E. Masse
-"""
-import array
-from test_support import verbose, TESTFN, unlink, TestFailed
-
-def main():
-
- testtype('c', 'c')
-
- for type in (['b', 'h', 'i', 'l', 'f', 'd']):
- testtype(type, 1)
-
- unlink(TESTFN)
-
-
-def testoverflow(type, lowerLimit, upperLimit):
- # should not overflow assigning lower limit
- if verbose:
- print "overflow test: array(%s, [%s])" % (`type`, `lowerLimit`)
- try:
- a = array.array(type, [lowerLimit])
- except:
- raise TestFailed, "array(%s) overflowed assigning %s" %\
- (`type`, `lowerLimit`)
- # should overflow assigning less than lower limit
- if verbose:
- print "overflow test: array(%s, [%s])" % (`type`, `lowerLimit-1`)
- try:
- a = array.array(type, [lowerLimit-1])
- raise TestFailed, "array(%s) did not overflow assigning %s" %\
- (`type`, `lowerLimit-1`)
- except OverflowError:
- pass
- # should not overflow assigning upper limit
- if verbose:
- print "overflow test: array(%s, [%s])" % (`type`, `upperLimit`)
- try:
- a = array.array(type, [upperLimit])
- except:
- raise TestFailed, "array(%s) overflowed assigning %s" %\
- (`type`, `upperLimit`)
- # should overflow assigning more than upper limit
- if verbose:
- print "overflow test: array(%s, [%s])" % (`type`, `upperLimit+1`)
- try:
- a = array.array(type, [upperLimit+1])
- raise TestFailed, "array(%s) did not overflow assigning %s" %\
- (`type`, `upperLimit+1`)
- except OverflowError:
- pass
-
-
-
-def testtype(type, example):
-
- a = array.array(type)
- a.append(example)
- if verbose:
- print 40*'*'
- print 'array after append: ', a
- a.typecode
- a.itemsize
- if a.typecode in ('i', 'b', 'h', 'l'):
- a.byteswap()
-
- if a.typecode == 'c':
- f = open(TESTFN, "w")
- f.write("The quick brown fox jumps over the lazy dog.\n")
- f.close()
- f = open(TESTFN, 'r')
- a.fromfile(f, 10)
- f.close()
- if verbose:
- print 'char array with 10 bytes of TESTFN appended: ', a
- a.fromlist(['a', 'b', 'c'])
- if verbose:
- print 'char array with list appended: ', a
-
- a.insert(0, example)
- if verbose:
- print 'array of %s after inserting another:' % a.typecode, a
- f = open(TESTFN, 'w')
- a.tofile(f)
- f.close()
- a.tolist()
- a.tostring()
- if verbose:
- print 'array of %s converted to a list: ' % a.typecode, a.tolist()
- if verbose:
- print 'array of %s converted to a string: ' \
- % a.typecode, `a.tostring()`
-
- if type == 'c':
- a = array.array(type, "abcde")
- a[:-1] = a
- if a != array.array(type, "abcdee"):
- raise TestFailed, "array(%s) self-slice-assign (head)" % `type`
- a = array.array(type, "abcde")
- a[1:] = a
- if a != array.array(type, "aabcde"):
- raise TestFailed, "array(%s) self-slice-assign (tail)" % `type`
- a = array.array(type, "abcde")
- a[1:-1] = a
- if a != array.array(type, "aabcdee"):
- raise TestFailed, "array(%s) self-slice-assign (cntr)" % `type`
- if a.index("e") != 5:
- raise TestFailed, "array(%s) index-test" % `type`
- if a.count("a") != 2:
- raise TestFailed, "array(%s) count-test" % `type`
- a.remove("e")
- if a != array.array(type, "aabcde"):
- raise TestFailed, "array(%s) remove-test" % `type`
- if a.pop(0) != "a":
- raise TestFailed, "array(%s) pop-test" % `type`
- if a.pop(1) != "b":
- raise TestFailed, "array(%s) pop-test" % `type`
- a.extend(array.array(type, "xyz"))
- if a != array.array(type, "acdexyz"):
- raise TestFailed, "array(%s) extend-test" % `type`
- a.pop()
- a.pop()
- a.pop()
- x = a.pop()
- if x != 'e':
- raise TestFailed, "array(%s) pop-test" % `type`
- if a != array.array(type, "acd"):
- raise TestFailed, "array(%s) pop-test" % `type`
- a.reverse()
- if a != array.array(type, "dca"):
- raise TestFailed, "array(%s) reverse-test" % `type`
- else:
- a = array.array(type, [1, 2, 3, 4, 5])
- a[:-1] = a
- if a != array.array(type, [1, 2, 3, 4, 5, 5]):
- raise TestFailed, "array(%s) self-slice-assign (head)" % `type`
- a = array.array(type, [1, 2, 3, 4, 5])
- a[1:] = a
- if a != array.array(type, [1, 1, 2, 3, 4, 5]):
- raise TestFailed, "array(%s) self-slice-assign (tail)" % `type`
- a = array.array(type, [1, 2, 3, 4, 5])
- a[1:-1] = a
- if a != array.array(type, [1, 1, 2, 3, 4, 5, 5]):
- raise TestFailed, "array(%s) self-slice-assign (cntr)" % `type`
- if a.index(5) != 5:
- raise TestFailed, "array(%s) index-test" % `type`
- if a.count(1) != 2:
- raise TestFailed, "array(%s) count-test" % `type`
- a.remove(5)
- if a != array.array(type, [1, 1, 2, 3, 4, 5]):
- raise TestFailed, "array(%s) remove-test" % `type`
- if a.pop(0) != 1:
- raise TestFailed, "array(%s) pop-test" % `type`
- if a.pop(1) != 2:
- raise TestFailed, "array(%s) pop-test" % `type`
- a.extend(array.array(type, [7, 8, 9]))
- if a != array.array(type, [1, 3, 4, 5, 7, 8, 9]):
- raise TestFailed, "array(%s) extend-test" % `type`
- a.pop()
- a.pop()
- a.pop()
- x = a.pop()
- if x != 5:
- raise TestFailed, "array(%s) pop-test" % `type`
- if a != array.array(type, [1, 3, 4]):
- raise TestFailed, "array(%s) pop-test" % `type`
- a.reverse()
- if a != array.array(type, [4, 3, 1]):
- raise TestFailed, "array(%s) reverse-test" % `type`
-
- # test that overflow exceptions are raised as expected for assignment
- # to array of specific integral types
- from math import pow
- if type in ('b', 'h', 'i', 'l'):
- # check signed and unsigned versions
- a = array.array(type)
- signedLowerLimit = -1 * long(pow(2, a.itemsize * 8 - 1))
- signedUpperLimit = long(pow(2, a.itemsize * 8 - 1)) - 1L
- unsignedLowerLimit = 0
- unsignedUpperLimit = long(pow(2, a.itemsize * 8)) - 1L
- testoverflow(type, signedLowerLimit, signedUpperLimit)
- testoverflow(type.upper(), unsignedLowerLimit, unsignedUpperLimit)
-
-
-
-main()
-
diff --git a/Lib/dos-8x3/test_ate.py b/Lib/dos-8x3/test_ate.py
deleted file mode 100644
index 517610b..0000000
--- a/Lib/dos-8x3/test_ate.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# Test the exit module
-from test_support import verbose
-import atexit
-
-def handler1():
- print "handler1"
-
-def handler2(*args, **kargs):
- print "handler2", args, kargs
-
-# save any exit functions that may have been registered as part of the
-# test framework
-_exithandlers = atexit._exithandlers
-atexit._exithandlers = []
-
-atexit.register(handler1)
-atexit.register(handler2)
-atexit.register(handler2, 7, kw="abc")
-
-# simulate exit behavior by calling atexit._run_exitfuncs directly...
-atexit._run_exitfuncs()
-
-# restore exit handlers
-atexit._exithandlers = _exithandlers
diff --git a/Lib/dos-8x3/test_aud.py b/Lib/dos-8x3/test_aud.py
deleted file mode 100755
index 900ec58..0000000
--- a/Lib/dos-8x3/test_aud.py
+++ /dev/null
@@ -1,264 +0,0 @@
-# Test audioop.
-import audioop
-from test_support import verbose
-
-def gendata1():
- return '\0\1\2'
-
-def gendata2():
- if verbose:
- print 'getsample'
- if audioop.getsample('\0\1', 2, 0) == 1:
- return '\0\0\0\1\0\2'
- else:
- return '\0\0\1\0\2\0'
-
-def gendata4():
- if verbose:
- print 'getsample'
- if audioop.getsample('\0\0\0\1', 4, 0) == 1:
- return '\0\0\0\0\0\0\0\1\0\0\0\2'
- else:
- return '\0\0\0\0\1\0\0\0\2\0\0\0'
-
-def testmax(data):
- if verbose:
- print 'max'
- if audioop.max(data[0], 1) <> 2 or \
- audioop.max(data[1], 2) <> 2 or \
- audioop.max(data[2], 4) <> 2:
- return 0
- return 1
-
-def testminmax(data):
- if verbose:
- print 'minmax'
- if audioop.minmax(data[0], 1) <> (0, 2) or \
- audioop.minmax(data[1], 2) <> (0, 2) or \
- audioop.minmax(data[2], 4) <> (0, 2):
- return 0
- return 1
-
-def testmaxpp(data):
- if verbose:
- print 'maxpp'
- if audioop.maxpp(data[0], 1) <> 0 or \
- audioop.maxpp(data[1], 2) <> 0 or \
- audioop.maxpp(data[2], 4) <> 0:
- return 0
- return 1
-
-def testavg(data):
- if verbose:
- print 'avg'
- if audioop.avg(data[0], 1) <> 1 or \
- audioop.avg(data[1], 2) <> 1 or \
- audioop.avg(data[2], 4) <> 1:
- return 0
- return 1
-
-def testavgpp(data):
- if verbose:
- print 'avgpp'
- if audioop.avgpp(data[0], 1) <> 0 or \
- audioop.avgpp(data[1], 2) <> 0 or \
- audioop.avgpp(data[2], 4) <> 0:
- return 0
- return 1
-
-def testrms(data):
- if audioop.rms(data[0], 1) <> 1 or \
- audioop.rms(data[1], 2) <> 1 or \
- audioop.rms(data[2], 4) <> 1:
- return 0
- return 1
-
-def testcross(data):
- if verbose:
- print 'cross'
- if audioop.cross(data[0], 1) <> 0 or \
- audioop.cross(data[1], 2) <> 0 or \
- audioop.cross(data[2], 4) <> 0:
- return 0
- return 1
-
-def testadd(data):
- if verbose:
- print 'add'
- data2 = []
- for d in data:
- str = ''
- for s in d:
- str = str + chr(ord(s)*2)
- data2.append(str)
- if audioop.add(data[0], data[0], 1) <> data2[0] or \
- audioop.add(data[1], data[1], 2) <> data2[1] or \
- audioop.add(data[2], data[2], 4) <> data2[2]:
- return 0
- return 1
-
-def testbias(data):
- if verbose:
- print 'bias'
- # Note: this test assumes that avg() works
- d1 = audioop.bias(data[0], 1, 100)
- d2 = audioop.bias(data[1], 2, 100)
- d4 = audioop.bias(data[2], 4, 100)
- if audioop.avg(d1, 1) <> 101 or \
- audioop.avg(d2, 2) <> 101 or \
- audioop.avg(d4, 4) <> 101:
- return 0
- return 1
-
-def testlin2lin(data):
- if verbose:
- print 'lin2lin'
- # too simple: we test only the size
- for d1 in data:
- for d2 in data:
- got = len(d1)/3
- wtd = len(d2)/3
- if len(audioop.lin2lin(d1, got, wtd)) <> len(d2):
- return 0
- return 1
-
-def testadpcm2lin(data):
- # Very cursory test
- if audioop.adpcm2lin('\0\0', 1, None) <> ('\0\0\0\0', (0,0)):
- return 0
- return 1
-
-def testlin2adpcm(data):
- if verbose:
- print 'lin2adpcm'
- # Very cursory test
- if audioop.lin2adpcm('\0\0\0\0', 1, None) <> ('\0\0', (0,0)):
- return 0
- return 1
-
-def testlin2ulaw(data):
- if verbose:
- print 'lin2ulaw'
- if audioop.lin2ulaw(data[0], 1) <> '\377\347\333' or \
- audioop.lin2ulaw(data[1], 2) <> '\377\377\377' or \
- audioop.lin2ulaw(data[2], 4) <> '\377\377\377':
- return 0
- return 1
-
-def testulaw2lin(data):
- if verbose:
- print 'ulaw2lin'
- # Cursory
- d = audioop.lin2ulaw(data[0], 1)
- if audioop.ulaw2lin(d, 1) <> data[0]:
- return 0
- return 1
-
-def testmul(data):
- if verbose:
- print 'mul'
- data2 = []
- for d in data:
- str = ''
- for s in d:
- str = str + chr(ord(s)*2)
- data2.append(str)
- if audioop.mul(data[0], 1, 2) <> data2[0] or \
- audioop.mul(data[1],2, 2) <> data2[1] or \
- audioop.mul(data[2], 4, 2) <> data2[2]:
- return 0
- return 1
-
-def testratecv(data):
- if verbose:
- print 'ratecv'
- state = None
- d1, state = audioop.ratecv(data[0], 1, 1, 8000, 16000, state)
- d2, state = audioop.ratecv(data[0], 1, 1, 8000, 16000, state)
- if d1 + d2 != '\000\000\001\001\002\001\000\000\001\001\002':
- return 0
- return 1
-
-def testreverse(data):
- if verbose:
- print 'reverse'
- if audioop.reverse(data[0], 1) <> '\2\1\0':
- return 0
- return 1
-
-def testtomono(data):
- if verbose:
- print 'tomono'
- data2 = ''
- for d in data[0]:
- data2 = data2 + d + d
- if audioop.tomono(data2, 1, 0.5, 0.5) <> data[0]:
- return 0
- return 1
-
-def testtostereo(data):
- if verbose:
- print 'tostereo'
- data2 = ''
- for d in data[0]:
- data2 = data2 + d + d
- if audioop.tostereo(data[0], 1, 1, 1) <> data2:
- return 0
- return 1
-
-def testfindfactor(data):
- if verbose:
- print 'findfactor'
- if audioop.findfactor(data[1], data[1]) <> 1.0:
- return 0
- return 1
-
-def testfindfit(data):
- if verbose:
- print 'findfit'
- if audioop.findfit(data[1], data[1]) <> (0, 1.0):
- return 0
- return 1
-
-def testfindmax(data):
- if verbose:
- print 'findmax'
- if audioop.findmax(data[1], 1) <> 2:
- return 0
- return 1
-
-def testgetsample(data):
- if verbose:
- print 'getsample'
- for i in range(3):
- if audioop.getsample(data[0], 1, i) <> i or \
- audioop.getsample(data[1], 2, i) <> i or \
- audioop.getsample(data[2], 4, i) <> i:
- return 0
- return 1
-
-def testone(name, data):
- try:
- func = eval('test'+name)
- except NameError:
- print 'No test found for audioop.'+name+'()'
- return
- try:
- rv = func(data)
- except 'xx':
- print 'Test FAILED for audioop.'+name+'() (with an exception)'
- return
- if not rv:
- print 'Test FAILED for audioop.'+name+'()'
-
-def testall():
- data = [gendata1(), gendata2(), gendata4()]
- names = dir(audioop)
- # We know there is a routine 'add'
- routines = []
- for n in names:
- if type(eval('audioop.'+n)) == type(audioop.add):
- routines.append(n)
- for n in routines:
- testone(n, data)
-testall()
diff --git a/Lib/dos-8x3/test_aug.py b/Lib/dos-8x3/test_aug.py
deleted file mode 100644
index a01195e..0000000
--- a/Lib/dos-8x3/test_aug.py
+++ /dev/null
@@ -1,232 +0,0 @@
-
-# Augmented assignment test.
-
-x = 2
-x += 1
-x *= 2
-x **= 2
-x -= 8
-x /= 2
-x %= 12
-x &= 2
-x |= 5
-x ^= 1
-
-print x
-
-x = [2]
-x[0] += 1
-x[0] *= 2
-x[0] **= 2
-x[0] -= 8
-x[0] /= 2
-x[0] %= 12
-x[0] &= 2
-x[0] |= 5
-x[0] ^= 1
-
-print x
-
-x = {0: 2}
-x[0] += 1
-x[0] *= 2
-x[0] **= 2
-x[0] -= 8
-x[0] /= 2
-x[0] %= 12
-x[0] &= 2
-x[0] |= 5
-x[0] ^= 1
-
-print x[0]
-
-x = [1,2]
-x += [3,4]
-x *= 2
-
-print x
-
-x = [1, 2, 3]
-y = x
-x[1:2] *= 2
-y[1:2] += [1]
-
-print x
-print x is y
-
-class aug_test:
- def __init__(self, value):
- self.val = value
- def __radd__(self, val):
- return self.val + val
- def __add__(self, val):
- return aug_test(self.val + val)
-
-
-class aug_test2(aug_test):
- def __iadd__(self, val):
- self.val = self.val + val
- return self
-
-class aug_test3(aug_test):
- def __iadd__(self, val):
- return aug_test3(self.val + val)
-
-x = aug_test(1)
-y = x
-x += 10
-
-print isinstance(x, aug_test)
-print y is not x
-print x.val
-
-x = aug_test2(2)
-y = x
-x += 10
-
-print y is x
-print x.val
-
-x = aug_test3(3)
-y = x
-x += 10
-
-print isinstance(x, aug_test3)
-print y is not x
-print x.val
-
-class testall:
-
- def __add__(self, val):
- print "__add__ called"
- def __radd__(self, val):
- print "__radd__ called"
- def __iadd__(self, val):
- print "__iadd__ called"
- return self
-
- def __sub__(self, val):
- print "__sub__ called"
- def __rsub__(self, val):
- print "__rsub__ called"
- def __isub__(self, val):
- print "__isub__ called"
- return self
-
- def __mul__(self, val):
- print "__mul__ called"
- def __rmul__(self, val):
- print "__rmul__ called"
- def __imul__(self, val):
- print "__imul__ called"
- return self
-
- def __div__(self, val):
- print "__div__ called"
- def __rdiv__(self, val):
- print "__rdiv__ called"
- def __idiv__(self, val):
- print "__idiv__ called"
- return self
-
- def __mod__(self, val):
- print "__mod__ called"
- def __rmod__(self, val):
- print "__rmod__ called"
- def __imod__(self, val):
- print "__imod__ called"
- return self
-
- def __pow__(self, val):
- print "__pow__ called"
- def __rpow__(self, val):
- print "__rpow__ called"
- def __ipow__(self, val):
- print "__ipow__ called"
- return self
-
- def __or__(self, val):
- print "__or__ called"
- def __ror__(self, val):
- print "__ror__ called"
- def __ior__(self, val):
- print "__ior__ called"
- return self
-
- def __and__(self, val):
- print "__and__ called"
- def __rand__(self, val):
- print "__rand__ called"
- def __iand__(self, val):
- print "__iand__ called"
- return self
-
- def __xor__(self, val):
- print "__xor__ called"
- def __rxor__(self, val):
- print "__rxor__ called"
- def __ixor__(self, val):
- print "__ixor__ called"
- return self
-
- def __rshift__(self, val):
- print "__rshift__ called"
- def __rrshift__(self, val):
- print "__rrshift__ called"
- def __irshift__(self, val):
- print "__irshift__ called"
- return self
-
- def __lshift__(self, val):
- print "__lshift__ called"
- def __rlshift__(self, val):
- print "__rlshift__ called"
- def __ilshift__(self, val):
- print "__ilshift__ called"
- return self
-
-x = testall()
-x + 1
-1 + x
-x += 1
-
-x - 1
-1 - x
-x -= 1
-
-x * 1
-1 * x
-x *= 1
-
-x / 1
-1 / x
-x /= 1
-
-x % 1
-1 % x
-x %= 1
-
-x ** 1
-1 ** x
-x **= 1
-
-x | 1
-1 | x
-x |= 1
-
-x & 1
-1 & x
-x &= 1
-
-x ^ 1
-1 ^ x
-x ^= 1
-
-x >> 1
-1 >> x
-x >>= 1
-
-x << 1
-1 << x
-x <<= 1
-
diff --git a/Lib/dos-8x3/test_bin.py b/Lib/dos-8x3/test_bin.py
deleted file mode 100644
index 52f817b..0000000
--- a/Lib/dos-8x3/test_bin.py
+++ /dev/null
@@ -1,112 +0,0 @@
-"""Test the binascii C module."""
-
-from test_support import verbose
-import binascii
-
-# Show module doc string
-print binascii.__doc__
-
-# Show module exceptions
-print binascii.Error
-print binascii.Incomplete
-
-# Check presence and display doc strings of all functions
-funcs = []
-for suffix in "base64", "hqx", "uu":
- prefixes = ["a2b_", "b2a_"]
- if suffix == "hqx":
- prefixes.extend(["crc_", "rlecode_", "rledecode_"])
- for prefix in prefixes:
- name = prefix + suffix
- funcs.append(getattr(binascii, name))
-for func in funcs:
- print "%-15s: %s" % (func.__name__, func.__doc__)
-
-# Create binary test data
-testdata = "The quick brown fox jumps over the lazy dog.\r\n"
-for i in range(256):
- # Be slow so we don't depend on other modules
- testdata = testdata + chr(i)
-testdata = testdata + "\r\nHello world.\n"
-
-# Test base64 with valid data
-print "base64 test"
-MAX_BASE64 = 57
-lines = []
-for i in range(0, len(testdata), MAX_BASE64):
- b = testdata[i:i+MAX_BASE64]
- a = binascii.b2a_base64(b)
- lines.append(a)
- print a,
-res = ""
-for line in lines:
- b = binascii.a2b_base64(line)
- res = res + b
-assert res == testdata
-
-# Test base64 with random invalid characters sprinkled throughout
-# (This requires a new version of binascii.)
-fillers = ""
-valid = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/"
-for i in range(256):
- c = chr(i)
- if c not in valid:
- fillers = fillers + c
-def addnoise(line):
- noise = fillers
- ratio = len(line) / len(noise)
- res = ""
- while line and noise:
- if len(line) / len(noise) > ratio:
- c, line = line[0], line[1:]
- else:
- c, noise = noise[0], noise[1:]
- res = res + c
- return res + noise + line
-res = ""
-for line in map(addnoise, lines):
- b = binascii.a2b_base64(line)
- res = res + b
-assert res == testdata
-
-# Test uu
-print "uu test"
-MAX_UU = 45
-lines = []
-for i in range(0, len(testdata), MAX_UU):
- b = testdata[i:i+MAX_UU]
- a = binascii.b2a_uu(b)
- lines.append(a)
- print a,
-res = ""
-for line in lines:
- b = binascii.a2b_uu(line)
- res = res + b
-assert res == testdata
-
-# Test crc32()
-crc = binascii.crc32("Test the CRC-32 of")
-crc = binascii.crc32(" this string.", crc)
-if crc != 1571220330:
- print "binascii.crc32() failed."
-
-# The hqx test is in test_binhex.py
-
-# test hexlification
-s = '{s\005\000\000\000worldi\002\000\000\000s\005\000\000\000helloi\001\000\000\0000'
-t = binascii.b2a_hex(s)
-u = binascii.a2b_hex(t)
-if s <> u:
- print 'binascii hexlification failed'
-try:
- binascii.a2b_hex(t[:-1])
-except TypeError:
- pass
-else:
- print 'expected TypeError not raised'
-try:
- binascii.a2b_hex(t[:-1] + 'q')
-except TypeError:
- pass
-else:
- print 'expected TypeError not raised'
diff --git a/Lib/dos-8x3/test_bsd.py b/Lib/dos-8x3/test_bsd.py
deleted file mode 100644
index e5780ea..0000000
--- a/Lib/dos-8x3/test_bsd.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#! /usr/bin/env python
-"""Test script for the bsddb C module
- Roger E. Masse
-"""
-
-import os
-import bsddb
-import tempfile
-from test_support import verbose
-
-def test(openmethod, what):
-
- if verbose:
- print '\nTesting: ', what
-
- fname = tempfile.mktemp()
- f = openmethod(fname, 'c')
- if verbose:
- print 'creation...'
- f['0'] = ''
- f['a'] = 'Guido'
- f['b'] = 'van'
- f['c'] = 'Rossum'
- f['d'] = 'invented'
- f['f'] = 'Python'
- if verbose:
- print '%s %s %s' % (f['a'], f['b'], f['c'])
-
- if what == 'BTree' :
- if verbose:
- print 'key ordering...'
- f.set_location(f.first()[0])
- while 1:
- try:
- rec = f.next()
- except KeyError:
- if rec <> f.last():
- print 'Error, last <> last!'
- f.previous()
- break
- if verbose:
- print rec
- if not f.has_key('a'):
- print 'Error, missing key!'
-
- f.sync()
- f.close()
- if verbose:
- print 'modification...'
- f = openmethod(fname, 'w')
- f['d'] = 'discovered'
-
- if verbose:
- print 'access...'
- for key in f.keys():
- word = f[key]
- if verbose:
- print word
-
- f.close()
- try:
- os.remove(fname)
- except os.error:
- pass
-
-types = [(bsddb.btopen, 'BTree'),
- (bsddb.hashopen, 'Hash Table'),
- # (bsddb.rnopen,'Record Numbers'), 'put' for RECNO for bsddb 1.85
- # appears broken... at least on
- # Solaris Intel - rmasse 1/97
- ]
-
-for type in types:
- test(type[0], type[1])
diff --git a/Lib/dos-8x3/test_bui.py b/Lib/dos-8x3/test_bui.py
deleted file mode 100755
index 33fef8d..0000000
--- a/Lib/dos-8x3/test_bui.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Python test set -- part 4, built-in functions
-
-from test_support import *
-
-print '4. Built-in functions'
-
-print 'test_b1'
-unload('test_b1')
-import test_b1
-
-print 'test_b2'
-unload('test_b2')
-import test_b2
diff --git a/Lib/dos-8x3/test_cfg.py b/Lib/dos-8x3/test_cfg.py
deleted file mode 100644
index 4bdbc69..0000000
--- a/Lib/dos-8x3/test_cfg.py
+++ /dev/null
@@ -1,141 +0,0 @@
-import ConfigParser
-import StringIO
-
-def basic(src):
- print
- print "Testing basic accessors..."
- cf = ConfigParser.ConfigParser()
- sio = StringIO.StringIO(src)
- cf.readfp(sio)
- L = cf.sections()
- L.sort()
- print L
- for s in L:
- print "%s: %s" % (s, cf.options(s))
-
- # The use of spaces in the section names serves as a regression test for
- # SourceForge bug #115357.
- # http://sourceforge.net/bugs/?func=detailbug&group_id=5470&bug_id=115357
- print `cf.get('Foo Bar', 'foo', raw=1)`
- print `cf.get('Spacey Bar', 'foo', raw=1)`
- print `cf.get('Commented Bar', 'foo', raw=1)`
-
- if '__name__' in cf.options("Foo Bar"):
- print '__name__ "option" should not be exposed by the API!'
- else:
- print '__name__ "option" properly hidden by the API.'
-
-def interpolation(src):
- print
- print "Testing value interpolation..."
- cf = ConfigParser.ConfigParser({"getname": "%(__name__)s"})
- sio = StringIO.StringIO(src)
- cf.readfp(sio)
- print `cf.get("Foo", "getname")`
- print `cf.get("Foo", "bar")`
- print `cf.get("Foo", "bar9")`
- print `cf.get("Foo", "bar10")`
- expect_get_error(cf, ConfigParser.InterpolationDepthError, "Foo", "bar11")
-
-def parse_errors():
- print
- print "Testing for parsing errors..."
- expect_parse_error(ConfigParser.ParsingError,
- """[Foo]\n extra-spaces: splat\n""")
- expect_parse_error(ConfigParser.ParsingError,
- """[Foo]\n extra-spaces= splat\n""")
- expect_parse_error(ConfigParser.ParsingError,
- """[Foo]\noption-without-value\n""")
- expect_parse_error(ConfigParser.ParsingError,
- """[Foo]\n:value-without-option-name\n""")
- expect_parse_error(ConfigParser.ParsingError,
- """[Foo]\n=value-without-option-name\n""")
- expect_parse_error(ConfigParser.MissingSectionHeaderError,
- """No Section!\n""")
-
-def query_errors():
- print
- print "Testing query interface..."
- cf = ConfigParser.ConfigParser()
- print cf.sections()
- print "Has section 'Foo'?", cf.has_section("Foo")
- try:
- cf.options("Foo")
- except ConfigParser.NoSectionError, e:
- print "Caught expected NoSectionError:", e
- else:
- print "Failed to catch expected NoSectionError from options()"
- try:
- cf.set("foo", "bar", "value")
- except ConfigParser.NoSectionError, e:
- print "Caught expected NoSectionError:", e
- else:
- print "Failed to catch expected NoSectionError from set()"
- expect_get_error(cf, ConfigParser.NoSectionError, "foo", "bar")
- cf.add_section("foo")
- expect_get_error(cf, ConfigParser.NoOptionError, "foo", "bar")
-
-def weird_errors():
- print
- print "Testing miscellaneous error conditions..."
- cf = ConfigParser.ConfigParser()
- cf.add_section("Foo")
- try:
- cf.add_section("Foo")
- except ConfigParser.DuplicateSectionError, e:
- print "Caught expected DuplicateSectionError:", e
- else:
- print "Failed to catch expected DuplicateSectionError"
-
-def expect_get_error(cf, exctype, section, option, raw=0):
- try:
- cf.get(section, option, raw=raw)
- except exctype, e:
- print "Caught expected", exctype.__name__, ":"
- print e
- else:
- print "Failed to catch expected", exctype.__name__
-
-def expect_parse_error(exctype, src):
- cf = ConfigParser.ConfigParser()
- sio = StringIO.StringIO(src)
- try:
- cf.readfp(sio)
- except exctype, e:
- print "Caught expected exception:", e
- else:
- print "Failed to catch expected", exctype.__name__
-
-basic(r"""
-[Foo Bar]
-foo=bar
-[Spacey Bar]
-foo = bar
-[Commented Bar]
-foo: bar ; comment
-""")
-interpolation(r"""
-[Foo]
-bar=something %(with1)s interpolation (1 step)
-bar9=something %(with9)s lots of interpolation (9 steps)
-bar10=something %(with10)s lots of interpolation (10 steps)
-bar11=something %(with11)s lots of interpolation (11 steps)
-with11=%(with10)s
-with10=%(with9)s
-with9=%(with8)s
-with8=%(with7)s
-with7=%(with6)s
-with6=%(with5)s
-with5=%(with4)s
-with4=%(with3)s
-with3=%(with2)s
-with2=%(with1)s
-with1=with
-
-[Mutual Recursion]
-foo=%(bar)s
-bar=%(foo)s
-""")
-parse_errors()
-query_errors()
-weird_errors()
diff --git a/Lib/dos-8x3/test_cla.py b/Lib/dos-8x3/test_cla.py
deleted file mode 100644
index 43c1d3b..0000000
--- a/Lib/dos-8x3/test_cla.py
+++ /dev/null
@@ -1,219 +0,0 @@
-"Test the functionality of Python classes implementing operators."
-
-
-testmeths = [
-
-# Binary operations
- "add",
- "radd",
- "sub",
- "rsub",
- "mul",
- "rmul",
- "div",
- "rdiv",
- "mod",
- "rmod",
- "divmod",
- "rdivmod",
- "pow",
- "rpow",
- "rshift",
- "rrshift",
- "lshift",
- "rlshift",
- "and",
- "rand",
- "or",
- "ror",
- "xor",
- "rxor",
-
-# List/dict operations
- "contains",
- "getitem",
- "getslice",
- "setitem",
- "setslice",
- "delitem",
- "delslice",
-
-# Unary operations
- "neg",
- "pos",
- "abs",
- "int",
- "long",
- "float",
- "oct",
- "hex",
-
-# generic operations
- "init",
- "del",
- ]
-
-# These need to return something other than None
-# "coerce",
-# "hash",
-# "str",
-# "repr",
-
-# These are separate because they can influence the test of other methods.
-# "getattr",
-# "setattr",
-# "delattr",
-
-class AllTests:
- def __coerce__(self, *args):
- print "__coerce__:", args
- return (self,) + args
-
- def __hash__(self, *args):
- print "__hash__:", args
- return hash(id(self))
-
- def __str__(self, *args):
- print "__str__:", args
- return "AllTests"
-
- def __repr__(self, *args):
- print "__repr__:", args
- return "AllTests"
-
- def __cmp__(self, *args):
- print "__cmp__:", args
- return 0
-
-for method in testmeths:
- exec("""def __%(method)s__(self, *args):
- print "__%(method)s__:", args
-"""%locals(), AllTests.__dict__);
-
-# this also tests __init__ of course.
-testme = AllTests()
-
-# Binary operations
-
-testme + 1
-1 + testme
-
-testme - 1
-1 - testme
-
-testme * 1
-1 * testme
-
-testme / 1
-1 / testme
-
-testme % 1
-1 % testme
-
-divmod(testme,1)
-divmod(1, testme)
-
-testme ** 1
-1 ** testme
-
-testme >> 1
-1 >> testme
-
-testme << 1
-1 << testme
-
-testme & 1
-1 & testme
-
-testme | 1
-1 | testme
-
-testme ^ 1
-1 ^ testme
-
-
-# List/dict operations
-
-1 in testme
-
-testme[1]
-testme[1] = 1
-del testme[1]
-
-testme[:42]
-testme[:42] = "The Answer"
-del testme[:42]
-
-testme[2:1024:10]
-testme[2:1024:10] = "A lot"
-del testme[2:1024:10]
-
-testme[:42, ..., :24:, 24, 100]
-testme[:42, ..., :24:, 24, 100] = "Strange"
-del testme[:42, ..., :24:, 24, 100]
-
-
-# Now remove the slice hooks to see if converting normal slices to slice
-# objects works.
-
-del AllTests.__getslice__
-del AllTests.__setslice__
-del AllTests.__delslice__
-
-testme[:42]
-testme[:42] = "The Answer"
-del testme[:42]
-
-
-# Unary operations
-
--testme
-+testme
-abs(testme)
-int(testme)
-long(testme)
-float(testme)
-oct(testme)
-hex(testme)
-
-
-# And the rest...
-
-hash(testme)
-repr(testme)
-str(testme)
-
-testme == 1
-testme < 1
-testme > 1
-testme <> 1
-testme != 1
-1 == testme
-1 < testme
-1 > testme
-1 <> testme
-1 != testme
-
-# This test has to be last (duh.)
-
-del testme
-
-
-# Interfering tests
-
-class ExtraTests:
- def __getattr__(self, *args):
- print "__getattr__:", args
- return "SomeVal"
-
- def __setattr__(self, *args):
- print "__setattr__:", args
-
- def __delattr__(self, *args):
- print "__delattr__:", args
-
-testme = ExtraTests()
-testme.spam
-testme.eggs = "spam, spam, spam and ham"
-del testme.cardinal
-
diff --git a/Lib/dos-8x3/test_cma.py b/Lib/dos-8x3/test_cma.py
deleted file mode 100644
index 509c739..0000000
--- a/Lib/dos-8x3/test_cma.py
+++ /dev/null
@@ -1,35 +0,0 @@
-#! /usr/bin/env python
-""" Simple test script for cmathmodule.c
- Roger E. Masse
-"""
-import cmath
-from test_support import verbose
-
-testdict = {'acos' : 1.0,
- 'acosh' : 1.0,
- 'asin' : 1.0,
- 'asinh' : 1.0,
- 'atan' : 0.2,
- 'atanh' : 0.2,
- 'cos' : 1.0,
- 'cosh' : 1.0,
- 'exp' : 1.0,
- 'log' : 1.0,
- 'log10' : 1.0,
- 'sin' : 1.0,
- 'sinh' : 1.0,
- 'sqrt' : 1.0,
- 'tan' : 1.0,
- 'tanh' : 1.0}
-
-for func in testdict.keys():
- f = getattr(cmath, func)
- r = f(testdict[func])
- if verbose:
- print 'Calling %s(%f) = %f' % (func, testdict[func], abs(r))
-
-p = cmath.pi
-e = cmath.e
-if verbose:
- print 'PI = ', abs(p)
- print 'E = ', abs(e)
diff --git a/Lib/dos-8x3/test_com.py b/Lib/dos-8x3/test_com.py
deleted file mode 100644
index 8905864..0000000
--- a/Lib/dos-8x3/test_com.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from test_support import verbose, TestFailed
-
-if verbose:
- print 'Running test on duplicate arguments'
-
-try:
- exec('def f(a, a): pass')
- raise TestFailed, "duplicate arguments"
-except SyntaxError:
- pass
-
-try:
- exec('def f(a = 0, a = 1): pass')
- raise TestFailed, "duplicate keyword arguments"
-except SyntaxError:
- pass
diff --git a/Lib/dos-8x3/test_con.py b/Lib/dos-8x3/test_con.py
deleted file mode 100644
index 355135f..0000000
--- a/Lib/dos-8x3/test_con.py
+++ /dev/null
@@ -1,168 +0,0 @@
-from test_support import TestFailed
-
-class base_set:
-
- def __init__(self, el):
- self.el = el
-
-class set(base_set):
-
- def __contains__(self, el):
- return self.el == el
-
-class seq(base_set):
-
- def __getitem__(self, n):
- return [self.el][n]
-
-def check(ok, *args):
- if not ok:
- raise TestFailed, " ".join(map(str, args))
-
-a = base_set(1)
-b = set(1)
-c = seq(1)
-
-check(1 in b, "1 not in set(1)")
-check(0 not in b, "0 in set(1)")
-check(1 in c, "1 not in seq(1)")
-check(0 not in c, "0 in seq(1)")
-
-try:
- 1 in a
- check(0, "in base_set did not raise error")
-except AttributeError:
- pass
-
-try:
- 1 not in a
- check(0, "not in base_set did not raise error")
-except AttributeError:
- pass
-
-# Test char in string
-
-check('c' in 'abc', "'c' not in 'abc'")
-check('d' not in 'abc', "'d' in 'abc'")
-
-try:
- '' in 'abc'
- check(0, "'' in 'abc' did not raise error")
-except TypeError:
- pass
-
-try:
- 'ab' in 'abc'
- check(0, "'ab' in 'abc' did not raise error")
-except TypeError:
- pass
-
-try:
- None in 'abc'
- check(0, "None in 'abc' did not raise error")
-except TypeError:
- pass
-
-# Test char in Unicode
-
-check('c' in u'abc', "'c' not in u'abc'")
-check('d' not in u'abc', "'d' in u'abc'")
-
-try:
- '' in u'abc'
- check(0, "'' in u'abc' did not raise error")
-except TypeError:
- pass
-
-try:
- 'ab' in u'abc'
- check(0, "'ab' in u'abc' did not raise error")
-except TypeError:
- pass
-
-try:
- None in u'abc'
- check(0, "None in u'abc' did not raise error")
-except TypeError:
- pass
-
-# Test Unicode char in Unicode
-
-check(u'c' in u'abc', "u'c' not in u'abc'")
-check(u'd' not in u'abc', "u'd' in u'abc'")
-
-try:
- u'' in u'abc'
- check(0, "u'' in u'abc' did not raise error")
-except TypeError:
- pass
-
-try:
- u'ab' in u'abc'
- check(0, "u'ab' in u'abc' did not raise error")
-except TypeError:
- pass
-
-# Test Unicode char in string
-
-check(u'c' in 'abc', "u'c' not in 'abc'")
-check(u'd' not in 'abc', "u'd' in 'abc'")
-
-try:
- u'' in 'abc'
- check(0, "u'' in 'abc' did not raise error")
-except TypeError:
- pass
-
-try:
- u'ab' in 'abc'
- check(0, "u'ab' in 'abc' did not raise error")
-except TypeError:
- pass
-
-# A collection of tests on builtin sequence types
-a = range(10)
-for i in a:
- check(i in a, "%s not in %s" % (`i`, `a`))
-check(16 not in a, "16 not in %s" % `a`)
-check(a not in a, "%s not in %s" % (`a`, `a`))
-
-a = tuple(a)
-for i in a:
- check(i in a, "%s not in %s" % (`i`, `a`))
-check(16 not in a, "16 not in %s" % `a`)
-check(a not in a, "%s not in %s" % (`a`, `a`))
-
-class Deviant1:
- """Behaves strangely when compared
-
- This class is designed to make sure that the contains code
- works when the list is modified during the check.
- """
-
- aList = range(15)
-
- def __cmp__(self, other):
- if other == 12:
- self.aList.remove(12)
- self.aList.remove(13)
- self.aList.remove(14)
- return 1
-
-check(Deviant1() not in Deviant1.aList, "Deviant1 failed")
-
-class Deviant2:
- """Behaves strangely when compared
-
- This class raises an exception during comparison. That in
- turn causes the comparison to fail with a TypeError.
- """
-
- def __cmp__(self, other):
- if other == 4:
- raise RuntimeError, "gotcha"
-
-try:
- check(Deviant2() not in a, "oops")
-except TypeError:
- pass
diff --git a/Lib/dos-8x3/test_coo.py b/Lib/dos-8x3/test_coo.py
deleted file mode 100644
index a275cf2..0000000
--- a/Lib/dos-8x3/test_coo.py
+++ /dev/null
@@ -1,40 +0,0 @@
-
-# Simple test suite for Cookie.py
-
-import Cookie
-
-# Currently this only tests SimpleCookie
-
-cases = [
- ('chips=ahoy; vienna=finger', {'chips':'ahoy', 'vienna':'finger'}),
- ('keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;";',
- {'keebler' : 'E=mc2; L="Loves"; fudge=\012;'}),
- ]
-
-for data, dict in cases:
- C = Cookie.SimpleCookie() ; C.load(data)
- print repr(C)
- print str(C)
- for k, v in dict.items():
- print ' ', k, repr( C[k].value ), repr(v)
- assert C[k].value == v
- print C[k]
-
-C = Cookie.SimpleCookie()
-C.load('Customer="WILE_E_COYOTE"; Version=1; Path=/acme')
-
-assert C['Customer'].value == 'WILE_E_COYOTE'
-assert C['Customer']['version'] == '1'
-assert C['Customer']['path'] == '/acme'
-
-print C.output(['path'])
-print C.js_output()
-print C.js_output(['path'])
-
-# Try cookie with quoted meta-data
-C = Cookie.SimpleCookie()
-C.load('Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"')
-assert C['Customer'].value == 'WILE_E_COYOTE'
-assert C['Customer']['version'] == '1'
-assert C['Customer']['path'] == '/acme'
-
diff --git a/Lib/dos-8x3/test_cop.py b/Lib/dos-8x3/test_cop.py
deleted file mode 100644
index 0324d92..0000000
--- a/Lib/dos-8x3/test_cop.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import copy_reg
-
-class C:
- pass
-
-
-try:
- copy_reg.pickle(C, None, None)
-except TypeError, e:
- print "Caught expected TypeError:"
- print e
-else:
- print "Failed to catch expected TypeError when registering a class type."
-
-
-print
-try:
- copy_reg.pickle(type(1), "not a callable")
-except TypeError, e:
- print "Caught expected TypeError:"
- print e
-else:
- print "Failed to catch TypeError " \
- "when registering a non-callable reduction function."
-
-
-print
-try:
- copy_reg.pickle(type(1), int, "not a callable")
-except TypeError, e:
- print "Caught expected TypeError:"
- print e
-else:
- print "Failed to catch TypeError " \
- "when registering a non-callable constructor."
diff --git a/Lib/dos-8x3/test_cpi.py b/Lib/dos-8x3/test_cpi.py
deleted file mode 100644
index f2aa0fe..0000000
--- a/Lib/dos-8x3/test_cpi.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# Test the cPickle module
-
-import cPickle
-import test_pickle
-test_pickle.dotest(cPickle)
diff --git a/Lib/dos-8x3/test_cry.py b/Lib/dos-8x3/test_cry.py
deleted file mode 100644
index 0685c95..0000000
--- a/Lib/dos-8x3/test_cry.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#! /usr/bin/env python
-"""Simple test script for cryptmodule.c
- Roger E. Masse
-"""
-
-from test_support import verbose
-import crypt
-
-c = crypt.crypt('mypassword', 'ab')
-if verbose:
- print 'Test encryption: ', c
diff --git a/Lib/dos-8x3/test_dos.py b/Lib/dos-8x3/test_dos.py
deleted file mode 100644
index ffa3ef7..0000000
--- a/Lib/dos-8x3/test_dos.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import dospath
-import string
-import os
-
-errors = 0
-
-def tester(fn, wantResult):
- fn = string.replace(fn, "\\", "\\\\")
- gotResult = eval(fn)
- if wantResult != gotResult:
- print "error!"
- print "evaluated: " + str(fn)
- print "should be: " + str(wantResult)
- print " returned: " + str(gotResult)
- print ""
- global errors
- errors = errors + 1
-
-tester('dospath.splitdrive("c:\\foo\\bar")', ('c:', '\\foo\\bar'))
-tester('dospath.splitdrive("c:/foo/bar")', ('c:', '/foo/bar'))
-
-tester('dospath.split("c:\\foo\\bar")', ('c:\\foo', 'bar'))
-tester('dospath.split("\\\\conky\\mountpoint\\foo\\bar")', ('\\\\conky\\mountpoint\\foo', 'bar'))
-
-tester('dospath.split("c:\\")', ('c:\\', ''))
-tester('dospath.split("\\\\conky\\mountpoint\\")', ('\\\\conky\\mountpoint', ''))
-
-tester('dospath.split("c:/")', ('c:/', ''))
-tester('dospath.split("//conky/mountpoint/")', ('//conky/mountpoint', ''))
-
-tester('dospath.isabs("c:\\")', 1)
-tester('dospath.isabs("\\\\conky\\mountpoint\\")', 1)
-tester('dospath.isabs("\\foo")', 1)
-tester('dospath.isabs("\\foo\\bar")', 1)
-
-tester('dospath.abspath("C:\\")', "C:\\")
-
-tester('dospath.commonprefix(["/home/swenson/spam", "/home/swen/spam"])',
- "/home/swen")
-tester('dospath.commonprefix(["\\home\\swen\\spam", "\\home\\swen\\eggs"])',
- "\\home\\swen\\")
-tester('dospath.commonprefix(["/home/swen/spam", "/home/swen/spam"])',
- "/home/swen/spam")
-
-if errors:
- print str(errors) + " errors."
-else:
- print "No errors. Thank your lucky stars."
-
diff --git a/Lib/dos-8x3/test_err.py b/Lib/dos-8x3/test_err.py
deleted file mode 100644
index cb1e729..0000000
--- a/Lib/dos-8x3/test_err.py
+++ /dev/null
@@ -1,49 +0,0 @@
-#! /usr/bin/env python
-"""Test the errno module
- Roger E. Masse
-"""
-
-import errno
-from test_support import verbose
-
-errors = ['E2BIG', 'EACCES', 'EADDRINUSE', 'EADDRNOTAVAIL', 'EADV',
- 'EAFNOSUPPORT', 'EAGAIN', 'EALREADY', 'EBADE', 'EBADF',
- 'EBADFD', 'EBADMSG', 'EBADR', 'EBADRQC', 'EBADSLT',
- 'EBFONT', 'EBUSY', 'ECHILD', 'ECHRNG', 'ECOMM',
- 'ECONNABORTED', 'ECONNREFUSED', 'ECONNRESET',
- 'EDEADLK', 'EDEADLOCK', 'EDESTADDRREQ', 'EDOM',
- 'EDQUOT', 'EEXIST', 'EFAULT', 'EFBIG', 'EHOSTDOWN',
- 'EHOSTUNREACH', 'EIDRM', 'EILSEQ', 'EINPROGRESS',
- 'EINTR', 'EINVAL', 'EIO', 'EISCONN', 'EISDIR',
- 'EL2HLT', 'EL2NSYNC', 'EL3HLT', 'EL3RST', 'ELIBACC',
- 'ELIBBAD', 'ELIBEXEC', 'ELIBMAX', 'ELIBSCN', 'ELNRNG',
- 'ELOOP', 'EMFILE', 'EMLINK', 'EMSGSIZE', 'EMULTIHOP',
- 'ENAMETOOLONG', 'ENETDOWN', 'ENETRESET', 'ENETUNREACH',
- 'ENFILE', 'ENOANO', 'ENOBUFS', 'ENOCSI', 'ENODATA',
- 'ENODEV', 'ENOENT', 'ENOEXEC', 'ENOLCK', 'ENOLINK',
- 'ENOMEM', 'ENOMSG', 'ENONET', 'ENOPKG', 'ENOPROTOOPT',
- 'ENOSPC', 'ENOSR', 'ENOSTR', 'ENOSYS', 'ENOTBLK',
- 'ENOTCONN', 'ENOTDIR', 'ENOTEMPTY', 'ENOTOBACCO', 'ENOTSOCK',
- 'ENOTTY', 'ENOTUNIQ', 'ENXIO', 'EOPNOTSUPP',
- 'EOVERFLOW', 'EPERM', 'EPFNOSUPPORT', 'EPIPE',
- 'EPROTO', 'EPROTONOSUPPORT', 'EPROTOTYPE',
- 'ERANGE', 'EREMCHG', 'EREMOTE', 'ERESTART',
- 'EROFS', 'ESHUTDOWN', 'ESOCKTNOSUPPORT', 'ESPIPE',
- 'ESRCH', 'ESRMNT', 'ESTALE', 'ESTRPIPE', 'ETIME',
- 'ETIMEDOUT', 'ETOOMANYREFS', 'ETXTBSY', 'EUNATCH',
- 'EUSERS', 'EWOULDBLOCK', 'EXDEV', 'EXFULL']
-
-#
-# This is a wee bit bogus since the module only conditionally adds
-# errno constants if they have been defined by errno.h. However, this
-# test seems to work on SGI, Sparc & intel Solaris, and linux.
-#
-for error in errors:
- try:
- a = getattr(errno, error)
- except AttributeError:
- if verbose:
- print '%s: not found' % error
- else:
- if verbose:
- print '%s: %d' % (error, a)
diff --git a/Lib/dos-8x3/test_exc.py b/Lib/dos-8x3/test_exc.py
deleted file mode 100755
index 076f470..0000000
--- a/Lib/dos-8x3/test_exc.py
+++ /dev/null
@@ -1,170 +0,0 @@
-# Python test set -- part 5, built-in exceptions
-
-from test_support import *
-from types import ClassType
-
-print '5. Built-in exceptions'
-# XXX This is not really enough, each *operation* should be tested!
-
-def test_raise_catch(exc):
- try:
- raise exc, "spam"
- except exc, err:
- buf = str(err)
- try:
- raise exc("spam")
- except exc, err:
- buf = str(err)
- print buf
-
-def r(thing):
- test_raise_catch(thing)
- if isinstance(thing, ClassType):
- print thing.__name__
- else:
- print thing
-
-r(AttributeError)
-import sys
-try: x = sys.undefined_attribute
-except AttributeError: pass
-
-r(EOFError)
-import sys
-fp = open(TESTFN, 'w')
-fp.close()
-fp = open(TESTFN, 'r')
-savestdin = sys.stdin
-try:
- try:
- sys.stdin = fp
- x = raw_input()
- except EOFError:
- pass
-finally:
- sys.stdin = savestdin
- fp.close()
-
-r(IOError)
-try: open('this file does not exist', 'r')
-except IOError: pass
-
-r(ImportError)
-try: import undefined_module
-except ImportError: pass
-
-r(IndexError)
-x = []
-try: a = x[10]
-except IndexError: pass
-
-r(KeyError)
-x = {}
-try: a = x['key']
-except KeyError: pass
-
-r(KeyboardInterrupt)
-print '(not testable in a script)'
-
-r(MemoryError)
-print '(not safe to test)'
-
-r(NameError)
-try: x = undefined_variable
-except NameError: pass
-
-r(OverflowError)
-x = 1
-try:
- while 1: x = x+x
-except OverflowError: pass
-
-r(RuntimeError)
-print '(not used any more?)'
-
-r(SyntaxError)
-try: exec '/\n'
-except SyntaxError: pass
-
-# make sure the right exception message is raised for each of these
-# code fragments:
-
-def ckmsg(src, msg):
- try:
- compile(src, '<fragment>', 'exec')
- except SyntaxError, e:
- print e.msg
- if e.msg == msg:
- print "ok"
- else:
- print "expected:", msg
- else:
- print "failed to get expected SyntaxError"
-
-s = '''\
-while 1:
- try:
- continue
- except:
- pass
-'''
-ckmsg(s, "'continue' not supported inside 'try' clause")
-s = '''\
-while 1:
- try:
- continue
- finally:
- pass
-'''
-ckmsg(s, "'continue' not supported inside 'try' clause")
-s = '''\
-while 1:
- try:
- if 1:
- continue
- finally:
- pass
-'''
-ckmsg(s, "'continue' not supported inside 'try' clause")
-s = '''\
-try:
- continue
-except:
- pass
-'''
-ckmsg(s, "'continue' not properly in loop")
-ckmsg("continue\n", "'continue' not properly in loop")
-
-r(IndentationError)
-
-r(TabError)
-# can only be tested under -tt, and is the only test for -tt
-#try: compile("try:\n\t1/0\n \t1/0\nfinally:\n pass\n", '<string>', 'exec')
-#except TabError: pass
-#else: raise TestFailed
-
-r(SystemError)
-print '(hard to reproduce)'
-
-r(SystemExit)
-import sys
-try: sys.exit(0)
-except SystemExit: pass
-
-r(TypeError)
-try: [] + ()
-except TypeError: pass
-
-r(ValueError)
-try: x = chr(10000)
-except ValueError: pass
-
-r(ZeroDivisionError)
-try: x = 1/0
-except ZeroDivisionError: pass
-
-r(Exception)
-try: x = 1/0
-except Exception, e: pass
-
-unlink(TESTFN)
diff --git a/Lib/dos-8x3/test_ext.py b/Lib/dos-8x3/test_ext.py
deleted file mode 100644
index de2312b..0000000
--- a/Lib/dos-8x3/test_ext.py
+++ /dev/null
@@ -1,146 +0,0 @@
-from UserList import UserList
-
-def f(*a, **k):
- print a, k
-
-def g(x, *y, **z):
- print x, y, z
-
-def h(j=1, a=2, h=3):
- print j, a, h
-
-f()
-f(1)
-f(1, 2)
-f(1, 2, 3)
-
-f(1, 2, 3, *(4, 5))
-f(1, 2, 3, *[4, 5])
-f(1, 2, 3, *UserList([4, 5]))
-f(1, 2, 3, **{'a':4, 'b':5})
-f(1, 2, 3, *(4, 5), **{'a':6, 'b':7})
-f(1, 2, 3, x=4, y=5, *(6, 7), **{'a':8, 'b':9})
-
-try:
- g()
-except TypeError, err:
- print "TypeError:", err
-else:
- print "should raise TypeError: not enough arguments; expected 1, got 0"
-
-try:
- g(*())
-except TypeError, err:
- print "TypeError:", err
-else:
- print "should raise TypeError: not enough arguments; expected 1, got 0"
-
-try:
- g(*(), **{})
-except TypeError, err:
- print "TypeError:", err
-else:
- print "should raise TypeError: not enough arguments; expected 1, got 0"
-
-g(1)
-g(1, 2)
-g(1, 2, 3)
-g(1, 2, 3, *(4, 5))
-class Nothing: pass
-try:
- g(*Nothing())
-except AttributeError, attr:
- pass
-else:
- print "should raise AttributeError: __len__"
-
-class Nothing:
- def __len__(self):
- return 5
-try:
- g(*Nothing())
-except AttributeError, attr:
- pass
-else:
- print "should raise AttributeError: __getitem__"
-
-class Nothing:
- def __len__(self):
- return 5
- def __getitem__(self, i):
- if i < 3:
- return i
- else:
- raise IndexError, i
-g(*Nothing())
-
-# make sure the function call doesn't stomp on the dictionary?
-d = {'a': 1, 'b': 2, 'c': 3}
-d2 = d.copy()
-assert d == d2
-g(1, d=4, **d)
-print d
-print d2
-assert d == d2, "function call modified dictionary"
-
-# what about willful misconduct?
-def saboteur(**kw):
- kw['x'] = locals() # yields a cyclic kw
- return kw
-d = {}
-kw = saboteur(a=1, **d)
-assert d == {}
-# break the cycle
-del kw['x']
-
-try:
- g(1, 2, 3, **{'x':4, 'y':5})
-except TypeError, err:
- print err
-else:
- print "should raise TypeError: keyword parameter redefined"
-
-try:
- g(1, 2, 3, a=4, b=5, *(6, 7), **{'a':8, 'b':9})
-except TypeError, err:
- print err
-else:
- print "should raise TypeError: keyword parameter redefined"
-
-try:
- f(**{1:2})
-except TypeError, err:
- print err
-else:
- print "should raise TypeError: keywords must be strings"
-
-try:
- h(**{'e': 2})
-except TypeError, err:
- print err
-else:
- print "should raise TypeError: unexpected keyword argument: e"
-
-try:
- h(*h)
-except TypeError, err:
- print err
-else:
- print "should raise TypeError: * argument must be a tuple"
-
-try:
- h(**h)
-except TypeError, err:
- print err
-else:
- print "should raise TypeError: ** argument must be a dictionary"
-
-def f2(*a, **b):
- return a, b
-
-d = {}
-for i in range(512):
- key = 'k%d' % i
- d[key] = i
-a, b = f2(1, *(2, 3), **d)
-print len(a), len(b), b == d
diff --git a/Lib/dos-8x3/test_fcn.py b/Lib/dos-8x3/test_fcn.py
deleted file mode 100644
index a1da0dd..0000000
--- a/Lib/dos-8x3/test_fcn.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#! /usr/bin/env python
-"""Test program for the fcntl C module.
- Roger E. Masse
-"""
-import struct
-import fcntl
-import FCNTL
-import os, sys
-from test_support import verbose
-
-filename = '/tmp/delete-me'
-
-# the example from the library docs
-f = open(filename,'w')
-rv = fcntl.fcntl(f.fileno(), FCNTL.F_SETFL, os.O_NONBLOCK)
-if verbose:
- print 'Status from fcntl with O_NONBLOCK: ', rv
-
-if sys.platform in ('netbsd1', 'Darwin1.2',
- 'freebsd2', 'freebsd3', 'freebsd4', 'freebsd5',
- 'bsdos2', 'bsdos3', 'bsdos4',
- 'openbsd', 'openbsd2'):
- lockdata = struct.pack('lxxxxlxxxxlhh', 0, 0, 0, FCNTL.F_WRLCK, 0)
-elif sys.platform in ['aix3', 'aix4', 'hp-uxB']:
- lockdata = struct.pack('hhlllii', FCNTL.F_WRLCK, 0, 0, 0, 0, 0, 0)
-else:
- lockdata = struct.pack('hhllhh', FCNTL.F_WRLCK, 0, 0, 0, 0, 0)
-if verbose:
- print 'struct.pack: ', `lockdata`
-
-rv = fcntl.fcntl(f.fileno(), FCNTL.F_SETLKW, lockdata)
-if verbose:
- print 'String from fcntl with F_SETLKW: ', `rv`
-
-f.close()
-os.unlink(filename)
diff --git a/Lib/dos-8x3/test_fil.py b/Lib/dos-8x3/test_fil.py
deleted file mode 100644
index 4ad5c95..0000000
--- a/Lib/dos-8x3/test_fil.py
+++ /dev/null
@@ -1,45 +0,0 @@
-from test_support import TESTFN
-from UserList import UserList
-
-# verify writelines with instance sequence
-l = UserList(['1', '2'])
-f = open(TESTFN, 'wb')
-f.writelines(l)
-f.close()
-f = open(TESTFN, 'rb')
-buf = f.read()
-f.close()
-assert buf == '12'
-
-# verify writelines with integers
-f = open(TESTFN, 'wb')
-try:
- f.writelines([1, 2, 3])
-except TypeError:
- pass
-else:
- print "writelines accepted sequence of integers"
-f.close()
-
-# verify writelines with integers in UserList
-f = open(TESTFN, 'wb')
-l = UserList([1,2,3])
-try:
- f.writelines(l)
-except TypeError:
- pass
-else:
- print "writelines accepted sequence of integers"
-f.close()
-
-# verify writelines with non-string object
-class NonString: pass
-
-f = open(TESTFN, 'wb')
-try:
- f.writelines([NonString(), NonString()])
-except TypeError:
- pass
-else:
- print "writelines accepted sequence of non-string objects"
-f.close()
diff --git a/Lib/dos-8x3/test_for.py b/Lib/dos-8x3/test_for.py
deleted file mode 100644
index 4fd2662..0000000
--- a/Lib/dos-8x3/test_for.py
+++ /dev/null
@@ -1,78 +0,0 @@
-"""This test checks for correct fork() behavior.
-
-We want fork1() semantics -- only the forking thread survives in the
-child after a fork().
-
-On some systems (e.g. Solaris without posix threads) we find that all
-active threads survive in the child after a fork(); this is an error.
-
-On BeOS, you CANNOT mix threads and fork(); the behaviour is undefined.
-That's OK, fork() is a grotesque hack anyway. ;-) [cjh]
-
-"""
-
-import os, sys, time, thread
-from test_support import TestSkipped
-
-try:
- if os.uname()[0] == "BeOS":
- raise TestSkipped, "can't mix os.fork with threads on BeOS"
-except AttributeError:
- pass
-
-try:
- os.fork
-except AttributeError:
- raise TestSkipped, "os.fork not defined -- skipping test_fork1"
-
-LONGSLEEP = 2
-
-SHORTSLEEP = 0.5
-
-NUM_THREADS = 4
-
-alive = {}
-
-stop = 0
-
-def f(id):
- while not stop:
- alive[id] = os.getpid()
- try:
- time.sleep(SHORTSLEEP)
- except IOError:
- pass
-
-def main():
- for i in range(NUM_THREADS):
- thread.start_new(f, (i,))
-
- time.sleep(LONGSLEEP)
-
- a = alive.keys()
- a.sort()
- assert a == range(NUM_THREADS)
-
- prefork_lives = alive.copy()
-
- cpid = os.fork()
-
- if cpid == 0:
- # Child
- time.sleep(LONGSLEEP)
- n = 0
- for key in alive.keys():
- if alive[key] != prefork_lives[key]:
- n = n+1
- os._exit(n)
- else:
- # Parent
- spid, status = os.waitpid(cpid, 0)
- assert spid == cpid
- assert status == 0, "cause = %d, exit = %d" % (status&0xff, status>>8)
- global stop
- # Tell threads to die
- stop = 1
- time.sleep(2*SHORTSLEEP) # Wait for threads to die
-
-main()
diff --git a/Lib/dos-8x3/test_gdb.py b/Lib/dos-8x3/test_gdb.py
deleted file mode 100644
index 030218e..0000000
--- a/Lib/dos-8x3/test_gdb.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#! /usr/bin/env python
-"""Test script for the gdbm module
- Roger E. Masse
-"""
-
-import gdbm
-from gdbm import error
-from test_support import verbose, TestFailed
-
-filename= '/tmp/delete_me'
-
-g = gdbm.open(filename, 'c')
-g['a'] = 'b'
-g['12345678910'] = '019237410982340912840198242'
-a = g.keys()
-if verbose:
- print 'Test gdbm file keys: ', a
-
-g.has_key('a')
-g.close()
-try:
- g['a']
-except error:
- pass
-else:
- raise TestFailed, "expected gdbm.error accessing closed database"
-g = gdbm.open(filename, 'r')
-g.close()
-g = gdbm.open(filename, 'rw')
-g.close()
-g = gdbm.open(filename, 'w')
-g.close()
-g = gdbm.open(filename, 'n')
-g.close()
-
-try:
- import os
- os.unlink(filename)
-except:
- pass
diff --git a/Lib/dos-8x3/test_get.py b/Lib/dos-8x3/test_get.py
deleted file mode 100644
index 3927c1c..0000000
--- a/Lib/dos-8x3/test_get.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# test_getopt.py
-# David Goodger <dgoodger@bigfoot.com> 2000-08-19
-
-import getopt
-from getopt import GetoptError
-from test_support import verbose
-
-def expectException(teststr, expected, failure=AssertionError):
- """Executes a statement passed in teststr, and raises an exception
- (failure) if the expected exception is *not* raised."""
- try:
- exec teststr
- except expected:
- pass
- else:
- raise failure
-
-if verbose:
- print 'Running tests on getopt.short_has_arg'
-assert getopt.short_has_arg('a', 'a:')
-assert not getopt.short_has_arg('a', 'a')
-expectException("tmp = getopt.short_has_arg('a', 'b')", GetoptError)
-expectException("tmp = getopt.short_has_arg('a', '')", GetoptError)
-
-if verbose:
- print 'Running tests on getopt.long_has_args'
-has_arg, option = getopt.long_has_args('abc', ['abc='])
-assert has_arg
-assert option == 'abc'
-has_arg, option = getopt.long_has_args('abc', ['abc'])
-assert not has_arg
-assert option == 'abc'
-has_arg, option = getopt.long_has_args('abc', ['abcd'])
-assert not has_arg
-assert option == 'abcd'
-expectException("has_arg, option = getopt.long_has_args('abc', ['def'])",
- GetoptError)
-expectException("has_arg, option = getopt.long_has_args('abc', [])",
- GetoptError)
-expectException("has_arg, option = " + \
- "getopt.long_has_args('abc', ['abcd','abcde'])",
- GetoptError)
-
-if verbose:
- print 'Running tests on getopt.do_shorts'
-opts, args = getopt.do_shorts([], 'a', 'a', [])
-assert opts == [('-a', '')]
-assert args == []
-opts, args = getopt.do_shorts([], 'a1', 'a:', [])
-assert opts == [('-a', '1')]
-assert args == []
-#opts, args = getopt.do_shorts([], 'a=1', 'a:', [])
-#assert opts == [('-a', '1')]
-#assert args == []
-opts, args = getopt.do_shorts([], 'a', 'a:', ['1'])
-assert opts == [('-a', '1')]
-assert args == []
-opts, args = getopt.do_shorts([], 'a', 'a:', ['1', '2'])
-assert opts == [('-a', '1')]
-assert args == ['2']
-expectException("opts, args = getopt.do_shorts([], 'a1', 'a', [])",
- GetoptError)
-expectException("opts, args = getopt.do_shorts([], 'a', 'a:', [])",
- GetoptError)
-
-if verbose:
- print 'Running tests on getopt.do_longs'
-opts, args = getopt.do_longs([], 'abc', ['abc'], [])
-assert opts == [('--abc', '')]
-assert args == []
-opts, args = getopt.do_longs([], 'abc=1', ['abc='], [])
-assert opts == [('--abc', '1')]
-assert args == []
-opts, args = getopt.do_longs([], 'abc=1', ['abcd='], [])
-assert opts == [('--abcd', '1')]
-assert args == []
-expectException("opts, args = getopt.do_longs([], 'abc=1', ['abc'], [])",
- GetoptError)
-expectException("opts, args = getopt.do_longs([], 'abc', ['abc='], [])",
- GetoptError)
-
-# note: the empty string between '-a' and '--beta' is significant:
-# it simulates an empty string option argument ('-a ""') on the command line.
-cmdline = ['-a', '1', '-b', '--alpha=2', '--beta', '-a', '3', '-a', '',
- '--beta', 'arg1', 'arg2']
-
-if verbose:
- print 'Running tests on getopt.getopt'
-opts, args = getopt.getopt(cmdline, 'a:b', ['alpha=', 'beta'])
-assert opts == [('-a', '1'), ('-b', ''), ('--alpha', '2'), ('--beta', ''),
- ('-a', '3'), ('-a', ''), ('--beta', '')]
-# Note ambiguity of ('-b', '') and ('-a', '') above. This must be
-# accounted for in the code that calls getopt().
-assert args == ['arg1', 'arg2']
-
-expectException(
- "opts, args = getopt.getopt(cmdline, 'a:b', ['alpha', 'beta'])",
- GetoptError)
-
-if verbose:
- print "Module getopt: tests completed successfully."
diff --git a/Lib/dos-8x3/test_gra.py b/Lib/dos-8x3/test_gra.py
deleted file mode 100755
index 0ca5a46..0000000
--- a/Lib/dos-8x3/test_gra.py
+++ /dev/null
@@ -1,649 +0,0 @@
-# Python test set -- part 1, grammar.
-# This just tests whether the parser accepts them all.
-
-from test_support import *
-
-print '1. Parser'
-
-print '1.1 Tokens'
-
-print '1.1.1 Backslashes'
-
-# Backslash means line continuation:
-x = 1 \
-+ 1
-if x <> 2: raise TestFailed, 'backslash for line continuation'
-
-# Backslash does not mean continuation in comments :\
-x = 0
-if x <> 0: raise TestFailed, 'backslash ending comment'
-
-print '1.1.2 Numeric literals'
-
-print '1.1.2.1 Plain integers'
-if 0xff <> 255: raise TestFailed, 'hex int'
-if 0377 <> 255: raise TestFailed, 'octal int'
-if 2147483647 != 017777777777: raise TestFailed, 'large positive int'
-try:
- from sys import maxint
-except ImportError:
- maxint = 2147483647
-if maxint == 2147483647:
- if -2147483647-1 != 020000000000: raise TestFailed, 'max negative int'
- # XXX -2147483648
- if 037777777777 != -1: raise TestFailed, 'oct -1'
- if 0xffffffff != -1: raise TestFailed, 'hex -1'
- for s in '2147483648', '040000000000', '0x100000000':
- try:
- x = eval(s)
- except OverflowError:
- continue
-## raise TestFailed, \
- print \
- 'No OverflowError on huge integer literal ' + `s`
-elif eval('maxint == 9223372036854775807'):
- if eval('-9223372036854775807-1 != 01000000000000000000000'):
- raise TestFailed, 'max negative int'
- if eval('01777777777777777777777') != -1: raise TestFailed, 'oct -1'
- if eval('0xffffffffffffffff') != -1: raise TestFailed, 'hex -1'
- for s in '9223372036854775808', '02000000000000000000000', \
- '0x10000000000000000':
- try:
- x = eval(s)
- except OverflowError:
- continue
- raise TestFailed, \
- 'No OverflowError on huge integer literal ' + `s`
-else:
- print 'Weird maxint value', maxint
-
-print '1.1.2.2 Long integers'
-x = 0L
-x = 0l
-x = 0xffffffffffffffffL
-x = 0xffffffffffffffffl
-x = 077777777777777777L
-x = 077777777777777777l
-x = 123456789012345678901234567890L
-x = 123456789012345678901234567890l
-
-print '1.1.2.3 Floating point'
-x = 3.14
-x = 314.
-x = 0.314
-# XXX x = 000.314
-x = .314
-x = 3e14
-x = 3E14
-x = 3e-14
-x = 3e+14
-x = 3.e14
-x = .3e14
-x = 3.1e4
-
-print '1.1.3 String literals'
-
-##def assert(s):
-## if not s: raise TestFailed, 'see traceback'
-
-x = ''; y = ""; assert(len(x) == 0 and x == y)
-x = '\''; y = "'"; assert(len(x) == 1 and x == y and ord(x) == 39)
-x = '"'; y = "\""; assert(len(x) == 1 and x == y and ord(x) == 34)
-x = "doesn't \"shrink\" does it"
-y = 'doesn\'t "shrink" does it'
-assert(len(x) == 24 and x == y)
-x = "does \"shrink\" doesn't it"
-y = 'does "shrink" doesn\'t it'
-assert(len(x) == 24 and x == y)
-x = """
-The "quick"
-brown fox
-jumps over
-the 'lazy' dog.
-"""
-y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
-assert(x == y)
-y = '''
-The "quick"
-brown fox
-jumps over
-the 'lazy' dog.
-'''; assert(x == y)
-y = "\n\
-The \"quick\"\n\
-brown fox\n\
-jumps over\n\
-the 'lazy' dog.\n\
-"; assert(x == y)
-y = '\n\
-The \"quick\"\n\
-brown fox\n\
-jumps over\n\
-the \'lazy\' dog.\n\
-'; assert(x == y)
-
-
-print '1.2 Grammar'
-
-print 'single_input' # NEWLINE | simple_stmt | compound_stmt NEWLINE
-# XXX can't test in a script -- this rule is only used when interactive
-
-print 'file_input' # (NEWLINE | stmt)* ENDMARKER
-# Being tested as this very moment this very module
-
-print 'expr_input' # testlist NEWLINE
-# XXX Hard to test -- used only in calls to input()
-
-print 'eval_input' # testlist ENDMARKER
-x = eval('1, 0 or 1')
-
-print 'funcdef'
-### 'def' NAME parameters ':' suite
-### parameters: '(' [varargslist] ')'
-### varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' ('**'|'*' '*') NAME]
-### | ('**'|'*' '*') NAME)
-### | fpdef ['=' test] (',' fpdef ['=' test])* [',']
-### fpdef: NAME | '(' fplist ')'
-### fplist: fpdef (',' fpdef)* [',']
-### arglist: (argument ',')* (argument | '*' test [',' '**' test] | '**' test)
-### argument: [test '='] test # Really [keyword '='] test
-def f1(): pass
-f1()
-f1(*())
-f1(*(), **{})
-def f2(one_argument): pass
-def f3(two, arguments): pass
-def f4(two, (compound, (argument, list))): pass
-def a1(one_arg,): pass
-def a2(two, args,): pass
-def v0(*rest): pass
-def v1(a, *rest): pass
-def v2(a, b, *rest): pass
-def v3(a, (b, c), *rest): pass
-def d01(a=1): pass
-d01()
-d01(1)
-d01(*(1,))
-d01(**{'a':2})
-def d11(a, b=1): pass
-d11(1)
-d11(1, 2)
-d11(1, **{'b':2})
-def d21(a, b, c=1): pass
-d21(1, 2)
-d21(1, 2, 3)
-d21(*(1, 2, 3))
-d21(1, *(2, 3))
-d21(1, 2, *(3,))
-d21(1, 2, **{'c':3})
-def d02(a=1, b=2): pass
-d02()
-d02(1)
-d02(1, 2)
-d02(*(1, 2))
-d02(1, *(2,))
-d02(1, **{'b':2})
-d02(**{'a': 1, 'b': 2})
-def d12(a, b=1, c=2): pass
-d12(1)
-d12(1, 2)
-d12(1, 2, 3)
-def d22(a, b, c=1, d=2): pass
-d22(1, 2)
-d22(1, 2, 3)
-d22(1, 2, 3, 4)
-def d01v(a=1, *rest): pass
-d01v()
-d01v(1)
-d01v(1, 2)
-d01v(*(1, 2, 3, 4))
-d01v(*(1,))
-d01v(**{'a':2})
-def d11v(a, b=1, *rest): pass
-d11v(1)
-d11v(1, 2)
-d11v(1, 2, 3)
-def d21v(a, b, c=1, *rest): pass
-d21v(1, 2)
-d21v(1, 2, 3)
-d21v(1, 2, 3, 4)
-d21v(*(1, 2, 3, 4))
-d21v(1, 2, **{'c': 3})
-def d02v(a=1, b=2, *rest): pass
-d02v()
-d02v(1)
-d02v(1, 2)
-d02v(1, 2, 3)
-d02v(1, *(2, 3, 4))
-d02v(**{'a': 1, 'b': 2})
-def d12v(a, b=1, c=2, *rest): pass
-d12v(1)
-d12v(1, 2)
-d12v(1, 2, 3)
-d12v(1, 2, 3, 4)
-d12v(*(1, 2, 3, 4))
-d12v(1, 2, *(3, 4, 5))
-d12v(1, *(2,), **{'c': 3})
-def d22v(a, b, c=1, d=2, *rest): pass
-d22v(1, 2)
-d22v(1, 2, 3)
-d22v(1, 2, 3, 4)
-d22v(1, 2, 3, 4, 5)
-d22v(*(1, 2, 3, 4))
-d22v(1, 2, *(3, 4, 5))
-d22v(1, *(2, 3), **{'d': 4})
-
-### stmt: simple_stmt | compound_stmt
-# Tested below
-
-### simple_stmt: small_stmt (';' small_stmt)* [';']
-print 'simple_stmt'
-x = 1; pass; del x
-
-### small_stmt: expr_stmt | print_stmt | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt | exec_stmt
-# Tested below
-
-print 'expr_stmt' # (exprlist '=')* exprlist
-1
-1, 2, 3
-x = 1
-x = 1, 2, 3
-x = y = z = 1, 2, 3
-x, y, z = 1, 2, 3
-abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
-# NB these variables are deleted below
-
-print 'print_stmt' # 'print' (test ',')* [test]
-print 1, 2, 3
-print 1, 2, 3,
-print
-print 0 or 1, 0 or 1,
-print 0 or 1
-
-print 'extended print_stmt' # 'print' '>>' test ','
-import sys
-print >> sys.stdout, 1, 2, 3
-print >> sys.stdout, 1, 2, 3,
-print >> sys.stdout
-print >> sys.stdout, 0 or 1, 0 or 1,
-print >> sys.stdout, 0 or 1
-
-# test printing to an instance
-class Gulp:
- def write(self, msg): pass
-
-gulp = Gulp()
-print >> gulp, 1, 2, 3
-print >> gulp, 1, 2, 3,
-print >> gulp
-print >> gulp, 0 or 1, 0 or 1,
-print >> gulp, 0 or 1
-
-# test print >> None
-def driver():
- oldstdout = sys.stdout
- sys.stdout = Gulp()
- try:
- tellme(Gulp())
- tellme()
- finally:
- sys.stdout = oldstdout
-
-# we should see this once
-def tellme(file=sys.stdout):
- print >> file, 'hello world'
-
-driver()
-
-# we should not see this at all
-def tellme(file=None):
- print >> file, 'goodbye universe'
-
-driver()
-
-# syntax errors
-def check_syntax(statement):
- try:
- compile(statement, '<string>', 'exec')
- except SyntaxError:
- pass
- else:
- print 'Missing SyntaxError: "%s"' % statement
-check_syntax('print ,')
-check_syntax('print >> x,')
-
-print 'del_stmt' # 'del' exprlist
-del abc
-del x, y, (z, xyz)
-
-print 'pass_stmt' # 'pass'
-pass
-
-print 'flow_stmt' # break_stmt | continue_stmt | return_stmt | raise_stmt
-# Tested below
-
-print 'break_stmt' # 'break'
-while 1: break
-
-print 'continue_stmt' # 'continue'
-i = 1
-while i: i = 0; continue
-
-print 'return_stmt' # 'return' [testlist]
-def g1(): return
-def g2(): return 1
-g1()
-x = g2()
-
-print 'raise_stmt' # 'raise' test [',' test]
-try: raise RuntimeError, 'just testing'
-except RuntimeError: pass
-try: raise KeyboardInterrupt
-except KeyboardInterrupt: pass
-
-print 'import_stmt' # 'import' NAME (',' NAME)* | 'from' NAME 'import' ('*' | NAME (',' NAME)*)
-import sys
-import time, sys
-from time import time
-from sys import *
-from sys import path, argv
-
-print 'global_stmt' # 'global' NAME (',' NAME)*
-def f():
- global a
- global a, b
- global one, two, three, four, five, six, seven, eight, nine, ten
-
-print 'exec_stmt' # 'exec' expr ['in' expr [',' expr]]
-def f():
- z = None
- del z
- exec 'z=1+1\n'
- if z <> 2: raise TestFailed, 'exec \'z=1+1\'\\n'
- del z
- exec 'z=1+1'
- if z <> 2: raise TestFailed, 'exec \'z=1+1\''
- z = None
- del z
- exec u'z=1+1\n'
- if z <> 2: raise TestFailed, 'exec u\'z=1+1\'\\n'
- del z
- exec u'z=1+1'
- if z <> 2: raise TestFailed, 'exec u\'z=1+1\''
-f()
-g = {}
-exec 'z = 1' in g
-if g.has_key('__builtins__'): del g['__builtins__']
-if g <> {'z': 1}: raise TestFailed, 'exec \'z = 1\' in g'
-g = {}
-l = {}
-exec 'global a; a = 1; b = 2' in g, l
-if g.has_key('__builtins__'): del g['__builtins__']
-if l.has_key('__builtins__'): del l['__builtins__']
-if (g, l) <> ({'a':1}, {'b':2}): raise TestFailed, 'exec ... in g, l'
-
-
-### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
-# Tested below
-
-print 'if_stmt' # 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
-if 1: pass
-if 1: pass
-else: pass
-if 0: pass
-elif 0: pass
-if 0: pass
-elif 0: pass
-elif 0: pass
-elif 0: pass
-else: pass
-
-print 'while_stmt' # 'while' test ':' suite ['else' ':' suite]
-while 0: pass
-while 0: pass
-else: pass
-
-print 'for_stmt' # 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
-for i in 1, 2, 3: pass
-for i, j, k in (): pass
-else: pass
-class Squares:
- def __init__(self, max):
- self.max = max
- self.sofar = []
- def __len__(self): return len(self.sofar)
- def __getitem__(self, i):
- if not 0 <= i < self.max: raise IndexError
- n = len(self.sofar)
- while n <= i:
- self.sofar.append(n*n)
- n = n+1
- return self.sofar[i]
-n = 0
-for x in Squares(10): n = n+x
-if n != 285: raise TestFailed, 'for over growing sequence'
-
-print 'try_stmt'
-### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
-### | 'try' ':' suite 'finally' ':' suite
-### except_clause: 'except' [expr [',' expr]]
-try:
- 1/0
-except ZeroDivisionError:
- pass
-else:
- pass
-try: 1/0
-except EOFError: pass
-except TypeError, msg: pass
-except RuntimeError, msg: pass
-except: pass
-else: pass
-try: 1/0
-except (EOFError, TypeError, ZeroDivisionError): pass
-try: 1/0
-except (EOFError, TypeError, ZeroDivisionError), msg: pass
-try: pass
-finally: pass
-
-print 'suite' # simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
-if 1: pass
-if 1:
- pass
-if 1:
- #
- #
- #
- pass
- pass
- #
- pass
- #
-
-print 'test'
-### and_test ('or' and_test)*
-### and_test: not_test ('and' not_test)*
-### not_test: 'not' not_test | comparison
-if not 1: pass
-if 1 and 1: pass
-if 1 or 1: pass
-if not not not 1: pass
-if not 1 and 1 and 1: pass
-if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
-
-print 'comparison'
-### comparison: expr (comp_op expr)*
-### comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
-if 1: pass
-x = (1 == 1)
-if 1 == 1: pass
-if 1 != 1: pass
-if 1 <> 1: pass
-if 1 < 1: pass
-if 1 > 1: pass
-if 1 <= 1: pass
-if 1 >= 1: pass
-if 1 is 1: pass
-if 1 is not 1: pass
-if 1 in (): pass
-if 1 not in (): pass
-if 1 < 1 > 1 == 1 >= 1 <= 1 <> 1 != 1 in 1 not in 1 is 1 is not 1: pass
-
-print 'binary mask ops'
-x = 1 & 1
-x = 1 ^ 1
-x = 1 | 1
-
-print 'shift ops'
-x = 1 << 1
-x = 1 >> 1
-x = 1 << 1 >> 1
-
-print 'additive ops'
-x = 1
-x = 1 + 1
-x = 1 - 1 - 1
-x = 1 - 1 + 1 - 1 + 1
-
-print 'multiplicative ops'
-x = 1 * 1
-x = 1 / 1
-x = 1 % 1
-x = 1 / 1 * 1 % 1
-
-print 'unary ops'
-x = +1
-x = -1
-x = ~1
-x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
-x = -1*1/1 + 1*1 - ---1*1
-
-print 'selectors'
-### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
-### subscript: expr | [expr] ':' [expr]
-f1()
-f2(1)
-f2(1,)
-f3(1, 2)
-f3(1, 2,)
-f4(1, (2, (3, 4)))
-v0()
-v0(1)
-v0(1,)
-v0(1,2)
-v0(1,2,3,4,5,6,7,8,9,0)
-v1(1)
-v1(1,)
-v1(1,2)
-v1(1,2,3)
-v1(1,2,3,4,5,6,7,8,9,0)
-v2(1,2)
-v2(1,2,3)
-v2(1,2,3,4)
-v2(1,2,3,4,5,6,7,8,9,0)
-v3(1,(2,3))
-v3(1,(2,3),4)
-v3(1,(2,3),4,5,6,7,8,9,0)
-print
-import sys, time
-c = sys.path[0]
-x = time.time()
-x = sys.modules['time'].time()
-a = '01234'
-c = a[0]
-c = a[-1]
-s = a[0:5]
-s = a[:5]
-s = a[0:]
-s = a[:]
-s = a[-5:]
-s = a[:-1]
-s = a[-4:-3]
-
-print 'atoms'
-### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictmaker] '}' | '`' testlist '`' | NAME | NUMBER | STRING
-### dictmaker: test ':' test (',' test ':' test)* [',']
-
-x = (1)
-x = (1 or 2 or 3)
-x = (1 or 2 or 3, 2, 3)
-
-x = []
-x = [1]
-x = [1 or 2 or 3]
-x = [1 or 2 or 3, 2, 3]
-x = []
-
-x = {}
-x = {'one': 1}
-x = {'one': 1,}
-x = {'one' or 'two': 1 or 2}
-x = {'one': 1, 'two': 2}
-x = {'one': 1, 'two': 2,}
-x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
-
-x = `x`
-x = `1 or 2 or 3`
-x = x
-x = 'x'
-x = 123
-
-### exprlist: expr (',' expr)* [',']
-### testlist: test (',' test)* [',']
-# These have been exercised enough above
-
-print 'classdef' # 'class' NAME ['(' testlist ')'] ':' suite
-class B: pass
-class C1(B): pass
-class C2(B): pass
-class D(C1, C2, B): pass
-class C:
- def meth1(self): pass
- def meth2(self, arg): pass
- def meth3(self, a1, a2): pass
-
-# list comprehension tests
-nums = [1, 2, 3, 4, 5]
-strs = ["Apple", "Banana", "Coconut"]
-spcs = [" Apple", " Banana ", "Coco nut "]
-
-print [s.strip() for s in spcs]
-print [3 * x for x in nums]
-print [x for x in nums if x > 2]
-print [(i, s) for i in nums for s in strs]
-print [(i, s) for i in nums for s in [f for f in strs if "n" in f]]
-try:
- eval("[i, s for i in nums for s in strs]")
- print "FAIL: should have raised a SyntaxError!"
-except SyntaxError:
- print "good: got a SyntaxError as expected"
-
-try:
- eval("[x if y]")
- print "FAIL: should have raised a SyntaxError!"
-except SyntaxError:
- print "good: got a SyntaxError as expected"
-
-suppliers = [
- (1, "Boeing"),
- (2, "Ford"),
- (3, "Macdonalds")
-]
-
-parts = [
- (10, "Airliner"),
- (20, "Engine"),
- (30, "Cheeseburger")
-]
-
-suppart = [
- (1, 10), (1, 20), (2, 20), (3, 30)
-]
-
-print [
- (sname, pname)
- for (sno, sname) in suppliers
- for (pno, pname) in parts
- for (sp_sno, sp_pno) in suppart
- if sno == sp_sno and pno == sp_pno
-]
diff --git a/Lib/dos-8x3/test_gzi.py b/Lib/dos-8x3/test_gzi.py
deleted file mode 100644
index 2366d02..0000000
--- a/Lib/dos-8x3/test_gzi.py
+++ /dev/null
@@ -1,54 +0,0 @@
-
-import sys, os
-import gzip, tempfile
-
-filename = tempfile.mktemp()
-
-data1 = """ int length=DEFAULTALLOC, err = Z_OK;
- PyObject *RetVal;
- int flushmode = Z_FINISH;
- unsigned long start_total_out;
-
-"""
-
-data2 = """/* zlibmodule.c -- gzip-compatible data compression */
-/* See http://www.cdrom.com/pub/infozip/zlib/ */
-/* See http://www.winimage.com/zLibDll for Windows */
-"""
-
-f = gzip.GzipFile(filename, 'wb') ; f.write(data1 * 50) ; f.close()
-
-f = gzip.GzipFile(filename, 'rb') ; d = f.read() ; f.close()
-assert d == data1*50
-
-# Append to the previous file
-f = gzip.GzipFile(filename, 'ab') ; f.write(data2 * 15) ; f.close()
-
-f = gzip.GzipFile(filename, 'rb') ; d = f.read() ; f.close()
-assert d == (data1*50) + (data2*15)
-
-# Try .readline() with varying line lengths
-
-f = gzip.GzipFile(filename, 'rb')
-line_length = 0
-while 1:
- L = f.readline( line_length )
- if L == "" and line_length != 0: break
- assert len(L) <= line_length
- line_length = (line_length + 1) % 50
-f.close()
-
-# Try .readlines()
-
-f = gzip.GzipFile(filename, 'rb')
-L = f.readlines()
-f.close()
-
-f = gzip.GzipFile(filename, 'rb')
-while 1:
- L = f.readlines(150)
- if L == []: break
-f.close()
-
-
-os.unlink( filename )
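The deleted test above drives gzip.GzipFile by hand: write a file, append a second member in mode 'ab', then check that a single read returns both chunks and that readline()/readlines() behave. As a rough sketch only (not part of the diff, and written for a current Python 3 interpreter), the same round-trip looks like this, with tempfile.mkstemp standing in for the insecure tempfile.mktemp used above and made-up payloads:

    import gzip, os, tempfile

    fd, path = tempfile.mkstemp(suffix=".gz")
    os.close(fd)

    data1 = b"spam and eggs\n" * 50        # placeholder payloads, not the
    data2 = b"zlib module header\n" * 15   # strings from the original test

    with gzip.GzipFile(path, "wb") as f:   # first gzip member
        f.write(data1)
    with gzip.GzipFile(path, "ab") as f:   # append a second member
        f.write(data2)

    with gzip.GzipFile(path, "rb") as f:   # a read spans both members
        assert f.read() == data1 + data2

    with gzip.GzipFile(path, "rb") as f:   # line-oriented access also works
        assert f.readline() == b"spam and eggs\n"

    os.unlink(path)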
diff --git a/Lib/dos-8x3/test_has.py b/Lib/dos-8x3/test_has.py
deleted file mode 100644
index 51b4c33..0000000
--- a/Lib/dos-8x3/test_has.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# test the invariant that
-# if a==b then hash(a)==hash(b)
-#
-
-import test_support
-
-
-def same_hash(*objlist):
- # hash each object given and raise TestFailed if
- # the hash values are not all the same
- hashed = map(hash, objlist)
- for h in hashed[1:]:
- if h != hashed[0]:
- raise TestFailed, "hashed values differ: %s" % `objlist`
-
-
-
-same_hash(1, 1L, 1.0, 1.0+0.0j)
-same_hash(int(1), long(1), float(1), complex(1))
-
-same_hash(long(1.23e300), float(1.23e300))
-
-same_hash(float(0.5), complex(0.5, 0.0))
-
-
-
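What the deleted test checks is the one-way invariant that equal numbers must hash equal across numeric types. A minimal sketch of the same property for today's Python (sample values are arbitrary; the long type and its L suffix no longer exist):

    def same_hash(*objs):
        # equal objects are required to hash equal
        hashes = set(map(hash, objs))
        if len(hashes) != 1:
            raise AssertionError("hashed values differ: %r" % (objs,))

    same_hash(1, 1.0, 1 + 0j)
    same_hash(int(1), float(1), complex(1))
    same_hash(0.5, complex(0.5, 0.0))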
diff --git a/Lib/dos-8x3/test_ima.py b/Lib/dos-8x3/test_ima.py
deleted file mode 100644
index 6b144c6..0000000
--- a/Lib/dos-8x3/test_ima.py
+++ /dev/null
@@ -1,171 +0,0 @@
-#! /usr/bin/env python
-
-"""Test script for the imageop module. This has the side
- effect of partially testing the imgfile module as well.
- Roger E. Masse
-"""
-
-from test_support import verbose, unlink
-
-import imageop, uu
-
-def main(use_rgbimg=1):
-
- # Create binary test files
- uu.decode(get_qualified_path('testrgb.uue'), 'test.rgb')
-
- if use_rgbimg:
- image, width, height = getrgbimage('test.rgb')
- else:
- image, width, height = getimage('test.rgb')
-
- # Return the selected part of image, which should be width by height
- # in size and consist of pixels of psize bytes.
- if verbose:
- print 'crop'
- newimage = imageop.crop (image, 4, width, height, 0, 0, 1, 1)
-
- # Return image scaled to size newwidth by newheight. No interpolation
- # is done; scaling is done by simple-minded pixel duplication or removal.
- # Therefore, computer-generated images or dithered images will
- # not look nice after scaling.
- if verbose:
- print 'scale'
- scaleimage = imageop.scale(image, 4, width, height, 1, 1)
-
- # Run a vertical low-pass filter over an image. It does so by computing
- # each destination pixel as the average of two vertically-aligned source
- # pixels. The main use of this routine is to forestall excessive flicker
- # if the image is displayed on a video device that uses interlacing, hence the name.
- if verbose:
- print 'tovideo'
- videoimage = imageop.tovideo (image, 4, width, height)
-
- # Convert an rgb image to an 8 bit rgb
- if verbose:
- print 'rgb2rgb8'
- greyimage = imageop.rgb2rgb8(image, width, height)
-
- # Convert an 8 bit rgb image to a 24 bit rgb image
- if verbose:
- print 'rgb82rgb'
- image = imageop.rgb82rgb(greyimage, width, height)
-
- # Convert an rgb image to an 8 bit greyscale image
- if verbose:
- print 'rgb2grey'
- greyimage = imageop.rgb2grey(image, width, height)
-
- # Convert an 8 bit greyscale image to a 24 bit rgb image
- if verbose:
- print 'grey2rgb'
- image = imageop.grey2rgb(greyimage, width, height)
-
- # Convert an 8-bit deep greyscale image to a 1-bit deep image by
- # thresholding all the pixels. The resulting image is tightly packed
- # and is probably only useful as an argument to mono2grey.
- if verbose:
- print 'grey2mono'
- monoimage = imageop.grey2mono (greyimage, width, height, 0)
-
- # monoimage, width, height = getimage('monotest.rgb')
- # Convert a 1-bit monochrome image to an 8 bit greyscale or color image.
- # All pixels that are zero-valued on input get value p0 on output and
- # all one-value input pixels get value p1 on output. To convert a
- # monochrome black-and-white image to greyscale pass the values 0 and
- # 255 respectively.
- if verbose:
- print 'mono2grey'
- greyimage = imageop.mono2grey (monoimage, width, height, 0, 255)
-
- # Convert an 8-bit greyscale image to a 1-bit monochrome image using a
- # (simple-minded) dithering algorithm.
- if verbose:
- print 'dither2mono'
- monoimage = imageop.dither2mono (greyimage, width, height)
-
- # Convert an 8-bit greyscale image to a 4-bit greyscale image without
- # dithering.
- if verbose:
- print 'grey2grey4'
- grey4image = imageop.grey2grey4 (greyimage, width, height)
-
- # Convert an 8-bit greyscale image to a 2-bit greyscale image without
- # dithering.
- if verbose:
- print 'grey2grey2'
- grey2image = imageop.grey2grey2 (greyimage, width, height)
-
- # Convert an 8-bit greyscale image to a 2-bit greyscale image with
- # dithering. As for dither2mono, the dithering algorithm is currently
- # very simple.
- if verbose:
- print 'dither2grey2'
- grey2image = imageop.dither2grey2 (greyimage, width, height)
-
- # Convert a 4-bit greyscale image to an 8-bit greyscale image.
- if verbose:
- print 'grey42grey'
- greyimage = imageop.grey42grey (grey4image, width, height)
-
- # Convert a 2-bit greyscale image to an 8-bit greyscale image.
- if verbose:
- print 'grey22grey'
- image = imageop.grey22grey (grey2image, width, height)
-
- # Cleanup
- unlink('test.rgb')
-
-def getrgbimage(name):
- """return a tuple consisting of image (in 'imgfile' format but
- using rgbimg instead), width and height"""
-
- import rgbimg
-
- try:
- sizes = rgbimg.sizeofimage(name)
- except rgbimg.error:
- name = get_qualified_path(name)
- sizes = rgbimg.sizeofimage(name)
- if verbose:
- print 'rgbimg opening test image: %s, sizes: %s' % (name, str(sizes))
-
- image = rgbimg.longimagedata(name)
- return (image, sizes[0], sizes[1])
-
-def getimage(name):
- """return a tuple consisting of
- image (in 'imgfile' format), width and height
- """
-
- import imgfile
-
- try:
- sizes = imgfile.getsizes(name)
- except imgfile.error:
- name = get_qualified_path(name)
- sizes = imgfile.getsizes(name)
- if verbose:
- print 'imgfile opening test image: %s, sizes: %s' % (name, str(sizes))
-
- image = imgfile.read(name)
- return (image, sizes[0], sizes[1])
-
-def get_qualified_path(name):
- """ return a more qualified path to name"""
- import sys
- import os
- path = sys.path
- try:
- path = [os.path.dirname(__file__)] + path
- except NameError:
- pass
- for dir in path:
- fullname = os.path.join(dir, name)
- if os.path.exists(fullname):
- return fullname
- return name
-
-# rgbimg (unlike imgfile) is portable to platforms other than SGI.
-# So we prefer to use it.
-main(use_rgbimg=1)
diff --git a/Lib/dos-8x3/test_img.py b/Lib/dos-8x3/test_img.py
deleted file mode 100644
index b074320..0000000
--- a/Lib/dos-8x3/test_img.py
+++ /dev/null
@@ -1,117 +0,0 @@
-#! /usr/bin/env python
-
-"""Simple test script for imgfile.c
- Roger E. Masse
-"""
-
-from test_support import verbose, unlink, findfile
-
-import imgfile, uu, os
-
-
-def main():
-
- uu.decode(findfile('testrgb.uue'), 'test.rgb')
- uu.decode(findfile('greyrgb.uue'), 'greytest.rgb')
-
- # Test a 3 byte color image
- testimage('test.rgb')
-
- # Test a 1 byte greyscale image
- testimage('greytest.rgb')
-
- unlink('test.rgb')
- unlink('greytest.rgb')
-
-def testimage(name):
- """Run through imgfile's battery of possible methods
- on the image passed in name.
- """
-
- import sys
- import os
- import string
-
- outputfile = '/tmp/deleteme'
-
- # try opening the name directly
- try:
- # This function returns a tuple (x, y, z) where x and y are the size
- # of the image in pixels and z is the number of bytes per pixel. Only
- # 3 byte RGB pixels and 1 byte greyscale pixels are supported.
- sizes = imgfile.getsizes(name)
- except imgfile.error:
- # get a more qualified path component of the script...
- if __name__ == '__main__':
- ourname = sys.argv[0]
- else: # ...or the full path of the module
- ourname = sys.modules[__name__].__file__
-
- parts = string.splitfields(ourname, os.sep)
- parts[-1] = name
- name = string.joinfields(parts, os.sep)
- sizes = imgfile.getsizes(name)
- if verbose:
- print 'Opening test image: %s, sizes: %s' % (name, str(sizes))
- # This function reads and decodes the image on the specified file,
- # and returns it as a python string. The string has either 1 byte
- # greyscale pixels or 4 byte RGBA pixels. The bottom left pixel
- # is the first in the string. This format is suitable to pass
- # to gl.lrectwrite, for instance.
- image = imgfile.read(name)
-
- # This function writes the RGB or greyscale data in data to
- # image file file. x and y give the size of the image, z is
- # 1 for 1 byte greyscale images or 3 for RGB images (which
- # are stored as 4 byte values of which only the lower three
- # bytes are used). These are the formats returned by gl.lrectread.
- if verbose:
- print 'Writing output file'
- imgfile.write (outputfile, image, sizes[0], sizes[1], sizes[2])
-
-
- if verbose:
- print 'Opening scaled test image: %s, sizes: %s' % (name, str(sizes))
- # This function is identical to read but it returns an image that
- # is scaled to the given x and y sizes. If the filter and blur
- # parameters are omitted scaling is done by simply dropping
- # or duplicating pixels, so the result will be less than perfect,
- # especially for computer-generated images. Alternatively,
- # you can specify a filter to use to smoothen the image after
- # scaling. The filter forms supported are 'impulse', 'box',
- # 'triangle', 'quadratic' and 'gaussian'. If a filter is
- # specified blur is an optional parameter specifying the
- # blurriness of the filter. It defaults to 1.0. readscaled
- # makes no attempt to keep the aspect ratio correct, so that
- # is the users' responsibility.
- if verbose:
- print 'Filtering with "impulse"'
- simage = imgfile.readscaled (name, sizes[0]/2, sizes[1]/2, 'impulse', 2.0)
-
- # This function sets a global flag which defines whether the
- # scan lines of the image are read or written from bottom to
- # top (flag is zero, compatible with SGI GL) or from top to
- # bottom(flag is one, compatible with X). The default is zero.
- if verbose:
- print 'Switching to X compatibility'
- imgfile.ttob (1)
-
- if verbose:
- print 'Filtering with "triangle"'
- simage = imgfile.readscaled (name, sizes[0]/2, sizes[1]/2, 'triangle', 3.0)
- if verbose:
- print 'Switching back to SGI compatibility'
- imgfile.ttob (0)
-
- if verbose: print 'Filtering with "quadratic"'
- simage = imgfile.readscaled (name, sizes[0]/2, sizes[1]/2, 'quadratic')
- if verbose: print 'Filtering with "gaussian"'
- simage = imgfile.readscaled (name, sizes[0]/2, sizes[1]/2, 'gaussian', 1.0)
-
- if verbose:
- print 'Writing output file'
- imgfile.write (outputfile, simage, sizes[0]/2, sizes[1]/2, sizes[2])
-
- os.unlink(outputfile)
-
-main()
diff --git a/Lib/dos-8x3/test_imp.py b/Lib/dos-8x3/test_imp.py
deleted file mode 100644
index c7ab753..0000000
--- a/Lib/dos-8x3/test_imp.py
+++ /dev/null
@@ -1,44 +0,0 @@
-from test_support import TESTFN
-
-import os
-import random
-
-source = TESTFN + ".py"
-pyc = TESTFN + ".pyc"
-pyo = TESTFN + ".pyo"
-
-f = open(source, "w")
-print >> f, "# This will test Python's ability to import a .py file"
-a = random.randrange(1000)
-b = random.randrange(1000)
-print >> f, "a =", a
-print >> f, "b =", b
-f.close()
-
-try:
- try:
- mod = __import__(TESTFN)
- except ImportError, err:
- raise ValueError, "import from .py failed: %s" % err
-
- if mod.a != a or mod.b != b:
- print a, "!=", mod.a
- print b, "!=", mod.b
- raise ValueError, "module loaded (%s) but contents invalid" % mod
-finally:
- os.unlink(source)
-
-try:
- try:
- reload(mod)
- except ImportError, err:
- raise ValueError, "import from .pyc/.pyo failed: %s" % err
-finally:
- try:
- os.unlink(pyc)
- except os.error:
- pass
- try:
- os.unlink(pyo)
- except os.error:
- pass
diff --git a/Lib/dos-8x3/test_lar.py b/Lib/dos-8x3/test_lar.py
deleted file mode 100644
index 5b65237..0000000
--- a/Lib/dos-8x3/test_lar.py
+++ /dev/null
@@ -1,129 +0,0 @@
-#!python
-
-#----------------------------------------------------------------------
-# test largefile support on systems where this makes sense
-#
-#XXX how to only run this when support is there
-#XXX how to only optionally run this, it will take a long time
-#----------------------------------------------------------------------
-
-import test_support
-import os, struct, stat, sys
-
-
-# only run if the current system supports large files
-f = open(test_support.TESTFN, 'w')
-try:
- # 2**31 == 2147483648
- f.seek(2147483649L)
-except OverflowError:
- raise test_support.TestSkipped, "platform does not have largefile support"
-else:
- f.close()
-
-
-# create >2GB file (2GB = 2147483648 bytes)
-size = 2500000000L
-name = test_support.TESTFN
-
-
-# on Windows this test consumes large resources:
-# it takes a long time to build the >2GB file and takes >2GB of disk space
-# therefore test_support.use_large_resources must be defined to run this test
-if sys.platform[:3] == 'win' and not test_support.use_large_resources:
- raise test_support.TestSkipped, \
- "test requires %s bytes and a long time to run" % str(size)
-
-
-
-def expect(got_this, expect_this):
- if test_support.verbose:
- print '%s =?= %s ...' % (`got_this`, `expect_this`),
- if got_this != expect_this:
- if test_support.verbose:
- print 'no'
- raise test_support.TestFailed, 'got %s, but expected %s' %\
- (str(got_this), str(expect_this))
- else:
- if test_support.verbose:
- print 'yes'
-
-
-# test that each file function works as expected for large (i.e. >2GB; do
-# we have to check >4GB?) files
-
-if test_support.verbose:
- print 'create large file via seek (may be sparse file) ...'
-f = open(name, 'w')
-f.seek(size)
-f.write('a')
-f.flush()
-expect(os.fstat(f.fileno())[stat.ST_SIZE], size+1)
-if test_support.verbose:
- print 'check file size with os.fstat'
-f.close()
-if test_support.verbose:
- print 'check file size with os.stat'
-expect(os.stat(name)[stat.ST_SIZE], size+1)
-
-if test_support.verbose:
- print 'play around with seek() and read() with the built largefile'
-f = open(name, 'r')
-expect(f.tell(), 0)
-expect(f.read(1), '\000')
-expect(f.tell(), 1)
-f.seek(0)
-expect(f.tell(), 0)
-f.seek(0, 0)
-expect(f.tell(), 0)
-f.seek(42)
-expect(f.tell(), 42)
-f.seek(42, 0)
-expect(f.tell(), 42)
-f.seek(42, 1)
-expect(f.tell(), 84)
-f.seek(0, 1)
-expect(f.tell(), 84)
-f.seek(0, 2) # seek from the end
-expect(f.tell(), size + 1 + 0)
-f.seek(-10, 2)
-expect(f.tell(), size + 1 - 10)
-f.seek(-size-1, 2)
-expect(f.tell(), 0)
-f.seek(size)
-expect(f.tell(), size)
-expect(f.read(1), 'a') # the 'a' that was written at the end of the file above
-f.close()
-
-if test_support.verbose:
- print 'play around with os.lseek() with the built largefile'
-f = open(name, 'r')
-expect(os.lseek(f.fileno(), 0, 0), 0)
-expect(os.lseek(f.fileno(), 42, 0), 42)
-expect(os.lseek(f.fileno(), 42, 1), 84)
-expect(os.lseek(f.fileno(), 0, 1), 84)
-expect(os.lseek(f.fileno(), 0, 2), size+1+0)
-expect(os.lseek(f.fileno(), -10, 2), size+1-10)
-expect(os.lseek(f.fileno(), -size-1, 2), 0)
-expect(os.lseek(f.fileno(), size, 0), size)
-expect(f.read(1), 'a') # the 'a' that was written at the end of the file above
-f.close()
-
-
-# XXX add tests for truncate if it exists
-# XXX has truncate ever worked on Windows? specifically on WinNT I get:
-# "IOError: [Errno 13] Permission denied"
-##try:
-## newsize = size - 10
-## f.seek(newsize)
-## f.truncate()
-## expect(f.tell(), newsize)
-## newsize = newsize - 1
-## f.seek(0)
-## f.truncate(newsize)
-## expect(f.tell(), newsize)
-##except AttributeError:
-## pass
-
-os.unlink(name)
-
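The deleted largefile test builds its >2GB file by seeking past the end of an empty file and writing a single byte, which yields a sparse file on most filesystems. The trick itself is small; this sketch shrinks the size to a few megabytes (a placeholder, not the 2.5 GB used above) and cleans up immediately:

    import os, tempfile

    size = 5 * 1024 * 1024            # stand-in for the 2500000000L above

    fd, path = tempfile.mkstemp()
    with os.fdopen(fd, "wb") as f:
        f.seek(size)                  # seek past EOF...
        f.write(b"a")                 # ...and write one byte to extend the file
        f.flush()
        assert os.fstat(f.fileno()).st_size == size + 1

    assert os.stat(path).st_size == size + 1
    os.unlink(path)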
diff --git a/Lib/dos-8x3/test_lin.py b/Lib/dos-8x3/test_lin.py
deleted file mode 100644
index 7924a24..0000000
--- a/Lib/dos-8x3/test_lin.py
+++ /dev/null
@@ -1,89 +0,0 @@
-from test_support import verbose, findfile, TestFailed, TestSkipped
-
-import errno
-import fcntl
-import linuxaudiodev
-import os
-import sys
-import select
-import sunaudio
-import time
-import audioop
-
-SND_FORMAT_MULAW_8 = 1
-
-def play_sound_file(path):
- fp = open(path, 'r')
- size, enc, rate, nchannels, extra = sunaudio.gethdr(fp)
- data = fp.read()
- fp.close()
-
- if enc != SND_FORMAT_MULAW_8:
- print "Expect .au file with 8-bit mu-law samples"
- return
-
- try:
- a = linuxaudiodev.open('w')
- except linuxaudiodev.error, msg:
- if msg[0] in (errno.EACCES, errno.ENODEV):
- raise TestSkipped, msg
- raise TestFailed, msg
-
- # convert the data to 16-bit signed
- data = audioop.ulaw2lin(data, 2)
-
- # set the data format
- if sys.byteorder == 'little':
- fmt = linuxaudiodev.AFMT_S16_LE
- else:
- fmt = linuxaudiodev.AFMT_S16_BE
-
- # at least check that these methods can be invoked
- a.bufsize()
- a.obufcount()
- a.obuffree()
- a.getptr()
- a.fileno()
-
- # set parameters based on .au file headers
- a.setparameters(rate, 16, nchannels, fmt)
- a.write(data)
- a.flush()
- a.close()
-
-def test_errors():
- a = linuxaudiodev.open("w")
- size = 8
- fmt = linuxaudiodev.AFMT_U8
- rate = 8000
- nchannels = 1
- try:
- a.setparameters(-1, size, nchannels, fmt)
- except ValueError, msg:
- print msg
- try:
- a.setparameters(rate, -2, nchannels, fmt)
- except ValueError, msg:
- print msg
- try:
- a.setparameters(rate, size, 3, fmt)
- except ValueError, msg:
- print msg
- try:
- a.setparameters(rate, size, nchannels, 177)
- except ValueError, msg:
- print msg
- try:
- a.setparameters(rate, size, nchannels, linuxaudiodev.AFMT_U16_LE)
- except ValueError, msg:
- print msg
- try:
- a.setparameters(rate, 16, nchannels, fmt)
- except ValueError, msg:
- print msg
-
-def test():
- play_sound_file(findfile('audiotest.au'))
- test_errors()
-
-test()
diff --git a/Lib/dos-8x3/test_lon.py b/Lib/dos-8x3/test_lon.py
deleted file mode 100644
index 6a1f117..0000000
--- a/Lib/dos-8x3/test_lon.py
+++ /dev/null
@@ -1,260 +0,0 @@
-from test_support import TestFailed, verbose
-from string import join
-from random import random, randint
-
-# SHIFT should match the value in longintrepr.h for best testing.
-SHIFT = 15
-BASE = 2 ** SHIFT
-MASK = BASE - 1
-
-# Max number of base BASE digits to use in test cases. Doubling
-# this will at least quadruple the runtime.
-MAXDIGITS = 10
-
-# build some special values
-special = map(long, [0, 1, 2, BASE, BASE >> 1])
-special.append(0x5555555555555555L)
-special.append(0xaaaaaaaaaaaaaaaaL)
-# some solid strings of one bits
-p2 = 4L # 0 and 1 already added
-for i in range(2*SHIFT):
- special.append(p2 - 1)
- p2 = p2 << 1
-del p2
-# add complements & negations
-special = special + map(lambda x: ~x, special) + \
- map(lambda x: -x, special)
-
-# ------------------------------------------------------------ utilities
-
-# Use check instead of assert so the test still does something
-# under -O.
-
-def check(ok, *args):
- if not ok:
- raise TestFailed, join(map(str, args), " ")
-
-# Get quasi-random long consisting of ndigits digits (in base BASE).
-# quasi == the most-significant digit will not be 0, and the number
-# is constructed to contain long strings of 0 and 1 bits. These are
-# more likely than random bits to provoke digit-boundary errors.
-# The sign of the number is also random.
-
-def getran(ndigits):
- assert ndigits > 0
- nbits_hi = ndigits * SHIFT
- nbits_lo = nbits_hi - SHIFT + 1
- answer = 0L
- nbits = 0
- r = int(random() * (SHIFT * 2)) | 1 # force 1 bits to start
- while nbits < nbits_lo:
- bits = (r >> 1) + 1
- bits = min(bits, nbits_hi - nbits)
- assert 1 <= bits <= SHIFT
- nbits = nbits + bits
- answer = answer << bits
- if r & 1:
- answer = answer | ((1 << bits) - 1)
- r = int(random() * (SHIFT * 2))
- assert nbits_lo <= nbits <= nbits_hi
- if random() < 0.5:
- answer = -answer
- return answer
-
-# Get random long consisting of ndigits random digits (relative to base
-# BASE). The sign bit is also random.
-
-def getran2(ndigits):
- answer = 0L
- for i in range(ndigits):
- answer = (answer << SHIFT) | randint(0, MASK)
- if random() < 0.5:
- answer = -answer
- return answer
-
-# --------------------------------------------------------------- divmod
-
-def test_division_2(x, y):
- q, r = divmod(x, y)
- q2, r2 = x/y, x%y
- pab, pba = x*y, y*x
- check(pab == pba, "multiplication does not commute for", x, y)
- check(q == q2, "divmod returns different quotient than / for", x, y)
- check(r == r2, "divmod returns different mod than % for", x, y)
- check(x == q*y + r, "x != q*y + r after divmod on", x, y)
- if y > 0:
- check(0 <= r < y, "bad mod from divmod on", x, y)
- else:
- check(y < r <= 0, "bad mod from divmod on", x, y)
-
-def test_division(maxdigits=MAXDIGITS):
- print "long / * % divmod"
- digits = range(1, maxdigits+1)
- for lenx in digits:
- x = getran(lenx)
- for leny in digits:
- y = getran(leny) or 1L
- test_division_2(x, y)
-
-# -------------------------------------------------------------- ~ & | ^
-
-def test_bitop_identities_1(x):
- check(x & 0 == 0, "x & 0 != 0 for", x)
- check(x | 0 == x, "x | 0 != x for", x)
- check(x ^ 0 == x, "x ^ 0 != x for", x)
- check(x & -1 == x, "x & -1 != x for", x)
- check(x | -1 == -1, "x | -1 != -1 for", x)
- check(x ^ -1 == ~x, "x ^ -1 != ~x for", x)
- check(x == ~~x, "x != ~~x for", x)
- check(x & x == x, "x & x != x for", x)
- check(x | x == x, "x | x != x for", x)
- check(x ^ x == 0, "x ^ x != 0 for", x)
- check(x & ~x == 0, "x & ~x != 0 for", x)
- check(x | ~x == -1, "x | ~x != -1 for", x)
- check(x ^ ~x == -1, "x ^ ~x != -1 for", x)
- check(-x == 1 + ~x == ~(x-1), "not -x == 1 + ~x == ~(x-1) for", x)
- for n in range(2*SHIFT):
- p2 = 2L ** n
- check(x << n >> n == x, "x << n >> n != x for", x, n)
- check(x / p2 == x >> n, "x / p2 != x >> n for x n p2", x, n, p2)
- check(x * p2 == x << n, "x * p2 != x << n for x n p2", x, n, p2)
- check(x & -p2 == x >> n << n == x & ~(p2 - 1),
- "not x & -p2 == x >> n << n == x & ~(p2 - 1) for x n p2",
- x, n, p2)
-
-def test_bitop_identities_2(x, y):
- check(x & y == y & x, "x & y != y & x for", x, y)
- check(x | y == y | x, "x | y != y | x for", x, y)
- check(x ^ y == y ^ x, "x ^ y != y ^ x for", x, y)
- check(x ^ y ^ x == y, "x ^ y ^ x != y for", x, y)
- check(x & y == ~(~x | ~y), "x & y != ~(~x | ~y) for", x, y)
- check(x | y == ~(~x & ~y), "x | y != ~(~x & ~y) for", x, y)
- check(x ^ y == (x | y) & ~(x & y),
- "x ^ y != (x | y) & ~(x & y) for", x, y)
- check(x ^ y == (x & ~y) | (~x & y),
- "x ^ y != (x & ~y) | (~x & y) for", x, y)
- check(x ^ y == (x | y) & (~x | ~y),
- "x ^ y != (x | y) & (~x | ~y) for", x, y)
-
-def test_bitop_identities_3(x, y, z):
- check((x & y) & z == x & (y & z),
- "(x & y) & z != x & (y & z) for", x, y, z)
- check((x | y) | z == x | (y | z),
- "(x | y) | z != x | (y | z) for", x, y, z)
- check((x ^ y) ^ z == x ^ (y ^ z),
- "(x ^ y) ^ z != x ^ (y ^ z) for", x, y, z)
- check(x & (y | z) == (x & y) | (x & z),
- "x & (y | z) != (x & y) | (x & z) for", x, y, z)
- check(x | (y & z) == (x | y) & (x | z),
- "x | (y & z) != (x | y) & (x | z) for", x, y, z)
-
-def test_bitop_identities(maxdigits=MAXDIGITS):
- print "long bit-operation identities"
- for x in special:
- test_bitop_identities_1(x)
- digits = range(1, maxdigits+1)
- for lenx in digits:
- x = getran(lenx)
- test_bitop_identities_1(x)
- for leny in digits:
- y = getran(leny)
- test_bitop_identities_2(x, y)
- test_bitop_identities_3(x, y, getran((lenx + leny)/2))
-
-# ------------------------------------------------- hex oct repr str atol
-
-def slow_format(x, base):
- if (x, base) == (0, 8):
- # this is an oddball!
- return "0L"
- digits = []
- sign = 0
- if x < 0:
- sign, x = 1, -x
- while x:
- x, r = divmod(x, base)
- digits.append(int(r))
- digits.reverse()
- digits = digits or [0]
- return '-'[:sign] + \
- {8: '0', 10: '', 16: '0x'}[base] + \
- join(map(lambda i: "0123456789ABCDEF"[i], digits), '') + \
- "L"
-
-def test_format_1(x):
- from string import atol
- for base, mapper in (8, oct), (10, repr), (16, hex):
- got = mapper(x)
- expected = slow_format(x, base)
- check(got == expected, mapper.__name__, "returned",
- got, "but expected", expected, "for", x)
- check(atol(got, 0) == x, 'atol("%s", 0) !=' % got, x)
- # str() has to be checked a little differently since there's no
- # trailing "L"
- got = str(x)
- expected = slow_format(x, 10)[:-1]
- check(got == expected, mapper.__name__, "returned",
- got, "but expected", expected, "for", x)
-
-def test_format(maxdigits=MAXDIGITS):
- print "long str/hex/oct/atol"
- for x in special:
- test_format_1(x)
- for i in range(10):
- for lenx in range(1, maxdigits+1):
- x = getran(lenx)
- test_format_1(x)
-
-# ----------------------------------------------------------------- misc
-
-def test_misc(maxdigits=MAXDIGITS):
- print "long miscellaneous operations"
- import sys
-
- # check the extremes in int<->long conversion
- hugepos = sys.maxint
- hugeneg = -hugepos - 1
- hugepos_aslong = long(hugepos)
- hugeneg_aslong = long(hugeneg)
- check(hugepos == hugepos_aslong, "long(sys.maxint) != sys.maxint")
- check(hugeneg == hugeneg_aslong,
- "long(-sys.maxint-1) != -sys.maxint-1")
-
- # long -> int should not fail for hugepos_aslong or hugeneg_aslong
- try:
- check(int(hugepos_aslong) == hugepos,
- "converting sys.maxint to long and back to int fails")
- except OverflowError:
- raise TestFailed, "int(long(sys.maxint)) overflowed!"
- try:
- check(int(hugeneg_aslong) == hugeneg,
- "converting -sys.maxint-1 to long and back to int fails")
- except OverflowError:
- raise TestFailed, "int(long(-sys.maxint-1)) overflowed!"
-
- # but long -> int should overflow for hugepos+1 and hugeneg-1
- x = hugepos_aslong + 1
- try:
- int(x)
- raise ValueError
- except OverflowError:
- pass
- except:
- raise TestFailed, "int(long(sys.maxint) + 1) didn't overflow"
-
- x = hugeneg_aslong - 1
- try:
- int(x)
- raise ValueError
- except OverflowError:
- pass
- except:
- raise TestFailed, "int(long(-sys.maxint-1) - 1) didn't overflow"
-
-# ---------------------------------------------------------------- do it
-
-test_division()
-test_bitop_identities()
-test_format()
-test_misc()
-
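The core of test_division_2 above is the pair of identities x == q*y + r with the remainder taking the sign of the divisor. A compact standalone check of the same property, using random.getrandbits instead of the getran helper from the deleted file (an arbitrary substitution, chosen only to keep the sketch self-contained):

    from random import choice, getrandbits

    def check_divmod(x, y):
        q, r = divmod(x, y)
        assert x == q * y + r, (x, y)
        # the remainder always takes the sign of the divisor
        if y > 0:
            assert 0 <= r < y, (x, y)
        else:
            assert y < r <= 0, (x, y)

    for _ in range(1000):
        x = choice((1, -1)) * getrandbits(200)
        y = choice((1, -1)) * getrandbits(100) or 1   # avoid dividing by zero
        check_divmod(x, y)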
diff --git a/Lib/dos-8x3/test_mat.py b/Lib/dos-8x3/test_mat.py
deleted file mode 100644
index b7fde0a..0000000
--- a/Lib/dos-8x3/test_mat.py
+++ /dev/null
@@ -1,195 +0,0 @@
-# Python test set -- math module
-# XXXX Should not do tests around zero only
-
-from test_support import *
-
-seps='1e-05'
-eps = eval(seps)
-print 'math module, testing with eps', seps
-import math
-
-def testit(name, value, expected):
- if abs(value-expected) > eps:
- raise TestFailed, '%s returned %f, expected %f'%\
- (name, value, expected)
-
-print 'constants'
-testit('pi', math.pi, 3.1415926)
-testit('e', math.e, 2.7182818)
-
-print 'acos'
-testit('acos(-1)', math.acos(-1), math.pi)
-testit('acos(0)', math.acos(0), math.pi/2)
-testit('acos(1)', math.acos(1), 0)
-
-print 'asin'
-testit('asin(-1)', math.asin(-1), -math.pi/2)
-testit('asin(0)', math.asin(0), 0)
-testit('asin(1)', math.asin(1), math.pi/2)
-
-print 'atan'
-testit('atan(-1)', math.atan(-1), -math.pi/4)
-testit('atan(0)', math.atan(0), 0)
-testit('atan(1)', math.atan(1), math.pi/4)
-
-print 'atan2'
-testit('atan2(-1, 0)', math.atan2(-1, 0), -math.pi/2)
-testit('atan2(-1, 1)', math.atan2(-1, 1), -math.pi/4)
-testit('atan2(0, 1)', math.atan2(0, 1), 0)
-testit('atan2(1, 1)', math.atan2(1, 1), math.pi/4)
-testit('atan2(1, 0)', math.atan2(1, 0), math.pi/2)
-
-print 'ceil'
-testit('ceil(0.5)', math.ceil(0.5), 1)
-testit('ceil(1.0)', math.ceil(1.0), 1)
-testit('ceil(1.5)', math.ceil(1.5), 2)
-testit('ceil(-0.5)', math.ceil(-0.5), 0)
-testit('ceil(-1.0)', math.ceil(-1.0), -1)
-testit('ceil(-1.5)', math.ceil(-1.5), -1)
-
-print 'cos'
-testit('cos(-pi/2)', math.cos(-math.pi/2), 0)
-testit('cos(0)', math.cos(0), 1)
-testit('cos(pi/2)', math.cos(math.pi/2), 0)
-testit('cos(pi)', math.cos(math.pi), -1)
-
-print 'cosh'
-testit('cosh(0)', math.cosh(0), 1)
-testit('cosh(2)-2*cosh(1)**2', math.cosh(2)-2*math.cosh(1)**2, -1) # Thanks to Lambert
-
-print 'exp'
-testit('exp(-1)', math.exp(-1), 1/math.e)
-testit('exp(0)', math.exp(0), 1)
-testit('exp(1)', math.exp(1), math.e)
-
-print 'fabs'
-testit('fabs(-1)', math.fabs(-1), 1)
-testit('fabs(0)', math.fabs(0), 0)
-testit('fabs(1)', math.fabs(1), 1)
-
-print 'floor'
-testit('floor(0.5)', math.floor(0.5), 0)
-testit('floor(1.0)', math.floor(1.0), 1)
-testit('floor(1.5)', math.floor(1.5), 1)
-testit('floor(-0.5)', math.floor(-0.5), -1)
-testit('floor(-1.0)', math.floor(-1.0), -1)
-testit('floor(-1.5)', math.floor(-1.5), -2)
-
-print 'fmod'
-testit('fmod(10,1)', math.fmod(10,1), 0)
-testit('fmod(10,0.5)', math.fmod(10,0.5), 0)
-testit('fmod(10,1.5)', math.fmod(10,1.5), 1)
-testit('fmod(-10,1)', math.fmod(-10,1), 0)
-testit('fmod(-10,0.5)', math.fmod(-10,0.5), 0)
-testit('fmod(-10,1.5)', math.fmod(-10,1.5), -1)
-
-print 'frexp'
-def testfrexp(name, (mant, exp), (emant, eexp)):
- if abs(mant-emant) > eps or exp <> eexp:
- raise TestFailed, '%s returned %s, expected %s'%\
- (name, `mant, exp`, `emant,eexp`)
-
-testfrexp('frexp(-1)', math.frexp(-1), (-0.5, 1))
-testfrexp('frexp(0)', math.frexp(0), (0, 0))
-testfrexp('frexp(1)', math.frexp(1), (0.5, 1))
-testfrexp('frexp(2)', math.frexp(2), (0.5, 2))
-
-print 'hypot'
-testit('hypot(0,0)', math.hypot(0,0), 0)
-testit('hypot(3,4)', math.hypot(3,4), 5)
-
-print 'ldexp'
-testit('ldexp(0,1)', math.ldexp(0,1), 0)
-testit('ldexp(1,1)', math.ldexp(1,1), 2)
-testit('ldexp(1,-1)', math.ldexp(1,-1), 0.5)
-testit('ldexp(-1,1)', math.ldexp(-1,1), -2)
-
-print 'log'
-testit('log(1/e)', math.log(1/math.e), -1)
-testit('log(1)', math.log(1), 0)
-testit('log(e)', math.log(math.e), 1)
-
-print 'log10'
-testit('log10(0.1)', math.log10(0.1), -1)
-testit('log10(1)', math.log10(1), 0)
-testit('log10(10)', math.log10(10), 1)
-
-print 'modf'
-def testmodf(name, (v1, v2), (e1, e2)):
- if abs(v1-e1) > eps or abs(v2-e2) > eps:
- raise TestFailed, '%s returned %s, expected %s'%\
- (name, `v1,v2`, `e1,e2`)
-
-testmodf('modf(1.5)', math.modf(1.5), (0.5, 1.0))
-testmodf('modf(-1.5)', math.modf(-1.5), (-0.5, -1.0))
-
-print 'pow'
-testit('pow(0,1)', math.pow(0,1), 0)
-testit('pow(1,0)', math.pow(1,0), 1)
-testit('pow(2,1)', math.pow(2,1), 2)
-testit('pow(2,-1)', math.pow(2,-1), 0.5)
-
-print 'sin'
-testit('sin(0)', math.sin(0), 0)
-testit('sin(pi/2)', math.sin(math.pi/2), 1)
-testit('sin(-pi/2)', math.sin(-math.pi/2), -1)
-
-print 'sinh'
-testit('sinh(0)', math.sinh(0), 0)
-testit('sinh(1)**2-cosh(1)**2', math.sinh(1)**2-math.cosh(1)**2, -1)
-testit('sinh(1)+sinh(-1)', math.sinh(1)+math.sinh(-1), 0)
-
-print 'sqrt'
-testit('sqrt(0)', math.sqrt(0), 0)
-testit('sqrt(1)', math.sqrt(1), 1)
-testit('sqrt(4)', math.sqrt(4), 2)
-
-print 'tan'
-testit('tan(0)', math.tan(0), 0)
-testit('tan(pi/4)', math.tan(math.pi/4), 1)
-testit('tan(-pi/4)', math.tan(-math.pi/4), -1)
-
-print 'tanh'
-testit('tanh(0)', math.tanh(0), 0)
-testit('tanh(1)+tanh(-1)', math.tanh(1)+math.tanh(-1), 0)
-
-# RED_FLAG 16-Oct-2000 Tim
-# While 2.0 is more consistent about exceptions than previous releases, it
-# still fails this part of the test on some platforms. For now, we only
-# *run* test_exceptions() in verbose mode, so that this isn't normally
-# tested.
-
-def test_exceptions():
- print 'exceptions'
- try:
- x = math.exp(-1000000000)
- except:
- # mathmodule.c is failing to weed out underflows from libm, or
- # we've got an fp format with huge dynamic range
- raise TestFailed("underflowing exp() should not have raised "
- "an exception")
- if x != 0:
- raise TestFailed("underflowing exp() should have returned 0")
-
- # If this fails, probably using a strict IEEE-754 conforming libm, and x
- # is +Inf afterwards. But Python wants overflows detected by default.
- try:
- x = math.exp(1000000000)
- except OverflowError:
- pass
- else:
- raise TestFailed("overflowing exp() didn't trigger OverflowError")
-
- # If this fails, it could be a puzzle. One odd possibility is that
- # mathmodule.c's CHECK() macro is getting confused while comparing
- # Inf (HUGE_VAL) to a NaN, and artificially setting errno to ERANGE
- # as a result (and so raising OverflowError instead).
- try:
- x = math.sqrt(-1.0)
- except ValueError:
- pass
- else:
- raise TestFailed("sqrt(-1) didn't raise ValueError")
-
-if verbose:
- test_exceptions()
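The testit helper above compares against a fixed absolute eps of 1e-05, which is fine near 1 but too loose or too strict elsewhere. Purely as an aside (not a change to the deleted test), a current Python would usually express the same checks with math.isclose, which combines a relative and an absolute tolerance:

    import math

    def testit(name, value, expected, rel_tol=1e-9, abs_tol=1e-12):
        # relative tolerance handles large results, absolute handles near-zero ones
        if not math.isclose(value, expected, rel_tol=rel_tol, abs_tol=abs_tol):
            raise AssertionError("%s returned %r, expected %r"
                                 % (name, value, expected))

    testit("cos(pi)", math.cos(math.pi), -1.0)
    testit("hypot(3, 4)", math.hypot(3, 4), 5.0)
    testit("sin(pi)", math.sin(math.pi), 0.0)   # only passes thanks to abs_tol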
diff --git a/Lib/dos-8x3/test_mim.py b/Lib/dos-8x3/test_mim.py
deleted file mode 100644
index 8b74421..0000000
--- a/Lib/dos-8x3/test_mim.py
+++ /dev/null
@@ -1,170 +0,0 @@
-"""Test program for MimeWriter module.
-
-The test program was too big to comfortably fit in the MimeWriter
-class, so it's here in its own file.
-
-This should generate Barry's example, modulo some quotes and newlines.
-
-"""
-
-
-from MimeWriter import MimeWriter
-
-SELLER = '''\
-INTERFACE Seller-1;
-
-TYPE Seller = OBJECT
- DOCUMENTATION "A simple Seller interface to test ILU"
- METHODS
- price():INTEGER,
- END;
-'''
-
-BUYER = '''\
-class Buyer:
- def __setup__(self, maxprice):
- self._maxprice = maxprice
-
- def __main__(self, kos):
- """Entry point upon arrival at a new KOS."""
- broker = kos.broker()
- # B4 == Barry's Big Bass Business :-)
- seller = broker.lookup('Seller_1.Seller', 'B4')
- if seller:
- price = seller.price()
- print 'Seller wants $', price, '... '
- if price > self._maxprice:
- print 'too much!'
- else:
- print "I'll take it!"
- else:
- print 'no seller found here'
-''' # Don't ask why this comment is here
-
-STATE = '''\
-# instantiate a buyer instance and put it in a magic place for the KOS
-# to find.
-__kp__ = Buyer()
-__kp__.__setup__(500)
-'''
-
-SIMPLE_METADATA = [
- ("Interpreter", "python"),
- ("Interpreter-Version", "1.3"),
- ("Owner-Name", "Barry Warsaw"),
- ("Owner-Rendezvous", "bwarsaw@cnri.reston.va.us"),
- ("Home-KSS", "kss.cnri.reston.va.us"),
- ("Identifier", "hdl://cnri.kss/my_first_knowbot"),
- ("Launch-Date", "Mon Feb 12 16:39:03 EST 1996"),
- ]
-
-COMPLEX_METADATA = [
- ("Metadata-Type", "complex"),
- ("Metadata-Key", "connection"),
- ("Access", "read-only"),
- ("Connection-Description", "Barry's Big Bass Business"),
- ("Connection-Id", "B4"),
- ("Connection-Direction", "client"),
- ]
-
-EXTERNAL_METADATA = [
- ("Metadata-Type", "complex"),
- ("Metadata-Key", "generic-interface"),
- ("Access", "read-only"),
- ("Connection-Description", "Generic Interface for All Knowbots"),
- ("Connection-Id", "generic-kp"),
- ("Connection-Direction", "client"),
- ]
-
-
-def main():
- import sys
-
- # Toplevel headers
-
- toplevel = MimeWriter(sys.stdout)
- toplevel.addheader("From", "bwarsaw@cnri.reston.va.us")
- toplevel.addheader("Date", "Mon Feb 12 17:21:48 EST 1996")
- toplevel.addheader("To", "kss-submit@cnri.reston.va.us")
- toplevel.addheader("MIME-Version", "1.0")
-
- # Toplevel body parts
-
- f = toplevel.startmultipartbody("knowbot", "801spam999",
- [("version", "0.1")], prefix=0)
- f.write("This is a multi-part message in MIME format.\n")
-
- # First toplevel body part: metadata
-
- md = toplevel.nextpart()
- md.startmultipartbody("knowbot-metadata", "802spam999")
-
- # Metadata part 1
-
- md1 = md.nextpart()
- md1.addheader("KP-Metadata-Type", "simple")
- md1.addheader("KP-Access", "read-only")
- m = MimeWriter(md1.startbody("message/rfc822"))
- for key, value in SIMPLE_METADATA:
- m.addheader("KPMD-" + key, value)
- m.flushheaders()
- del md1
-
- # Metadata part 2
-
- md2 = md.nextpart()
- for key, value in COMPLEX_METADATA:
- md2.addheader("KP-" + key, value)
- f = md2.startbody("text/isl")
- f.write(SELLER)
- del md2
-
- # Metadata part 3
-
- md3 = md.nextpart()
- f = md3.startbody("message/external-body",
- [("access-type", "URL"),
- ("URL", "hdl://cnri.kss/generic-knowbot")])
- m = MimeWriter(f)
- for key, value in EXTERNAL_METADATA:
- md3.addheader("KP-" + key, value)
- md3.startbody("text/isl")
- # Phantom body doesn't need to be written
-
- md.lastpart()
-
- # Second toplevel body part: code
-
- code = toplevel.nextpart()
- code.startmultipartbody("knowbot-code", "803spam999")
-
- # Code: buyer program source
-
- buyer = code.nextpart()
- buyer.addheader("KP-Module-Name", "BuyerKP")
- f = buyer.startbody("text/plain")
- f.write(BUYER)
-
- code.lastpart()
-
- # Third toplevel body part: state
-
- state = toplevel.nextpart()
- state.addheader("KP-Main-Module", "main")
- state.startmultipartbody("knowbot-state", "804spam999")
-
- # State: a bunch of assignments
-
- st = state.nextpart()
- st.addheader("KP-Module-Name", "main")
- f = st.startbody("text/plain")
- f.write(STATE)
-
- state.lastpart()
-
- # End toplevel body parts
-
- toplevel.lastpart()
-
-
-main()
diff --git a/Lib/dos-8x3/test_min.py b/Lib/dos-8x3/test_min.py
deleted file mode 100644
index 7afdf5d..0000000
--- a/Lib/dos-8x3/test_min.py
+++ /dev/null
@@ -1,413 +0,0 @@
-# test for xml.dom.minidom
-
-from xml.dom.minidom import parse, Node, Document, parseString
-import xml.parsers.expat
-
-import os.path
-import sys
-import traceback
-from test_support import verbose
-
-if __name__ == "__main__":
- base = sys.argv[0]
-else:
- base = __file__
-tstfile = os.path.join(os.path.dirname(base), "test.xml")
-del base
-
-def confirm(test, testname = "Test"):
- if test:
- print "Passed " + testname
- else:
- print "Failed " + testname
- raise Exception
-
-Node._debug = 1
-
-def testParseFromFile():
- from StringIO import StringIO
- dom = parse(StringIO(open(tstfile).read()))
- dom.unlink()
- confirm(isinstance(dom,Document))
-
-def testGetElementsByTagName():
- dom = parse(tstfile)
- confirm(dom.getElementsByTagName("LI") == \
- dom.documentElement.getElementsByTagName("LI"))
- dom.unlink()
-
-def testInsertBefore():
- dom = parse(tstfile)
- docel = dom.documentElement
- #docel.insertBefore( dom.createProcessingInstruction("a", "b"),
- # docel.childNodes[1])
-
- #docel.insertBefore( dom.createProcessingInstruction("a", "b"),
- # docel.childNodes[0])
-
- #confirm( docel.childNodes[0].tet == "a")
- #confirm( docel.childNodes[2].tet == "a")
- dom.unlink()
-
-def testAppendChild():
- dom = parse(tstfile)
- dom.documentElement.appendChild(dom.createComment(u"Hello"))
- confirm(dom.documentElement.childNodes[-1].nodeName == "#comment")
- confirm(dom.documentElement.childNodes[-1].data == "Hello")
- dom.unlink()
-
-def testNonZero():
- dom = parse(tstfile)
- confirm(dom)# should not be zero
- dom.appendChild(dom.createComment("foo"))
- confirm(not dom.childNodes[-1].childNodes)
- dom.unlink()
-
-def testUnlink():
- dom = parse(tstfile)
- dom.unlink()
-
-def testElement():
- dom = Document()
- dom.appendChild(dom.createElement("abc"))
- confirm(dom.documentElement)
- dom.unlink()
-
-def testAAA():
- dom = parseString("<abc/>")
- el = dom.documentElement
- el.setAttribute("spam", "jam2")
- dom.unlink()
-
-def testAAB():
- dom = parseString("<abc/>")
- el = dom.documentElement
- el.setAttribute("spam", "jam")
- el.setAttribute("spam", "jam2")
- dom.unlink()
-
-def testAddAttr():
- dom = Document()
- child = dom.appendChild(dom.createElement("abc"))
-
- child.setAttribute("def", "ghi")
- confirm(child.getAttribute("def") == "ghi")
- confirm(child.attributes["def"].value == "ghi")
-
- child.setAttribute("jkl", "mno")
- confirm(child.getAttribute("jkl") == "mno")
- confirm(child.attributes["jkl"].value == "mno")
-
- confirm(len(child.attributes) == 2)
-
- child.setAttribute("def", "newval")
- confirm(child.getAttribute("def") == "newval")
- confirm(child.attributes["def"].value == "newval")
-
- confirm(len(child.attributes) == 2)
- dom.unlink()
-
-def testDeleteAttr():
- dom = Document()
- child = dom.appendChild(dom.createElement("abc"))
-
- confirm(len(child.attributes) == 0)
- child.setAttribute("def", "ghi")
- confirm(len(child.attributes) == 1)
- del child.attributes["def"]
- confirm(len(child.attributes) == 0)
- dom.unlink()
-
-def testRemoveAttr():
- dom = Document()
- child = dom.appendChild(dom.createElement("abc"))
-
- child.setAttribute("def", "ghi")
- confirm(len(child.attributes) == 1)
- child.removeAttribute("def")
- confirm(len(child.attributes) == 0)
-
- dom.unlink()
-
-def testRemoveAttrNS():
- dom = Document()
- child = dom.appendChild(
- dom.createElementNS("http://www.python.org", "python:abc"))
- child.setAttributeNS("http://www.w3.org", "xmlns:python",
- "http://www.python.org")
- child.setAttributeNS("http://www.python.org", "python:abcattr", "foo")
- confirm(len(child.attributes) == 2)
- child.removeAttributeNS("http://www.python.org", "abcattr")
- confirm(len(child.attributes) == 1)
-
- dom.unlink()
-
-def testRemoveAttributeNode():
- dom = Document()
- child = dom.appendChild(dom.createElement("foo"))
- child.setAttribute("spam", "jam")
- confirm(len(child.attributes) == 1)
- node = child.getAttributeNode("spam")
- child.removeAttributeNode(node)
- confirm(len(child.attributes) == 0)
-
- dom.unlink()
-
-def testChangeAttr():
- dom = parseString("<abc/>")
- el = dom.documentElement
- el.setAttribute("spam", "jam")
- confirm(len(el.attributes) == 1)
- el.setAttribute("spam", "bam")
- confirm(len(el.attributes) == 1)
- el.attributes["spam"] = "ham"
- confirm(len(el.attributes) == 1)
- el.setAttribute("spam2", "bam")
- confirm(len(el.attributes) == 2)
- el.attributes[ "spam2"] = "bam2"
- confirm(len(el.attributes) == 2)
- dom.unlink()
-
-def testGetAttrList():
- pass
-
-def testGetAttrValues(): pass
-
-def testGetAttrLength(): pass
-
-def testGetAttribute(): pass
-
-def testGetAttributeNS(): pass
-
-def testGetAttributeNode(): pass
-
-def testGetElementsByTagNameNS(): pass
-
-def testGetEmptyNodeListFromElementsByTagNameNS(): pass
-
-def testElementReprAndStr():
- dom = Document()
- el = dom.appendChild(dom.createElement("abc"))
- string1 = repr(el)
- string2 = str(el)
- confirm(string1 == string2)
- dom.unlink()
-
-# commented out until Fredrick's fix is checked in
-def _testElementReprAndStrUnicode():
- dom = Document()
- el = dom.appendChild(dom.createElement(u"abc"))
- string1 = repr(el)
- string2 = str(el)
- confirm(string1 == string2)
- dom.unlink()
-
-# commented out until Fredrick's fix is checked in
-def _testElementReprAndStrUnicodeNS():
- dom = Document()
- el = dom.appendChild(
- dom.createElementNS(u"http://www.slashdot.org", u"slash:abc"))
- string1 = repr(el)
- string2 = str(el)
- confirm(string1 == string2)
- confirm(string1.find("slash:abc") != -1)
- dom.unlink()
- confirm(len(Node.allnodes) == 0)
-
-def testAttributeRepr():
- dom = Document()
- el = dom.appendChild(dom.createElement(u"abc"))
- node = el.setAttribute("abc", "def")
- confirm(str(node) == repr(node))
- dom.unlink()
- confirm(len(Node.allnodes) == 0)
-
-def testTextNodeRepr(): pass
-
-def testWriteXML():
- str = '<a b="c"/>'
- dom = parseString(str)
- domstr = dom.toxml()
- dom.unlink()
- confirm(str == domstr)
- confirm(len(Node.allnodes) == 0)
-
-def testProcessingInstruction(): pass
-
-def testProcessingInstructionRepr(): pass
-
-def testTextRepr(): pass
-
-def testWriteText(): pass
-
-def testDocumentElement(): pass
-
-def testTooManyDocumentElements(): pass
-
-def testCreateElementNS(): pass
-
-def testCreatAttributeNS(): pass
-
-def testParse(): pass
-
-def testParseString(): pass
-
-def testComment(): pass
-
-def testAttrListItem(): pass
-
-def testAttrListItems(): pass
-
-def testAttrListItemNS(): pass
-
-def testAttrListKeys(): pass
-
-def testAttrListKeysNS(): pass
-
-def testAttrListValues(): pass
-
-def testAttrListLength(): pass
-
-def testAttrList__getitem__(): pass
-
-def testAttrList__setitem__(): pass
-
-def testSetAttrValueandNodeValue(): pass
-
-def testParseElement(): pass
-
-def testParseAttributes(): pass
-
-def testParseElementNamespaces(): pass
-
-def testParseAttributeNamespaces(): pass
-
-def testParseProcessingInstructions(): pass
-
-def testChildNodes(): pass
-
-def testFirstChild(): pass
-
-def testHasChildNodes(): pass
-
-def testCloneElementShallow(): pass
-
-def testCloneElementShallowCopiesAttributes(): pass
-
-def testCloneElementDeep(): pass
-
-def testCloneDocumentShallow(): pass
-
-def testCloneDocumentDeep(): pass
-
-def testCloneAttributeShallow(): pass
-
-def testCloneAttributeDeep(): pass
-
-def testClonePIShallow(): pass
-
-def testClonePIDeep(): pass
-
-def testSiblings():
- doc = parseString("<doc><?pi?>text?<elm/></doc>")
- root = doc.documentElement
- (pi, text, elm) = root.childNodes
-
- confirm(pi.nextSibling is text and
- pi.previousSibling is None and
- text.nextSibling is elm and
- text.previousSibling is pi and
- elm.nextSibling is None and
- elm.previousSibling is text, "testSiblings")
-
- doc.unlink()
-
-def testParents():
- doc = parseString("<doc><elm1><elm2/><elm2><elm3/></elm2></elm1></doc>")
- root = doc.documentElement
- elm1 = root.childNodes[0]
- (elm2a, elm2b) = elm1.childNodes
- elm3 = elm2b.childNodes[0]
-
- confirm(root.parentNode is doc and
- elm1.parentNode is root and
- elm2a.parentNode is elm1 and
- elm2b.parentNode is elm1 and
- elm3.parentNode is elm2b, "testParents")
-
- doc.unlink()
-
-def testSAX2DOM():
- from xml.dom import pulldom
-
- sax2dom = pulldom.SAX2DOM()
- sax2dom.startDocument()
- sax2dom.startElement("doc", {})
- sax2dom.characters("text")
- sax2dom.startElement("subelm", {})
- sax2dom.characters("text")
- sax2dom.endElement("subelm")
- sax2dom.characters("text")
- sax2dom.endElement("doc")
- sax2dom.endDocument()
-
- doc = sax2dom.document
- root = doc.documentElement
- (text1, elm1, text2) = root.childNodes
- text3 = elm1.childNodes[0]
-
- confirm(text1.previousSibling is None and
- text1.nextSibling is elm1 and
- elm1.previousSibling is text1 and
- elm1.nextSibling is text2 and
- text2.previousSibling is elm1 and
- text2.nextSibling is None and
- text3.previousSibling is None and
- text3.nextSibling is None, "testSAX2DOM - siblings")
-
- confirm(root.parentNode is doc and
- text1.parentNode is root and
- elm1.parentNode is root and
- text2.parentNode is root and
- text3.parentNode is elm1, "testSAX2DOM - parents")
-
- doc.unlink()
-
-# --- MAIN PROGRAM
-
-names = globals().keys()
-names.sort()
-
-works = 1
-
-for name in names:
- if name.startswith("test"):
- func = globals()[name]
- try:
- func()
- print "Test Succeeded", name
- confirm(len(Node.allnodes) == 0,
- "assertion: len(Node.allnodes) == 0")
- if len(Node.allnodes):
- print "Garbage left over:"
- if verbose:
- print Node.allnodes.items()[0:10]
- else:
- # Don't print specific nodes if repeatable results
- # are needed
- print len(Node.allnodes)
- Node.allnodes = {}
- except Exception, e:
- works = 0
- print "Test Failed: ", name
- traceback.print_exception(*sys.exc_info())
- print `e`
- Node.allnodes = {}
-
-if works:
- print "All tests succeeded"
-else:
- print "\n\n\n\n************ Check for failures!"
-
-Node.debug = None # Delete debug output collected in a StringIO object
-Node._debug = 0 # And reset debug mode
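Most of the deleted minidom test is scaffolding around one pattern: parse, walk childNodes and attributes, then unlink() to break the parent/child reference cycles that this era of minidom tracked through Node.allnodes. A short hedged illustration of that pattern (the document string is invented for the example):

    from xml.dom.minidom import parseString

    dom = parseString('<doc><child name="a"/>text</doc>')
    root = dom.documentElement
    child, text = root.childNodes
    assert child.getAttribute("name") == "a"
    assert text.data == "text"
    dom.unlink()   # break cycles, as the test above does after every case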
diff --git a/Lib/dos-8x3/test_mma.py b/Lib/dos-8x3/test_mma.py
deleted file mode 100644
index 449c674..0000000
--- a/Lib/dos-8x3/test_mma.py
+++ /dev/null
@@ -1,121 +0,0 @@
-
-import mmap
-import string, os, re, sys
-
-PAGESIZE = mmap.PAGESIZE
-
-def test_both():
- "Test mmap module on Unix systems and Windows"
-
- # Create an mmap'ed file
- f = open('foo', 'w+')
-
- # Write 2 pages worth of data to the file
- f.write('\0'* PAGESIZE)
- f.write('foo')
- f.write('\0'* (PAGESIZE-3) )
-
- m = mmap.mmap(f.fileno(), 2 * PAGESIZE)
- f.close()
-
- # Simple sanity checks
- print ' Position of foo:', string.find(m, 'foo') / float(PAGESIZE), 'pages'
- assert string.find(m, 'foo') == PAGESIZE
-
- print ' Length of file:', len(m) / float(PAGESIZE), 'pages'
- assert len(m) == 2*PAGESIZE
-
- print ' Contents of byte 0:', repr(m[0])
- assert m[0] == '\0'
- print ' Contents of first 3 bytes:', repr(m[0:3])
- assert m[0:3] == '\0\0\0'
-
- # Modify the file's content
- print "\n Modifying file's content..."
- m[0] = '3'
- m[PAGESIZE +3: PAGESIZE +3+3]='bar'
-
- # Check that the modification worked
- print ' Contents of byte 0:', repr(m[0])
- assert m[0] == '3'
- print ' Contents of first 3 bytes:', repr(m[0:3])
- assert m[0:3] == '3\0\0'
- print ' Contents of second page:', repr(m[PAGESIZE-1 : PAGESIZE + 7])
- assert m[PAGESIZE-1 : PAGESIZE + 7] == '\0foobar\0'
-
- m.flush()
-
- # Test doing a regular expression match in an mmap'ed file
- match=re.search('[A-Za-z]+', m)
- if match == None:
- print ' ERROR: regex match on mmap failed!'
- else:
- start, end = match.span(0)
- length = end - start
-
- print ' Regex match on mmap (page start, length of match):',
- print start / float(PAGESIZE), length
-
- assert start == PAGESIZE
- assert end == PAGESIZE + 6
-
- # test seeking around (try to overflow the seek implementation)
- m.seek(0,0)
- print ' Seek to zeroth byte'
- assert m.tell() == 0
- m.seek(42,1)
- print ' Seek to 42nd byte'
- assert m.tell() == 42
- m.seek(0,2)
- print ' Seek to last byte'
- assert m.tell() == len(m)
-
- print ' Try to seek to negative position...'
- try:
- m.seek(-1)
- except ValueError:
- pass
- else:
- assert 0, 'expected a ValueError but did not get it'
-
- print ' Try to seek beyond end of mmap...'
- try:
- m.seek(1,2)
- except ValueError:
- pass
- else:
- assert 0, 'expected a ValueError but did not get it'
-
- print ' Try to seek to negative position...'
- try:
- m.seek(-len(m)-1,2)
- except ValueError:
- pass
- else:
- assert 0, 'expected a ValueError but did not get it'
-
- # Try resizing map
- print ' Attempting resize()'
- try:
- m.resize( 512 )
- except SystemError:
- # resize() not supported
- # No messages are printed, since the output of this test suite
- # would then be different across platforms.
- pass
- else:
- # resize() is supported
- assert len(m) == 512, "len(m) is %d, but expecting 512" % (len(m),)
- # Check that we can no longer seek beyond the new size.
- try:
- m.seek(513,0)
- except ValueError:
- pass
- else:
- assert 0, 'Could seek beyond the new size'
-
- m.close()
- os.unlink("foo")
- print ' Test passed'
-
-test_both()
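test_mma.py is essentially a tour of the mmap object: find, slicing, in-place mutation, and bounds-checked seek. A small sketch of the same ideas, written for a current Python 3 interpreter (the two-page layout mirrors the deleted test; the file itself is a throwaway temp file rather than the literal 'foo' used above):

    import mmap, os, tempfile

    PAGESIZE = mmap.PAGESIZE

    fd, path = tempfile.mkstemp()
    with os.fdopen(fd, "wb") as f:
        f.write(b"\0" * PAGESIZE + b"foo" + b"\0" * (PAGESIZE - 3))

    with open(path, "r+b") as f:
        m = mmap.mmap(f.fileno(), 0)          # length 0 maps the whole file
        assert len(m) == 2 * PAGESIZE
        assert m.find(b"foo") == PAGESIZE     # the marker sits on page two
        m[PAGESIZE:PAGESIZE + 3] = b"bar"     # writes go through to the file
        m.seek(0, 2)                          # seek relative to the end
        assert m.tell() == len(m)
        try:
            m.seek(-len(m) - 1, 2)            # out-of-range seeks must fail
        except ValueError:
            pass
        else:
            raise AssertionError("seek before the start should raise")
        m.flush()
        m.close()

    os.unlink(path)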
diff --git a/Lib/dos-8x3/test_ntp.py b/Lib/dos-8x3/test_ntp.py
deleted file mode 100644
index 11f2f44..0000000
--- a/Lib/dos-8x3/test_ntp.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import ntpath
-import string
-import os
-
-errors = 0
-
-def tester(fn, wantResult):
- fn = string.replace(fn, "\\", "\\\\")
- gotResult = eval(fn)
- if wantResult != gotResult:
- print "error!"
- print "evaluated: " + str(fn)
- print "should be: " + str(wantResult)
- print " returned: " + str(gotResult)
- print ""
- global errors
- errors = errors + 1
-
-tester('ntpath.splitdrive("c:\\foo\\bar")', ('c:', '\\foo\\bar'))
-tester('ntpath.splitunc("\\\\conky\\mountpoint\\foo\\bar")', ('\\\\conky\\mountpoint', '\\foo\\bar'))
-tester('ntpath.splitdrive("c:/foo/bar")', ('c:', '/foo/bar'))
-tester('ntpath.splitunc("//conky/mountpoint/foo/bar")', ('//conky/mountpoint', '/foo/bar'))
-
-tester('ntpath.split("c:\\foo\\bar")', ('c:\\foo', 'bar'))
-tester('ntpath.split("\\\\conky\\mountpoint\\foo\\bar")', ('\\\\conky\\mountpoint\\foo', 'bar'))
-
-tester('ntpath.split("c:\\")', ('c:\\', ''))
-tester('ntpath.split("\\\\conky\\mountpoint\\")', ('\\\\conky\\mountpoint', ''))
-
-tester('ntpath.split("c:/")', ('c:/', ''))
-tester('ntpath.split("//conky/mountpoint/")', ('//conky/mountpoint', ''))
-
-tester('ntpath.isabs("c:\\")', 1)
-tester('ntpath.isabs("\\\\conky\\mountpoint\\")', 1)
-tester('ntpath.isabs("\\foo")', 1)
-tester('ntpath.isabs("\\foo\\bar")', 1)
-
-tester('ntpath.abspath("C:\\")', "C:\\")
-
-tester('ntpath.commonprefix(["/home/swenson/spam", "/home/swen/spam"])',
- "/home/swen")
-tester('ntpath.commonprefix(["\\home\\swen\\spam", "\\home\\swen\\eggs"])',
- "\\home\\swen\\")
-tester('ntpath.commonprefix(["/home/swen/spam", "/home/swen/spam"])',
- "/home/swen/spam")
-
-if errors:
- print str(errors) + " errors."
-else:
- print "No errors. Thank your lucky stars."
-
diff --git a/Lib/dos-8x3/test_opc.py b/Lib/dos-8x3/test_opc.py
deleted file mode 100755
index bdace26..0000000
--- a/Lib/dos-8x3/test_opc.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# Python test set -- part 2, opcodes
-
-from test_support import *
-
-
-print '2. Opcodes'
-print 'XXX Not yet fully implemented'
-
-print '2.1 try inside for loop'
-n = 0
-for i in range(10):
- n = n+i
- try: 1/0
- except NameError: pass
- except ZeroDivisionError: pass
- except TypeError: pass
- try: pass
- except: pass
- try: pass
- finally: pass
- n = n+i
-if n <> 90:
- raise TestFailed, 'try inside for'
-
-
-print '2.2 raise class exceptions'
-
-class AClass: pass
-class BClass(AClass): pass
-class CClass: pass
-class DClass(AClass):
- def __init__(self, ignore):
- pass
-
-try: raise AClass()
-except: pass
-
-try: raise AClass()
-except AClass: pass
-
-try: raise BClass()
-except AClass: pass
-
-try: raise BClass()
-except CClass: raise TestFailed
-except: pass
-
-a = AClass()
-b = BClass()
-
-try: raise AClass, b
-except BClass, v:
- if v != b: raise TestFailed
-else: raise TestFailed
-
-try: raise b
-except AClass, v:
- if v != b: raise TestFailed
-
-# not enough arguments
-try: raise BClass, a
-except TypeError: pass
-
-try: raise DClass, a
-except DClass, v:
- if not isinstance(v, DClass):
- raise TestFailed
-
-print '2.3 comparing function objects'
-
-f = eval('lambda: None')
-g = eval('lambda: None')
-if f != g: raise TestFailed
-
-f = eval('lambda a: a')
-g = eval('lambda a: a')
-if f != g: raise TestFailed
-
-f = eval('lambda a=1: a')
-g = eval('lambda a=1: a')
-if f != g: raise TestFailed
-
-f = eval('lambda: 0')
-g = eval('lambda: 1')
-if f == g: raise TestFailed
-
-f = eval('lambda: None')
-g = eval('lambda a: None')
-if f == g: raise TestFailed
-
-f = eval('lambda a: None')
-g = eval('lambda b: None')
-if f == g: raise TestFailed
-
-f = eval('lambda a: None')
-g = eval('lambda a=None: None')
-if f == g: raise TestFailed
-
-f = eval('lambda a=0: None')
-g = eval('lambda a=1: None')
-if f == g: raise TestFailed
diff --git a/Lib/dos-8x3/test_ope.py b/Lib/dos-8x3/test_ope.py
deleted file mode 100755
index 723e57c..0000000
--- a/Lib/dos-8x3/test_ope.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Test to see if openpty works. (But don't worry if it isn't available.)
-
-import os
-from test_support import verbose, TestFailed, TestSkipped
-
-try:
- if verbose:
- print "Calling os.openpty()"
- master, slave = os.openpty()
- if verbose:
- print "(master, slave) = (%d, %d)"%(master, slave)
-except AttributeError:
- raise TestSkipped, "No openpty() available."
-
-if not os.isatty(master):
- raise TestFailed, "Master-end of pty is not a terminal."
-if not os.isatty(slave):
- raise TestFailed, "Slave-end of pty is not a terminal."
-
-os.write(slave, 'Ping!')
-print os.read(master, 1024)
-
diff --git a/Lib/dos-8x3/test_par.py b/Lib/dos-8x3/test_par.py
deleted file mode 100644
index bc41de4..0000000
--- a/Lib/dos-8x3/test_par.py
+++ /dev/null
@@ -1,178 +0,0 @@
-import os.path
-import parser
-import pprint
-import sys
-
-from parser import expr, suite, sequence2ast
-from test_support import verbose
-
-#
-# First, we test that we can generate trees from valid source fragments,
-# and that these valid trees are indeed allowed by the tree-loading side
-# of the parser module.
-#
-
-def roundtrip(f, s):
- st1 = f(s)
- t = st1.totuple()
- st2 = parser.sequence2ast(t)
-
-def roundtrip_fromfile(filename):
- roundtrip(suite, open(filename).read())
-
-def test_expr(s):
- print "expr:", s
- roundtrip(expr, s)
-
-def test_suite(s):
- print "suite:", s
- roundtrip(suite, s)
-
-
-print "Expressions:"
-
-test_expr("foo(1)")
-test_expr("[1, 2, 3]")
-test_expr("[x**3 for x in range(20)]")
-test_expr("[x**3 for x in range(20) if x % 3]")
-test_expr("foo(*args)")
-test_expr("foo(*args, **kw)")
-test_expr("foo(**kw)")
-test_expr("foo(key=value)")
-test_expr("foo(key=value, *args)")
-test_expr("foo(key=value, *args, **kw)")
-test_expr("foo(key=value, **kw)")
-test_expr("foo(a, b, c, *args)")
-test_expr("foo(a, b, c, *args, **kw)")
-test_expr("foo(a, b, c, **kw)")
-test_expr("foo + bar")
-
-print
-print "Statements:"
-test_suite("print")
-test_suite("print 1")
-test_suite("print 1,")
-test_suite("print >>fp")
-test_suite("print >>fp, 1")
-test_suite("print >>fp, 1,")
-
-# expr_stmt
-test_suite("a")
-test_suite("a = b")
-test_suite("a = b = c = d = e")
-test_suite("a += b")
-test_suite("a -= b")
-test_suite("a *= b")
-test_suite("a /= b")
-test_suite("a %= b")
-test_suite("a &= b")
-test_suite("a |= b")
-test_suite("a ^= b")
-test_suite("a <<= b")
-test_suite("a >>= b")
-test_suite("a **= b")
-
-#d = os.path.dirname(os.__file__)
-#roundtrip_fromfile(os.path.join(d, "os.py"))
-#roundtrip_fromfile(os.path.join(d, "test", "test_parser.py"))
-
-#
-# Second, we take *invalid* trees and make sure we get ParserError
-# rejections for them.
-#
-
-print
-print "Invalid parse trees:"
-
-def check_bad_tree(tree, label):
- print
- print label
- try:
- sequence2ast(tree)
- except parser.ParserError:
- print "caught expected exception for invalid tree"
- pass
- else:
- print "test failed: did not properly detect invalid tree:"
- pprint.pprint(tree)
-
-
-# not even remotely valid:
-check_bad_tree((1, 2, 3), "<junk>")
-
-# print >>fp,
-tree = \
-(257,
- (264,
- (265,
- (266,
- (268,
- (1, 'print'),
- (35, '>>'),
- (290,
- (291,
- (292,
- (293,
- (295,
- (296,
- (297,
- (298, (299, (300, (301, (302, (303, (1, 'fp')))))))))))))),
- (12, ','))),
- (4, ''))),
- (0, ''))
-
-check_bad_tree(tree, "print >>fp,")
-
-# a,,c
-tree = \
-(258,
- (311,
- (290,
- (291,
- (292,
- (293,
- (295,
- (296, (297, (298, (299, (300, (301, (302, (303, (1, 'a')))))))))))))),
- (12, ','),
- (12, ','),
- (290,
- (291,
- (292,
- (293,
- (295,
- (296, (297, (298, (299, (300, (301, (302, (303, (1, 'c'))))))))))))))),
- (4, ''),
- (0, ''))
-
-check_bad_tree(tree, "a,,c")
-
-# a $= b
-tree = \
-(257,
- (264,
- (265,
- (266,
- (267,
- (312,
- (291,
- (292,
- (293,
- (294,
- (296,
- (297,
- (298,
- (299, (300, (301, (302, (303, (304, (1, 'a'))))))))))))))),
- (268, (37, '$=')),
- (312,
- (291,
- (292,
- (293,
- (294,
- (296,
- (297,
- (298,
- (299, (300, (301, (302, (303, (304, (1, 'b'))))))))))))))))),
- (4, ''))),
- (0, ''))
-
-check_bad_tree(tree, "a $= b")
diff --git a/Lib/dos-8x3/test_pic.py b/Lib/dos-8x3/test_pic.py
deleted file mode 100644
index 3c81fdd..0000000
--- a/Lib/dos-8x3/test_pic.py
+++ /dev/null
@@ -1,143 +0,0 @@
-# Test the pickle module
-
-# break into multiple strings to please font-lock-mode
-DATA = """(lp0
-I0
-aL1L
-aF2.0
-ac__builtin__
-complex
-p1
-""" \
-"""(F3.0
-F0.0
-tp2
-Rp3
-a(S'abc'
-p4
-g4
-""" \
-"""(i__main__
-C
-p5
-""" \
-"""(dp6
-S'foo'
-p7
-I1
-sS'bar'
-p8
-I2
-sbg5
-tp9
-ag9
-aI5
-a.
-"""
-
-BINDATA = ']q\000(K\000L1L\012G@\000\000\000\000\000\000\000c__builtin__\012complex\012q\001(G@\010\000\000\000\000\000\000G\000\000\000\000\000\000\000\000tq\002Rq\003(U\003abcq\004h\004(c__main__\012C\012q\005oq\006}q\007(U\003fooq\010K\001U\003barq\011K\002ubh\006tq\012h\012K\005e.'
-
-class C:
- def __cmp__(self, other):
- return cmp(self.__dict__, other.__dict__)
-
-import __main__
-__main__.C = C
-
-def dotest(pickle):
- c = C()
- c.foo = 1
- c.bar = 2
- x = [0, 1L, 2.0, 3.0+0j]
- y = ('abc', 'abc', c, c)
- x.append(y)
- x.append(y)
- x.append(5)
- r = []
- r.append(r)
- print "dumps()"
- s = pickle.dumps(x)
- print "loads()"
- x2 = pickle.loads(s)
- if x2 == x: print "ok"
- else: print "bad"
- print "loads() DATA"
- x2 = pickle.loads(DATA)
- if x2 == x: print "ok"
- else: print "bad"
- print "dumps() binary"
- s = pickle.dumps(x, 1)
- print "loads() binary"
- x2 = pickle.loads(s)
- if x2 == x: print "ok"
- else: print "bad"
- print "loads() BINDATA"
- x2 = pickle.loads(BINDATA)
- if x2 == x: print "ok"
- else: print "bad"
- s = pickle.dumps(r)
- print "dumps() RECURSIVE"
- x2 = pickle.loads(s)
- if x2 == r: print "ok"
- else: print "bad"
- # don't create cyclic garbage
- del x2[0]
- del r[0]
-
- # Test protection against closed files
- import tempfile, os
- fn = tempfile.mktemp()
- f = open(fn, "w")
- f.close()
- try:
- pickle.dump(123, f)
- except ValueError:
- pass
- else:
- print "dump to closed file should raise ValueError"
- f = open(fn, "r")
- f.close()
- try:
- pickle.load(f)
- except ValueError:
- pass
- else:
- print "load from closed file should raise ValueError"
- os.remove(fn)
-
- # Test specific bad cases
- for i in range(10):
- try:
- x = pickle.loads('garyp')
- except KeyError, y:
- # pickle
- del y
- except pickle.BadPickleGet, y:
- # cPickle
- del y
- else:
- print "unexpected success!"
- break
-
- # Test insecure strings
- insecure = ["abc", "2 + 2", # not quoted
- "'abc' + 'def'", # not a single quoted string
- "'abc", # quote is not closed
- "'abc\"", # open quote and close quote don't match
- "'abc' ?", # junk after close quote
- # some tests of the quoting rules
- "'abc\"\''",
- "'\\\\a\'\'\'\\\'\\\\\''",
- ]
- for s in insecure:
- buf = "S" + s + "\012p0\012."
- try:
- x = pickle.loads(buf)
- except ValueError:
- pass
- else:
- print "accepted insecure string: %s" % repr(buf)
-
-
-import pickle
-dotest(pickle)
diff --git a/Lib/dos-8x3/test_pol.py b/Lib/dos-8x3/test_pol.py
deleted file mode 100644
index 82d8372..0000000
--- a/Lib/dos-8x3/test_pol.py
+++ /dev/null
@@ -1,172 +0,0 @@
-# Test case for the os.poll() function
-
-import sys, os, select, random
-from test_support import verbose, TestSkipped, TESTFN
-
-try:
- select.poll
-except AttributeError:
- raise TestSkipped, "select.poll not defined -- skipping test_poll"
-
-
-def find_ready_matching(ready, flag):
- match = []
- for fd, mode in ready:
- if mode & flag:
- match.append(fd)
- return match
-
-def test_poll1():
- """Basic functional test of poll object
-
- Create a bunch of pipes and test that poll works with them.
- """
- print 'Running poll test 1'
- p = select.poll()
-
- NUM_PIPES = 12
- MSG = " This is a test."
- MSG_LEN = len(MSG)
- readers = []
- writers = []
- r2w = {}
- w2r = {}
-
- for i in range(NUM_PIPES):
- rd, wr = os.pipe()
- p.register(rd, select.POLLIN)
- p.register(wr, select.POLLOUT)
- readers.append(rd)
- writers.append(wr)
- r2w[rd] = wr
- w2r[wr] = rd
-
- while writers:
- ready = p.poll()
- ready_writers = find_ready_matching(ready, select.POLLOUT)
- if not ready_writers:
- raise RuntimeError, "no pipes ready for writing"
- wr = random.choice(ready_writers)
- os.write(wr, MSG)
-
- ready = p.poll()
- ready_readers = find_ready_matching(ready, select.POLLIN)
- if not ready_readers:
- raise RuntimeError, "no pipes ready for reading"
- rd = random.choice(ready_readers)
- buf = os.read(rd, MSG_LEN)
- assert len(buf) == MSG_LEN
- print buf
- os.close(r2w[rd]) ; os.close( rd )
- p.unregister( r2w[rd] )
- p.unregister( rd )
- writers.remove(r2w[rd])
-
- poll_unit_tests()
- print 'Poll test 1 complete'
-
-def poll_unit_tests():
- # returns NVAL for invalid file descriptor
- FD = 42
- try:
- os.close(FD)
- except OSError:
- pass
- p = select.poll()
- p.register(FD)
- r = p.poll()
- assert r[0] == (FD, select.POLLNVAL)
-
- f = open(TESTFN, 'w')
- fd = f.fileno()
- p = select.poll()
- p.register(f)
- r = p.poll()
- assert r[0][0] == fd
- f.close()
- r = p.poll()
- assert r[0] == (fd, select.POLLNVAL)
- os.unlink(TESTFN)
-
- # type error for invalid arguments
- p = select.poll()
- try:
- p.register(p)
- except TypeError:
- pass
- else:
- print "Bogus register call did not raise TypeError"
- try:
- p.unregister(p)
- except TypeError:
- pass
- else:
- print "Bogus unregister call did not raise TypeError"
-
- # can't unregister non-existent object
- p = select.poll()
- try:
- p.unregister(3)
- except KeyError:
- pass
- else:
- print "Bogus unregister call did not raise KeyError"
-
- # Test error cases
- pollster = select.poll()
- class Nope:
- pass
-
- class Almost:
- def fileno(self):
- return 'fileno'
-
- try:
- pollster.register( Nope(), 0 )
- except TypeError: pass
- else: print 'expected TypeError exception, not raised'
-
- try:
- pollster.register( Almost(), 0 )
- except TypeError: pass
- else: print 'expected TypeError exception, not raised'
-
-
-# Another test case for poll(). This is copied from the test case for
-# select(), modified to use poll() instead.
-
-def test_poll2():
- print 'Running poll test 2'
- cmd = 'for i in 0 1 2 3 4 5 6 7 8 9; do echo testing...; sleep 1; done'
- p = os.popen(cmd, 'r')
- pollster = select.poll()
- pollster.register( p, select.POLLIN )
- for tout in (0, 1000, 2000, 4000, 8000, 16000) + (-1,)*10:
- if verbose:
- print 'timeout =', tout
- fdlist = pollster.poll(tout)
- if (fdlist == []):
- continue
- fd, flags = fdlist[0]
- if flags & select.POLLHUP:
- line = p.readline()
- if line != "":
- print 'error: pipe seems to be closed, but still returns data'
- continue
-
- elif flags & select.POLLIN:
- line = p.readline()
- if verbose:
- print `line`
- if not line:
- if verbose:
- print 'EOF'
- break
- continue
- else:
- print 'Unexpected return value from select.poll:', fdlist
- p.close()
- print 'Poll test 2 complete'
-
-test_poll1()
-test_poll2()
diff --git a/Lib/dos-8x3/test_pop.py b/Lib/dos-8x3/test_pop.py
deleted file mode 100644
index 1215847..0000000
--- a/Lib/dos-8x3/test_pop.py
+++ /dev/null
@@ -1,65 +0,0 @@
-#! /usr/bin/env python
-"""Test script for popen2.py
- Christian Tismer
-"""
-
-import os
-
-# popen2 contains its own testing routine
-# which is especially useful to see if open files
-# like stdin can be read successfully by a forked
-# subprocess.
-
-def main():
- print "Test popen2 module:"
- try:
- from os import popen
- except ImportError:
- # if we don't have os.popen, check that
- # we have os.fork. if not, skip the test
- # (by raising an ImportError)
- from os import fork
- import popen2
- popen2._test()
-
-
-def _test():
- # same test as popen2._test(), but using the os.popen*() API
- print "Testing os module:"
- import popen2
- cmd = "cat"
- teststr = "ab cd\n"
- if os.name == "nt":
- cmd = "more"
- # "more" doesn't act the same way across Windows flavors,
- # sometimes adding an extra newline at the start or the
- # end. So we strip whitespace off both ends for comparison.
- expected = teststr.strip()
- print "testing popen2..."
- w, r = os.popen2(cmd)
- w.write(teststr)
- w.close()
- got = r.read()
- if got.strip() != expected:
- raise ValueError("wrote %s read %s" % (`teststr`, `got`))
- print "testing popen3..."
- try:
- w, r, e = os.popen3([cmd])
- except:
- w, r, e = os.popen3(cmd)
- w.write(teststr)
- w.close()
- got = r.read()
- if got.strip() != expected:
- raise ValueError("wrote %s read %s" % (`teststr`, `got`))
- got = e.read()
- if got:
- raise ValueError("unexected %s on stderr" % `got`)
- for inst in popen2._active[:]:
- inst.wait()
- if popen2._active:
- raise ValueError("_active not empty")
- print "All OK"
-
-main()
-_test()
diff --git a/Lib/dos-8x3/test_pos.py b/Lib/dos-8x3/test_pos.py
deleted file mode 100644
index 27bee61..0000000
--- a/Lib/dos-8x3/test_pos.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import posixpath
-import string
-
-errors = 0
-
-def tester(fn, wantResult):
- gotResult = eval(fn)
- if wantResult != gotResult:
- print "error!"
- print "evaluated: " + str(fn)
- print "should be: " + str(wantResult)
- print " returned: " + str(gotResult)
- print ""
- global errors
- errors = errors + 1
-
-tester('posixpath.splitdrive("/foo/bar")', ('', '/foo/bar'))
-
-tester('posixpath.split("/foo/bar")', ('/foo', 'bar'))
-tester('posixpath.split("/")', ('/', ''))
-tester('posixpath.split("foo")', ('', 'foo'))
-
-tester('posixpath.splitext("foo.ext")', ('foo', '.ext'))
-tester('posixpath.splitext("/foo/foo.ext")', ('/foo/foo', '.ext'))
-
-tester('posixpath.isabs("/")', 1)
-tester('posixpath.isabs("/foo")', 1)
-tester('posixpath.isabs("/foo/bar")', 1)
-tester('posixpath.isabs("foo/bar")', 0)
-
-tester('posixpath.commonprefix(["/home/swenson/spam", "/home/swen/spam"])',
- "/home/swen")
-tester('posixpath.commonprefix(["/home/swen/spam", "/home/swen/eggs"])',
- "/home/swen/")
-tester('posixpath.commonprefix(["/home/swen/spam", "/home/swen/spam"])',
- "/home/swen/spam")
-
-if errors:
- print str(errors) + " errors."
-else:
- print "No errors. Thank your lucky stars."
-
diff --git a/Lib/dos-8x3/test_pye.py b/Lib/dos-8x3/test_pye.py
deleted file mode 100644
index a119987..0000000
--- a/Lib/dos-8x3/test_pye.py
+++ /dev/null
@@ -1,152 +0,0 @@
-# Very simple test - Parse a file and print what happens
-
-# XXX TypeErrors on calling handlers, or on bad return values from a
-# handler, are obscure and unhelpful.
-
-from xml.parsers import expat
-
-class Outputter:
- def StartElementHandler(self, name, attrs):
- print 'Start element:\n\t', repr(name), attrs
-
- def EndElementHandler(self, name):
- print 'End element:\n\t', repr(name)
-
- def CharacterDataHandler(self, data):
- data = data.strip()
- if data:
- print 'Character data:'
- print '\t', repr(data)
-
- def ProcessingInstructionHandler(self, target, data):
- print 'PI:\n\t', repr(target), repr(data)
-
- def StartNamespaceDeclHandler(self, prefix, uri):
- print 'NS decl:\n\t', repr(prefix), repr(uri)
-
- def EndNamespaceDeclHandler(self, prefix):
- print 'End of NS decl:\n\t', repr(prefix)
-
- def StartCdataSectionHandler(self):
- print 'Start of CDATA section'
-
- def EndCdataSectionHandler(self):
- print 'End of CDATA section'
-
- def CommentHandler(self, text):
- print 'Comment:\n\t', repr(text)
-
- def NotationDeclHandler(self, *args):
- name, base, sysid, pubid = args
- print 'Notation declared:', args
-
- def UnparsedEntityDeclHandler(self, *args):
- entityName, base, systemId, publicId, notationName = args
- print 'Unparsed entity decl:\n\t', args
-
- def NotStandaloneHandler(self, userData):
- print 'Not standalone'
- return 1
-
- def ExternalEntityRefHandler(self, *args):
- context, base, sysId, pubId = args
- print 'External entity ref:', args
- return 1
-
- def DefaultHandler(self, userData):
- pass
-
- def DefaultHandlerExpand(self, userData):
- pass
-
-
-def confirm(ok):
- if ok:
- print "OK."
- else:
- print "Not OK."
-
-out = Outputter()
-parser = expat.ParserCreate(namespace_separator='!')
-
-# Test getting/setting returns_unicode
-parser.returns_unicode = 0; confirm(parser.returns_unicode == 0)
-parser.returns_unicode = 1; confirm(parser.returns_unicode == 1)
-parser.returns_unicode = 2; confirm(parser.returns_unicode == 1)
-parser.returns_unicode = 0; confirm(parser.returns_unicode == 0)
-
-HANDLER_NAMES = [
- 'StartElementHandler', 'EndElementHandler',
- 'CharacterDataHandler', 'ProcessingInstructionHandler',
- 'UnparsedEntityDeclHandler', 'NotationDeclHandler',
- 'StartNamespaceDeclHandler', 'EndNamespaceDeclHandler',
- 'CommentHandler', 'StartCdataSectionHandler',
- 'EndCdataSectionHandler',
- 'DefaultHandler', 'DefaultHandlerExpand',
- #'NotStandaloneHandler',
- 'ExternalEntityRefHandler'
- ]
-for name in HANDLER_NAMES:
- setattr(parser, name, getattr(out, name))
-
-data = '''\
-<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
-<?xml-stylesheet href="stylesheet.css"?>
-<!-- comment data -->
-<!DOCTYPE quotations SYSTEM "quotations.dtd" [
-<!ELEMENT root ANY>
-<!NOTATION notation SYSTEM "notation.jpeg">
-<!ENTITY acirc "&#226;">
-<!ENTITY external_entity SYSTEM "entity.file">
-<!ENTITY unparsed_entity SYSTEM "entity.file" NDATA notation>
-%unparsed_entity;
-]>
-
-<root attr1="value1" attr2="value2&#8000;">
-<myns:subelement xmlns:myns="http://www.python.org/namespace">
- Contents of subelements
-</myns:subelement>
-<sub2><![CDATA[contents of CDATA section]]></sub2>
-&external_entity;
-</root>
-'''
-
-# Produce UTF-8 output
-parser.returns_unicode = 0
-try:
- parser.Parse(data, 1)
-except expat.error:
- print '** Error', parser.ErrorCode, expat.ErrorString(parser.ErrorCode)
- print '** Line', parser.ErrorLineNumber
- print '** Column', parser.ErrorColumnNumber
- print '** Byte', parser.ErrorByteIndex
-
-# Try the parse again, this time producing Unicode output
-parser = expat.ParserCreate(namespace_separator='!')
-parser.returns_unicode = 1
-
-for name in HANDLER_NAMES:
- setattr(parser, name, getattr(out, name))
-try:
- parser.Parse(data, 1)
-except expat.error:
- print '** Error', parser.ErrorCode, expat.ErrorString(parser.ErrorCode)
- print '** Line', parser.ErrorLineNumber
- print '** Column', parser.ErrorColumnNumber
- print '** Byte', parser.ErrorByteIndex
-
-# Try parsing a file
-parser = expat.ParserCreate(namespace_separator='!')
-parser.returns_unicode = 1
-
-for name in HANDLER_NAMES:
- setattr(parser, name, getattr(out, name))
-import StringIO
-file = StringIO.StringIO(data)
-try:
- parser.ParseFile(file)
-except expat.error:
- print '** Error', parser.ErrorCode, expat.ErrorString(parser.ErrorCode)
- print '** Line', parser.ErrorLineNumber
- print '** Column', parser.ErrorColumnNumber
- print '** Byte', parser.ErrorByteIndex
diff --git a/Lib/dos-8x3/test_reg.py b/Lib/dos-8x3/test_reg.py
deleted file mode 100644
index 6a7f01b..0000000
--- a/Lib/dos-8x3/test_reg.py
+++ /dev/null
@@ -1,110 +0,0 @@
-from test_support import verbose
-import regex
-from regex_syntax import *
-
-re = 'a+b+c+'
-print 'no match:', regex.match(re, 'hello aaaabcccc world')
-print 'successful search:', regex.search(re, 'hello aaaabcccc world')
-try:
- cre = regex.compile('\(' + re)
-except regex.error:
- print 'caught expected exception'
-else:
- print 'expected regex.error not raised'
-
-print 'failed awk syntax:', regex.search('(a+)|(b+)', 'cdb')
-prev = regex.set_syntax(RE_SYNTAX_AWK)
-print 'successful awk syntax:', regex.search('(a+)|(b+)', 'cdb')
-regex.set_syntax(prev)
-print 'failed awk syntax:', regex.search('(a+)|(b+)', 'cdb')
-
-re = '\(<one>[0-9]+\) *\(<two>[0-9]+\)'
-print 'matching with group names and compile()'
-cre = regex.compile(re)
-print cre.match('801 999')
-try:
- print cre.group('one')
-except regex.error:
- print 'caught expected exception'
-else:
- print 'expected regex.error not raised'
-
-print 'matching with group names and symcomp()'
-cre = regex.symcomp(re)
-print cre.match('801 999')
-print cre.group(0)
-print cre.group('one')
-print cre.group(1, 2)
-print cre.group('one', 'two')
-print 'realpat:', cre.realpat
-print 'groupindex:', cre.groupindex
-
-re = 'world'
-cre = regex.compile(re)
-print 'not case folded search:', cre.search('HELLO WORLD')
-cre = regex.compile(re, regex.casefold)
-print 'case folded search:', cre.search('HELLO WORLD')
-
-print '__members__:', cre.__members__
-print 'regs:', cre.regs
-print 'last:', cre.last
-print 'translate:', len(cre.translate)
-print 'givenpat:', cre.givenpat
-
-print 'match with pos:', cre.match('hello world', 7)
-print 'search with pos:', cre.search('hello world there world', 7)
-print 'bogus group:', cre.group(0, 1, 3)
-try:
- print 'no name:', cre.group('one')
-except regex.error:
- print 'caught expected exception'
-else:
- print 'expected regex.error not raised'
-
-from regex_tests import *
-if verbose: print 'Running regex_tests test suite'
-
-for t in tests:
- pattern=s=outcome=repl=expected=None
- if len(t)==5:
- pattern, s, outcome, repl, expected = t
- elif len(t)==3:
- pattern, s, outcome = t
- else:
- raise ValueError, ('Test tuples should have 3 or 5 fields',t)
-
- try:
- obj=regex.compile(pattern)
- except regex.error:
- if outcome==SYNTAX_ERROR: pass # Expected a syntax error
- else:
- # Regex syntax errors aren't yet reported, so for
- # the official test suite they'll be quietly ignored.
- pass
- #print '=== Syntax error:', t
- else:
- try:
- result=obj.search(s)
- except regex.error, msg:
- print '=== Unexpected exception', t, repr(msg)
- if outcome==SYNTAX_ERROR:
- # This should have been a syntax error; forget it.
- pass
- elif outcome==FAIL:
- if result==-1: pass # No match, as expected
- else: print '=== Succeeded incorrectly', t
- elif outcome==SUCCEED:
- if result!=-1:
- # Matched, as expected, so now we compute the
- # result string and compare it to our expected result.
- start, end = obj.regs[0]
- found=s[start:end]
- groups=obj.group(1,2,3,4,5,6,7,8,9,10)
- vardict=vars()
- for i in range(len(groups)):
- vardict['g'+str(i+1)]=str(groups[i])
- repl=eval(repl)
- if repl!=expected:
- print '=== grouping error', t, repr(repl)+' should be '+repr(expected)
- else:
- print '=== Failed incorrectly', t
diff --git a/Lib/dos-8x3/test_rfc.py b/Lib/dos-8x3/test_rfc.py
deleted file mode 100644
index 36e7a51..0000000
--- a/Lib/dos-8x3/test_rfc.py
+++ /dev/null
@@ -1,126 +0,0 @@
-from test_support import verbose
-import rfc822, sys
-try:
- from cStringIO import StringIO
-except ImportError:
- from StringIO import StringIO
-
-def test(msg, results):
- fp = StringIO()
- fp.write(msg)
- fp.seek(0)
- m = rfc822.Message(fp)
- i = 0
-
- for n, a in m.getaddrlist('to') + m.getaddrlist('cc'):
- if verbose:
- print 'name:', repr(n), 'addr:', repr(a)
- try:
- mn, ma = results[i][0], results[i][1]
- except IndexError:
- print 'extra parsed address:', repr(n), repr(a)
- continue
- i = i + 1
- if mn == n and ma == a:
- if verbose:
- print ' [matched]'
- else:
- if verbose:
- print ' [no match]'
- print 'not found:', repr(n), repr(a)
-
- out = m.getdate('date')
- if out:
- if verbose:
- print 'Date:', m.getheader('date')
- if out == (1999, 1, 13, 23, 57, 35, 0, 0, 0):
- if verbose:
- print ' [matched]'
- else:
- if verbose:
- print ' [no match]'
- print 'Date conversion failed:', out
-
-# Note: all test cases must have the same date (in various formats),
-# or no date!
-
-test('''Date: Wed, 13 Jan 1999 23:57:35 -0500
-From: Guido van Rossum <guido@CNRI.Reston.VA.US>
-To: "Guido van
- : Rossum" <guido@python.org>
-Subject: test2
-
-test2
-''', [('Guido van\n : Rossum', 'guido@python.org')])
-
-test('''From: Barry <bwarsaw@python.org
-To: guido@python.org (Guido: the Barbarian)
-Subject: nonsense
-Date: Wednesday, January 13 1999 23:57:35 -0500
-
-test''', [('Guido: the Barbarian', 'guido@python.org'),
- ])
-
-test('''From: Barry <bwarsaw@python.org
-To: guido@python.org (Guido: the Barbarian)
-Cc: "Guido: the Madman" <guido@python.org>
-Date: 13-Jan-1999 23:57:35 EST
-
-test''', [('Guido: the Barbarian', 'guido@python.org'),
- ('Guido: the Madman', 'guido@python.org')
- ])
-
-test('''To: "The monster with
- the very long name: Guido" <guido@python.org>
-Date: Wed, 13 Jan 1999 23:57:35 -0500
-
-test''', [('The monster with\n the very long name: Guido',
- 'guido@python.org')])
-
-test('''To: "Amit J. Patel" <amitp@Theory.Stanford.EDU>
-CC: Mike Fletcher <mfletch@vrtelecom.com>,
- "'string-sig@python.org'" <string-sig@python.org>
-Cc: fooz@bat.com, bart@toof.com
-Cc: goit@lip.com
-Date: Wed, 13 Jan 1999 23:57:35 -0500
-
-test''', [('Amit J. Patel', 'amitp@Theory.Stanford.EDU'),
- ('Mike Fletcher', 'mfletch@vrtelecom.com'),
- ("'string-sig@python.org'", 'string-sig@python.org'),
- ('', 'fooz@bat.com'),
- ('', 'bart@toof.com'),
- ('', 'goit@lip.com'),
- ])
-
-# This one is just twisted. I don't know what the proper result should be,
-# but it shouldn't be to infloop, which is what used to happen!
-test('''To: <[smtp:dd47@mail.xxx.edu]_at_hmhq@hdq-mdm1-imgout.companay.com>
-Date: Wed, 13 Jan 1999 23:57:35 -0500
-
-test''', [('', ''),
- ('', 'dd47@mail.xxx.edu'),
- ('', '_at_hmhq@hdq-mdm1-imgout.companay.com')
- ])
-
-# This exercises the old commas-in-a-full-name bug, which recent versions
-# of the module should handle correctly.
-test('''To: "last, first" <userid@foo.net>
-
-test''', [('last, first', 'userid@foo.net'),
- ])
-
-test('''To: (Comment stuff) "Quoted name"@somewhere.com
-
-test''', [('Comment stuff', '"Quoted name"@somewhere.com'),
- ])
-
-test('''To: :
-Cc: goit@lip.com
-Date: Wed, 13 Jan 1999 23:57:35 -0500
-
-test''', [('', 'goit@lip.com')])
-
-
-test('''To: guido@[132.151.1.21]
-
-foo''', [('', 'guido@[132.151.1.21]')])
diff --git a/Lib/dos-8x3/test_rgb.py b/Lib/dos-8x3/test_rgb.py
deleted file mode 100755
index 1fa201d..0000000
--- a/Lib/dos-8x3/test_rgb.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Testing rgbimg module
-
-import rgbimg, os, uu
-
-from test_support import verbose, unlink, findfile
-
-class error(Exception):
- pass
-
-print 'RGBimg test suite:'
-
-def testimg(rgb_file, raw_file):
- rgb_file = findfile(rgb_file)
- raw_file = findfile(raw_file)
- width, height = rgbimg.sizeofimage(rgb_file)
- rgb = rgbimg.longimagedata(rgb_file)
- if len(rgb) != width * height * 4:
- raise error, 'bad image length'
- raw = open(raw_file, 'rb').read()
- if rgb != raw:
- raise error, \
- 'images don\'t match for '+rgb_file+' and '+raw_file
- for depth in [1, 3, 4]:
- rgbimg.longstoimage(rgb, width, height, depth, '@.rgb')
- os.unlink('@.rgb')
-
-table = [
- ('testrgb.uue', 'test.rgb'),
- ('testimg.uue', 'test.rawimg'),
- ('testimgr.uue', 'test.rawimg.rev'),
- ]
-for source, target in table:
- source = findfile(source)
- target = findfile(target)
- if verbose:
- print "uudecoding", source, "->", target, "..."
- uu.decode(source, target)
-
-if verbose:
- print "testing..."
-
-ttob = rgbimg.ttob(0)
-if ttob != 0:
- raise error, 'ttob should start out as zero'
-
-testimg('test.rgb', 'test.rawimg')
-
-ttob = rgbimg.ttob(1)
-if ttob != 0:
- raise error, 'ttob should be zero'
-
-testimg('test.rgb', 'test.rawimg.rev')
-
-ttob = rgbimg.ttob(0)
-if ttob != 1:
- raise error, 'ttob should be one'
-
-ttob = rgbimg.ttob(0)
-if ttob != 0:
- raise error, 'ttob should be zero'
-
-for source, target in table:
- unlink(findfile(target))
diff --git a/Lib/dos-8x3/test_rot.py b/Lib/dos-8x3/test_rot.py
deleted file mode 100644
index a5c02aa..0000000
--- a/Lib/dos-8x3/test_rot.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import rotor
-
-r = rotor.newrotor("you'll never guess this")
-r = rotor.newrotor("you'll never guess this", 12)
-
-A = 'spam and eggs'
-B = 'cheese shop'
-
-a = r.encrypt(A)
-print `a`
-b = r.encryptmore(B)
-print `b`
-
-A1 = r.decrypt(a)
-print A1
-if A1 <> A:
- print 'decrypt failed'
-
-B1 = r.decryptmore(b)
-print B1
-if B1 <> B:
- print 'decryptmore failed'
-
-try:
- r.setkey()
-except TypeError:
- pass
-r.setkey('you guessed it!')
diff --git a/Lib/dos-8x3/test_sel.py b/Lib/dos-8x3/test_sel.py
deleted file mode 100755
index b198cf1..0000000
--- a/Lib/dos-8x3/test_sel.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Testing select module
-from test_support import verbose
-import select
-import os
-
-# test some known error conditions
-try:
- rfd, wfd, xfd = select.select(1, 2, 3)
-except TypeError:
- pass
-else:
- print 'expected TypeError exception not raised'
-
-class Nope:
- pass
-
-class Almost:
- def fileno(self):
- return 'fileno'
-
-try:
- rfd, wfd, xfd = select.select([Nope()], [], [])
-except TypeError:
- pass
-else:
- print 'expected TypeError exception not raised'
-
-try:
- rfd, wfd, xfd = select.select([Almost()], [], [])
-except TypeError:
- pass
-else:
- print 'expected TypeError exception not raised'
-
-
-def test():
- import sys
- if sys.platform[:3] in ('win', 'mac', 'os2'):
- if verbose:
- print "Can't test select easily on", sys.platform
- return
- cmd = 'for i in 0 1 2 3 4 5 6 7 8 9; do echo testing...; sleep 1; done'
- p = os.popen(cmd, 'r')
- for tout in (0, 1, 2, 4, 8, 16) + (None,)*10:
- if verbose:
- print 'timeout =', tout
- rfd, wfd, xfd = select.select([p], [], [], tout)
- if (rfd, wfd, xfd) == ([], [], []):
- continue
- if (rfd, wfd, xfd) == ([p], [], []):
- line = p.readline()
- if verbose:
- print `line`
- if not line:
- if verbose:
- print 'EOF'
- break
- continue
- print 'Unexpected return values from select():', rfd, wfd, xfd
- p.close()
-
-test()
-
diff --git a/Lib/dos-8x3/test_sig.py b/Lib/dos-8x3/test_sig.py
deleted file mode 100755
index 02b5dc3..0000000
--- a/Lib/dos-8x3/test_sig.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Test the signal module
-from test_support import verbose, TestSkipped
-import signal
-import os
-import sys
-
-if sys.platform[:3] in ('win', 'os2'):
- raise TestSkipped, "Can't test signal on %s" % sys.platform[:3]
-
-if verbose:
- x = '-x'
-else:
- x = '+x'
-pid = os.getpid()
-
-# Shell script that will send us asynchronous signals
-script = """
- (
- set %(x)s
- sleep 2
- kill -5 %(pid)d
- sleep 2
- kill -2 %(pid)d
- sleep 2
- kill -3 %(pid)d
- ) &
-""" % vars()
-
-def handlerA(*args):
- if verbose:
- print "handlerA", args
-
-HandlerBCalled = "HandlerBCalled" # Exception
-
-def handlerB(*args):
- if verbose:
- print "handlerB", args
- raise HandlerBCalled, args
-
-signal.alarm(20) # Entire test lasts at most 20 sec.
-signal.signal(5, handlerA)
-signal.signal(2, handlerB)
-signal.signal(3, signal.SIG_IGN)
-signal.signal(signal.SIGALRM, signal.default_int_handler)
-
-os.system(script)
-
-print "starting pause() loop..."
-
-try:
- while 1:
- if verbose:
- print "call pause()..."
- try:
- signal.pause()
- if verbose:
- print "pause() returned"
- except HandlerBCalled:
- if verbose:
- print "HandlerBCalled exception caught"
- else:
- pass
-
-except KeyboardInterrupt:
- if verbose:
- print "KeyboardInterrupt (assume the alarm() went off)"
diff --git a/Lib/dos-8x3/test_soc.py b/Lib/dos-8x3/test_soc.py
deleted file mode 100644
index 92b9336..0000000
--- a/Lib/dos-8x3/test_soc.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# Not tested:
-# socket.fromfd()
-# sktobj.getsockopt()
-# sktobj.recvfrom()
-# sktobj.sendto()
-# sktobj.setblocking()
-# sktobj.setsockopt()
-# sktobj.shutdown()
-
-
-from test_support import verbose, TestFailed
-import socket
-import os
-import time
-import string
-
-def missing_ok(str):
- try:
- getattr(socket, str)
- except AttributeError:
- pass
-
-try: raise socket.error
-except socket.error: print "socket.error"
-
-socket.AF_INET
-
-socket.SOCK_STREAM
-socket.SOCK_DGRAM
-socket.SOCK_RAW
-socket.SOCK_RDM
-socket.SOCK_SEQPACKET
-
-for optional in ("AF_UNIX",
-
- "SO_DEBUG", "SO_ACCEPTCONN", "SO_REUSEADDR", "SO_KEEPALIVE",
- "SO_DONTROUTE", "SO_BROADCAST", "SO_USELOOPBACK", "SO_LINGER",
- "SO_OOBINLINE", "SO_REUSEPORT", "SO_SNDBUF", "SO_RCVBUF",
- "SO_SNDLOWAT", "SO_RCVLOWAT", "SO_SNDTIMEO", "SO_RCVTIMEO",
- "SO_ERROR", "SO_TYPE", "SOMAXCONN",
-
- "MSG_OOB", "MSG_PEEK", "MSG_DONTROUTE", "MSG_EOR",
- "MSG_TRUNC", "MSG_CTRUNC", "MSG_WAITALL", "MSG_BTAG",
- "MSG_ETAG",
-
- "SOL_SOCKET",
-
- "IPPROTO_IP", "IPPROTO_ICMP", "IPPROTO_IGMP",
- "IPPROTO_GGP", "IPPROTO_TCP", "IPPROTO_EGP",
- "IPPROTO_PUP", "IPPROTO_UDP", "IPPROTO_IDP",
- "IPPROTO_HELLO", "IPPROTO_ND", "IPPROTO_TP",
- "IPPROTO_XTP", "IPPROTO_EON", "IPPROTO_BIP",
- "IPPROTO_RAW", "IPPROTO_MAX",
-
- "IPPORT_RESERVED", "IPPORT_USERRESERVED",
-
- "INADDR_ANY", "INADDR_BROADCAST", "INADDR_LOOPBACK",
- "INADDR_UNSPEC_GROUP", "INADDR_ALLHOSTS_GROUP",
- "INADDR_MAX_LOCAL_GROUP", "INADDR_NONE",
-
- "IP_OPTIONS", "IP_HDRINCL", "IP_TOS", "IP_TTL",
- "IP_RECVOPTS", "IP_RECVRETOPTS", "IP_RECVDSTADDR",
- "IP_RETOPTS", "IP_MULTICAST_IF", "IP_MULTICAST_TTL",
- "IP_MULTICAST_LOOP", "IP_ADD_MEMBERSHIP",
- "IP_DROP_MEMBERSHIP",
- ):
- missing_ok(optional)
-
-socktype = socket.SocketType
-hostname = socket.gethostname()
-ip = socket.gethostbyname(hostname)
-hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
-all_host_names = [hname] + aliases
-
-if verbose:
- print hostname
- print ip
- print hname, aliases, ipaddrs
- print all_host_names
-
-for name in all_host_names:
- if string.find(name, '.'):
- break
-else:
- print 'FQDN not found'
-
-print socket.getservbyname('telnet', 'tcp')
-try:
- socket.getservbyname('telnet', 'udp')
-except socket.error:
- pass
-
-
-canfork = hasattr(os, 'fork')
-try:
- PORT = 50007
- if not canfork or os.fork():
- # parent is server
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- s.bind(("127.0.0.1", PORT))
- s.listen(1)
- if verbose:
- print 'parent accepting'
- if canfork:
- conn, addr = s.accept()
- if verbose:
- print 'connected by', addr
- # couple of interesting tests while we've got a live socket
- f = conn.fileno()
- if verbose:
- print 'fileno:', f
- p = conn.getpeername()
- if verbose:
- print 'peer:', p
- n = conn.getsockname()
- if verbose:
- print 'sockname:', n
- f = conn.makefile()
- if verbose:
- print 'file obj:', f
- while 1:
- data = conn.recv(1024)
- if not data:
- break
- if verbose:
- print 'received:', data
- conn.send(data)
- conn.close()
- else:
- try:
- # child is client
- time.sleep(5)
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- if verbose:
- print 'child connecting'
- s.connect(("127.0.0.1", PORT))
- msg = 'socket test'
- s.send(msg)
- data = s.recv(1024)
- if msg <> data:
- print 'parent/client mismatch'
- s.close()
- finally:
- os._exit(1)
-except socket.error, msg:
- raise TestFailed, msg
diff --git a/Lib/dos-8x3/test_str.py b/Lib/dos-8x3/test_str.py
deleted file mode 100644
index ea237cd..0000000
--- a/Lib/dos-8x3/test_str.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Tests StringIO and cStringIO
-
-def do_test(module):
- s = ("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"+'\n')*5
- f = module.StringIO(s)
- print f.read(10)
- print f.readline()
- print len(f.readlines(60))
-
- f = module.StringIO()
- f.write('abcdef')
- f.seek(3)
- f.write('uvwxyz')
- f.write('!')
- print `f.getvalue()`
- f.close()
- f = module.StringIO()
- f.write(s)
- f.seek(10)
- f.truncate()
- print `f.getvalue()`
- f.seek(0)
- f.truncate(5)
- print `f.getvalue()`
- f.close()
- try:
- f.write("frobnitz")
- except ValueError, e:
- print "Caught expected ValueError writing to closed StringIO:"
- print e
- else:
- print "Failed to catch ValueError writing to closed StringIO."
-
-# Don't bother testing cStringIO without
-import StringIO, cStringIO
-do_test(StringIO)
-do_test(cStringIO)
diff --git a/Lib/dos-8x3/test_sun.py b/Lib/dos-8x3/test_sun.py
deleted file mode 100644
index af18761..0000000
--- a/Lib/dos-8x3/test_sun.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from test_support import verbose, findfile, TestFailed
-import sunaudiodev
-import os
-
-def play_sound_file(path):
- fp = open(path, 'r')
- data = fp.read()
- fp.close()
- try:
- a = sunaudiodev.open('w')
- except sunaudiodev.error, msg:
- raise TestFailed, msg
- else:
- a.write(data)
- a.close()
-
-def test():
- play_sound_file(findfile('audiotest.au'))
-
-test()
diff --git a/Lib/dos-8x3/test_sup.py b/Lib/dos-8x3/test_sup.py
deleted file mode 100755
index 99bacda..0000000
--- a/Lib/dos-8x3/test_sup.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""Supporting definitions for the Python regression test."""
-
-
-class Error(Exception):
- """Base class for regression test exceptions."""
-
-class TestFailed(Error):
- """Test failed."""
-
-class TestSkipped(Error):
- """Test skipped.
-
- This can be raised to indicate that a test was deliberately
- skipped, but not because a feature wasn't available. For
- example, if some resource can't be used, such as when the network
- appears to be unavailable, this should be raised instead of
- TestFailed.
-
- """
-
-
-verbose = 1 # Flag set to 0 by regrtest.py
-use_large_resources = 1 # Flag set to 0 by regrtest.py
-
-def unload(name):
- import sys
- try:
- del sys.modules[name]
- except KeyError:
- pass
-
-def forget(modname):
- unload(modname)
- import sys, os
- for dirname in sys.path:
- try:
- os.unlink(os.path.join(dirname, modname + '.pyc'))
- except os.error:
- pass
-
-FUZZ = 1e-6
-
-def fcmp(x, y): # fuzzy comparison function
- if type(x) == type(0.0) or type(y) == type(0.0):
- try:
- x, y = coerce(x, y)
- fuzz = (abs(x) + abs(y)) * FUZZ
- if abs(x-y) <= fuzz:
- return 0
- except:
- pass
- elif type(x) == type(y) and type(x) in (type(()), type([])):
- for i in range(min(len(x), len(y))):
- outcome = fcmp(x[i], y[i])
- if outcome <> 0:
- return outcome
- return cmp(len(x), len(y))
- return cmp(x, y)
-
-TESTFN = '@test' # Filename used for testing
-from os import unlink
-
-def findfile(file, here=__file__):
- import os
- if os.path.isabs(file):
- return file
- import sys
- path = sys.path
- path = [os.path.dirname(here)] + path
- for dn in path:
- fn = os.path.join(dn, file)
- if os.path.exists(fn): return fn
- return file
diff --git a/Lib/dos-8x3/test_thr.py b/Lib/dos-8x3/test_thr.py
deleted file mode 100755
index 710fb89..0000000
--- a/Lib/dos-8x3/test_thr.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# Very rudimentary test of thread module
-
-# Create a bunch of threads, let each do some work, wait until all are done
-
-from test_support import verbose
-import random
-import thread
-import time
-
-mutex = thread.allocate_lock()
-rmutex = thread.allocate_lock() # for calls to random
-running = 0
-done = thread.allocate_lock()
-done.acquire()
-
-numtasks = 10
-
-def task(ident):
- global running
- rmutex.acquire()
- delay = random.random() * numtasks
- rmutex.release()
- if verbose:
- print 'task', ident, 'will run for', round(delay, 1), 'sec'
- time.sleep(delay)
- if verbose:
- print 'task', ident, 'done'
- mutex.acquire()
- running = running - 1
- if running == 0:
- done.release()
- mutex.release()
-
-next_ident = 0
-def newtask():
- global next_ident, running
- mutex.acquire()
- next_ident = next_ident + 1
- if verbose:
- print 'creating task', next_ident
- thread.start_new_thread(task, (next_ident,))
- running = running + 1
- mutex.release()
-
-for i in range(numtasks):
- newtask()
-
-print 'waiting for all tasks to complete'
-done.acquire()
-print 'all tasks done'
-
-class barrier:
- def __init__(self, n):
- self.n = n
- self.waiting = 0
- self.checkin = thread.allocate_lock()
- self.checkout = thread.allocate_lock()
- self.checkout.acquire()
-
- def enter(self):
- checkin, checkout = self.checkin, self.checkout
-
- checkin.acquire()
- self.waiting = self.waiting + 1
- if self.waiting == self.n:
- self.waiting = self.n - 1
- checkout.release()
- return
- checkin.release()
-
- checkout.acquire()
- self.waiting = self.waiting - 1
- if self.waiting == 0:
- checkin.release()
- return
- checkout.release()
-
-numtrips = 3
-def task2(ident):
- global running
- for i in range(numtrips):
- if ident == 0:
- # give it a good chance to enter the next
- # barrier before the others are all out
- # of the current one
- delay = 0.001
- else:
- rmutex.acquire()
- delay = random.random() * numtasks
- rmutex.release()
- if verbose:
- print 'task', ident, 'will run for', round(delay, 1), 'sec'
- time.sleep(delay)
- if verbose:
- print 'task', ident, 'entering barrier', i
- bar.enter()
- if verbose:
- print 'task', ident, 'leaving barrier', i
- mutex.acquire()
- running = running - 1
- if running == 0:
- done.release()
- mutex.release()
-
-print '\n*** Barrier Test ***'
-if done.acquire(0):
- raise ValueError, "'done' should have remained acquired"
-bar = barrier(numtasks)
-running = numtasks
-for i in range(numtasks):
- thread.start_new_thread(task2, (i,))
-done.acquire()
-print 'all tasks done'
diff --git a/Lib/dos-8x3/test_tim.py b/Lib/dos-8x3/test_tim.py
deleted file mode 100644
index 03d081e..0000000
--- a/Lib/dos-8x3/test_tim.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import time
-
-time.altzone
-time.clock()
-t = time.time()
-time.asctime(time.gmtime(t))
-if time.ctime(t) <> time.asctime(time.localtime(t)):
- print 'time.ctime(t) <> time.asctime(time.localtime(t))'
-
-time.daylight
-if long(time.mktime(time.localtime(t))) <> long(t):
- print 'time.mktime(time.localtime(t)) <> t'
-
-time.sleep(1.2)
-tt = time.gmtime(t)
-for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I',
- 'j', 'm', 'M', 'p', 'S',
- 'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%'):
- format = ' %' + directive
- try:
- time.strftime(format, tt)
- except ValueError:
- print 'conversion specifier:', format, ' failed.'
-
-time.timezone
-time.tzname
-
-# expected errors
-try:
- time.asctime(0)
-except TypeError:
- pass
-
-try:
- time.mktime((999999, 999999, 999999, 999999,
- 999999, 999999, 999999, 999999,
- 999999))
-except OverflowError:
- pass
diff --git a/Lib/dos-8x3/test_tok.py b/Lib/dos-8x3/test_tok.py
deleted file mode 100644
index cd97e9a..0000000
--- a/Lib/dos-8x3/test_tok.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from test_support import verbose, findfile
-import tokenize, os, sys
-
-if verbose:
- print 'starting...'
-file = open(findfile('tokenize_tests.py'))
-tokenize.tokenize(file.readline)
-if verbose:
- print 'finished'
-
diff --git a/Lib/dos-8x3/test_typ.py b/Lib/dos-8x3/test_typ.py
deleted file mode 100755
index e3a51f0..0000000
--- a/Lib/dos-8x3/test_typ.py
+++ /dev/null
@@ -1,267 +0,0 @@
-# Python test set -- part 6, built-in types
-
-from test_support import *
-
-print '6. Built-in types'
-
-print '6.1 Truth value testing'
-if None: raise TestFailed, 'None is true instead of false'
-if 0: raise TestFailed, '0 is true instead of false'
-if 0L: raise TestFailed, '0L is true instead of false'
-if 0.0: raise TestFailed, '0.0 is true instead of false'
-if '': raise TestFailed, '\'\' is true instead of false'
-if (): raise TestFailed, '() is true instead of false'
-if []: raise TestFailed, '[] is true instead of false'
-if {}: raise TestFailed, '{} is true instead of false'
-if not 1: raise TestFailed, '1 is false instead of true'
-if not 1L: raise TestFailed, '1L is false instead of true'
-if not 1.0: raise TestFailed, '1.0 is false instead of true'
-if not 'x': raise TestFailed, '\'x\' is false instead of true'
-if not (1, 1): raise TestFailed, '(1, 1) is false instead of true'
-if not [1]: raise TestFailed, '[1] is false instead of true'
-if not {'x': 1}: raise TestFailed, '{\'x\': 1} is false instead of true'
-def f(): pass
-class C: pass
-import sys
-x = C()
-if not f: raise TestFailed, 'f is false instead of true'
-if not C: raise TestFailed, 'C is false instead of true'
-if not sys: raise TestFailed, 'sys is false instead of true'
-if not x: raise TestFailed, 'x is false instead of true'
-
-print '6.2 Boolean operations'
-if 0 or 0: raise TestFailed, '0 or 0 is true instead of false'
-if 1 and 1: pass
-else: raise TestFailed, '1 and 1 is false instead of true'
-if not 1: raise TestFailed, 'not 1 is true instead of false'
-
-print '6.3 Comparisons'
-if 0 < 1 <= 1 == 1 >= 1 > 0 <> 1: pass
-else: raise TestFailed, 'int comparisons failed'
-if 0L < 1L <= 1L == 1L >= 1L > 0L <> 1L: pass
-else: raise TestFailed, 'long int comparisons failed'
-if 0.0 < 1.0 <= 1.0 == 1.0 >= 1.0 > 0.0 <> 1.0: pass
-else: raise TestFailed, 'float comparisons failed'
-if '' < 'a' <= 'a' == 'a' < 'abc' < 'abd' < 'b': pass
-else: raise TestFailed, 'string comparisons failed'
-if 0 in [0] and 0 not in [1]: pass
-else: raise TestFailed, 'membership test failed'
-if None is None and [] is not []: pass
-else: raise TestFailed, 'identity test failed'
-
-print '6.4 Numeric types (mostly conversions)'
-if 0 <> 0L or 0 <> 0.0 or 0L <> 0.0: raise TestFailed, 'mixed comparisons'
-if 1 <> 1L or 1 <> 1.0 or 1L <> 1.0: raise TestFailed, 'mixed comparisons'
-if -1 <> -1L or -1 <> -1.0 or -1L <> -1.0:
- raise TestFailed, 'int/long/float value not equal'
-if int(1.9) == 1 == int(1.1) and int(-1.1) == -1 == int(-1.9): pass
-else: raise TestFailed, 'int() does not round properly'
-if long(1.9) == 1L == long(1.1) and long(-1.1) == -1L == long(-1.9): pass
-else: raise TestFailed, 'long() does not round properly'
-if float(1) == 1.0 and float(-1) == -1.0 and float(0) == 0.0: pass
-else: raise TestFailed, 'float() does not work properly'
-print '6.4.1 32-bit integers'
-if 12 + 24 <> 36: raise TestFailed, 'int op'
-if 12 + (-24) <> -12: raise TestFailed, 'int op'
-if (-12) + 24 <> 12: raise TestFailed, 'int op'
-if (-12) + (-24) <> -36: raise TestFailed, 'int op'
-if not 12 < 24: raise TestFailed, 'int op'
-if not -24 < -12: raise TestFailed, 'int op'
-# Test for a particular bug in integer multiply
-xsize, ysize, zsize = 238, 356, 4
-if not (xsize*ysize*zsize == zsize*xsize*ysize == 338912):
- raise TestFailed, 'int mul commutativity'
-print '6.4.2 Long integers'
-if 12L + 24L <> 36L: raise TestFailed, 'long op'
-if 12L + (-24L) <> -12L: raise TestFailed, 'long op'
-if (-12L) + 24L <> 12L: raise TestFailed, 'long op'
-if (-12L) + (-24L) <> -36L: raise TestFailed, 'long op'
-if not 12L < 24L: raise TestFailed, 'long op'
-if not -24L < -12L: raise TestFailed, 'long op'
-x = sys.maxint
-if int(long(x)) != x: raise TestFailed, 'long op'
-try: int(long(x)+1L)
-except OverflowError: pass
-else:raise TestFailed, 'long op'
-x = -x
-if int(long(x)) != x: raise TestFailed, 'long op'
-x = x-1
-if int(long(x)) != x: raise TestFailed, 'long op'
-try: int(long(x)-1L)
-except OverflowError: pass
-else:raise TestFailed, 'long op'
-print '6.4.3 Floating point numbers'
-if 12.0 + 24.0 <> 36.0: raise TestFailed, 'float op'
-if 12.0 + (-24.0) <> -12.0: raise TestFailed, 'float op'
-if (-12.0) + 24.0 <> 12.0: raise TestFailed, 'float op'
-if (-12.0) + (-24.0) <> -36.0: raise TestFailed, 'float op'
-if not 12.0 < 24.0: raise TestFailed, 'float op'
-if not -24.0 < -12.0: raise TestFailed, 'float op'
-
-print '6.5 Sequence types'
-
-print '6.5.1 Strings'
-if len('') <> 0: raise TestFailed, 'len(\'\')'
-if len('a') <> 1: raise TestFailed, 'len(\'a\')'
-if len('abcdef') <> 6: raise TestFailed, 'len(\'abcdef\')'
-if 'xyz' + 'abcde' <> 'xyzabcde': raise TestFailed, 'string concatenation'
-if 'xyz'*3 <> 'xyzxyzxyz': raise TestFailed, 'string repetition *3'
-if 0*'abcde' <> '': raise TestFailed, 'string repetition 0*'
-if min('abc') <> 'a' or max('abc') <> 'c': raise TestFailed, 'min/max string'
-if 'a' in 'abc' and 'b' in 'abc' and 'c' in 'abc' and 'd' not in 'abc': pass
-else: raise TestFailed, 'in/not in string'
-x = 'x'*103
-if '%s!'%x != x+'!': raise TestFailed, 'nasty string formatting bug'
-
-print '6.5.2 Tuples'
-if len(()) <> 0: raise TestFailed, 'len(())'
-if len((1,)) <> 1: raise TestFailed, 'len((1,))'
-if len((1,2,3,4,5,6)) <> 6: raise TestFailed, 'len((1,2,3,4,5,6))'
-if (1,2)+(3,4) <> (1,2,3,4): raise TestFailed, 'tuple concatenation'
-if (1,2)*3 <> (1,2,1,2,1,2): raise TestFailed, 'tuple repetition *3'
-if 0*(1,2,3) <> (): raise TestFailed, 'tuple repetition 0*'
-if min((1,2)) <> 1 or max((1,2)) <> 2: raise TestFailed, 'min/max tuple'
-if 0 in (0,1,2) and 1 in (0,1,2) and 2 in (0,1,2) and 3 not in (0,1,2): pass
-else: raise TestFailed, 'in/not in tuple'
-
-print '6.5.3 Lists'
-if len([]) <> 0: raise TestFailed, 'len([])'
-if len([1,]) <> 1: raise TestFailed, 'len([1,])'
-if len([1,2,3,4,5,6]) <> 6: raise TestFailed, 'len([1,2,3,4,5,6])'
-if [1,2]+[3,4] <> [1,2,3,4]: raise TestFailed, 'list concatenation'
-if [1,2]*3 <> [1,2,1,2,1,2]: raise TestFailed, 'list repetition *3'
-if [1,2]*3L <> [1,2,1,2,1,2]: raise TestFailed, 'list repetition *3L'
-if 0*[1,2,3] <> []: raise TestFailed, 'list repetition 0*'
-if 0L*[1,2,3] <> []: raise TestFailed, 'list repetition 0L*'
-if min([1,2]) <> 1 or max([1,2]) <> 2: raise TestFailed, 'min/max list'
-if 0 in [0,1,2] and 1 in [0,1,2] and 2 in [0,1,2] and 3 not in [0,1,2]: pass
-else: raise TestFailed, 'in/not in list'
-a = [1, 2, 3, 4, 5]
-a[:-1] = a
-if a != [1, 2, 3, 4, 5, 5]:
- raise TestFailed, "list self-slice-assign (head)"
-a = [1, 2, 3, 4, 5]
-a[1:] = a
-if a != [1, 1, 2, 3, 4, 5]:
- raise TestFailed, "list self-slice-assign (tail)"
-a = [1, 2, 3, 4, 5]
-a[1:-1] = a
-if a != [1, 1, 2, 3, 4, 5, 5]:
- raise TestFailed, "list self-slice-assign (center)"
-
-
-print '6.5.3a Additional list operations'
-a = [0,1,2,3,4]
-a[0L] = 1
-a[1L] = 2
-a[2L] = 3
-if a <> [1,2,3,3,4]: raise TestFailed, 'list item assignment [0L], [1L], [2L]'
-a[0] = 5
-a[1] = 6
-a[2] = 7
-if a <> [5,6,7,3,4]: raise TestFailed, 'list item assignment [0], [1], [2]'
-a[-2L] = 88
-a[-1L] = 99
-if a <> [5,6,7,88,99]: raise TestFailed, 'list item assignment [-2L], [-1L]'
-a[-2] = 8
-a[-1] = 9
-if a <> [5,6,7,8,9]: raise TestFailed, 'list item assignment [-2], [-1]'
-a[:2] = [0,4]
-a[-3:] = []
-a[1:1] = [1,2,3]
-if a <> [0,1,2,3,4]: raise TestFailed, 'list slice assignment'
-a[ 1L : 4L] = [7,8,9]
-if a <> [0,7,8,9,4]: raise TestFailed, 'list slice assignment using long ints'
-del a[1:4]
-if a <> [0,4]: raise TestFailed, 'list slice deletion'
-del a[0]
-if a <> [4]: raise TestFailed, 'list item deletion [0]'
-del a[-1]
-if a <> []: raise TestFailed, 'list item deletion [-1]'
-a=range(0,5)
-del a[1L:4L]
-if a <> [0,4]: raise TestFailed, 'list slice deletion'
-del a[0L]
-if a <> [4]: raise TestFailed, 'list item deletion [0]'
-del a[-1L]
-if a <> []: raise TestFailed, 'list item deletion [-1]'
-a.append(0)
-a.append(1)
-a.append(2)
-if a <> [0,1,2]: raise TestFailed, 'list append'
-a.insert(0, -2)
-a.insert(1, -1)
-a.insert(2,0)
-if a <> [-2,-1,0,0,1,2]: raise TestFailed, 'list insert'
-if a.count(0) <> 2: raise TestFailed, ' list count'
-if a.index(0) <> 2: raise TestFailed, 'list index'
-a.remove(0)
-if a <> [-2,-1,0,1,2]: raise TestFailed, 'list remove'
-a.reverse()
-if a <> [2,1,0,-1,-2]: raise TestFailed, 'list reverse'
-a.sort()
-if a <> [-2,-1,0,1,2]: raise TestFailed, 'list sort'
-def revcmp(a, b): return cmp(b, a)
-a.sort(revcmp)
-if a <> [2,1,0,-1,-2]: raise TestFailed, 'list sort with cmp func'
-# The following dumps core in unpatched Python 1.5:
-def myComparison(x,y):
- return cmp(x%3, y%7)
-z = range(12)
-z.sort(myComparison)
-
-# Test extreme cases with long ints
-a = [0,1,2,3,4]
-if a[ -pow(2,128L): 3 ] != [0,1,2]:
- raise TestFailed, "list slicing with too-small long integer"
-if a[ 3: pow(2,145L) ] != [3,4]:
- raise TestFailed, "list slicing with too-large long integer"
-
-print '6.6 Mappings == Dictionaries'
-d = {}
-if d.keys() <> []: raise TestFailed, '{}.keys()'
-if d.has_key('a') <> 0: raise TestFailed, '{}.has_key(\'a\')'
-if len(d) <> 0: raise TestFailed, 'len({})'
-d = {'a': 1, 'b': 2}
-if len(d) <> 2: raise TestFailed, 'len(dict)'
-k = d.keys()
-k.sort()
-if k <> ['a', 'b']: raise TestFailed, 'dict keys()'
-if d.has_key('a') and d.has_key('b') and not d.has_key('c'): pass
-else: raise TestFailed, 'dict keys()'
-if d['a'] <> 1 or d['b'] <> 2: raise TestFailed, 'dict item'
-d['c'] = 3
-d['a'] = 4
-if d['c'] <> 3 or d['a'] <> 4: raise TestFailed, 'dict item assignment'
-del d['b']
-if d <> {'a': 4, 'c': 3}: raise TestFailed, 'dict item deletion'
-d = {1:1, 2:2, 3:3}
-d.clear()
-if d != {}: raise TestFailed, 'dict clear'
-d.update({1:100})
-d.update({2:20})
-d.update({1:1, 2:2, 3:3})
-if d != {1:1, 2:2, 3:3}: raise TestFailed, 'dict update'
-if d.copy() != {1:1, 2:2, 3:3}: raise TestFailed, 'dict copy'
-if {}.copy() != {}: raise TestFailed, 'empty dict copy'
-# dict.get()
-d = {}
-if d.get('c') != None: raise TestFailed, 'missing {} get, no 2nd arg'
-if d.get('c', 3) != 3: raise TestFailed, 'missing {} get, w/ 2nd arg'
-d = {'a' : 1, 'b' : 2}
-if d.get('c') != None: raise TestFailed, 'missing dict get, no 2nd arg'
-if d.get('c', 3) != 3: raise TestFailed, 'missing dict get, w/ 2nd arg'
-if d.get('a') != 1: raise TestFailed, 'present dict get, no 2nd arg'
-if d.get('a', 3) != 1: raise TestFailed, 'present dict get, w/ 2nd arg'
-# dict.setdefault()
-d = {}
-if d.setdefault('key0') <> None:
- raise TestFailed, 'missing {} setdefault, no 2nd arg'
-if d.setdefault('key0') <> None:
- raise TestFailed, 'present {} setdefault, no 2nd arg'
-d.setdefault('key', []).append(3)
-if d['key'][0] <> 3:
- raise TestFailed, 'missing {} setdefault, w/ 2nd arg'
-d.setdefault('key', []).append(4)
-if len(d['key']) <> 2:
- raise TestFailed, 'present {} setdefault, w/ 2nd arg'
diff --git a/Lib/dos-8x3/test_uni.py b/Lib/dos-8x3/test_uni.py
deleted file mode 100644
index 0c44fbf..0000000
--- a/Lib/dos-8x3/test_uni.py
+++ /dev/null
@@ -1,518 +0,0 @@
-""" Test script for the Unicode implementation.
-
-Written by Marc-Andre Lemburg (mal@lemburg.com).
-
-(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
-
-"""
-from test_support import verbose
-import sys
-
-def test(method, input, output, *args):
- if verbose:
- print '%s.%s%s =? %s... ' % (repr(input), method, args, output),
- try:
- f = getattr(input, method)
- value = apply(f, args)
- except:
- value = sys.exc_type
- exc = sys.exc_info()[:2]
- else:
- exc = None
- if value != output:
- if verbose:
- print 'no'
- print '*',f, `input`, `output`, `value`
- if exc:
- print ' value == %s: %s' % (exc)
- else:
- if verbose:
- print 'yes'
-
-test('capitalize', u' hello ', u' hello ')
-test('capitalize', u'hello ', u'Hello ')
-
-test('title', u' hello ', u' Hello ')
-test('title', u'hello ', u'Hello ')
-test('title', u"fOrMaT thIs aS titLe String", u'Format This As Title String')
-test('title', u"fOrMaT,thIs-aS*titLe;String", u'Format,This-As*Title;String')
-test('title', u"getInt", u'Getint')
-
-test('find', u'abcdefghiabc', 0, u'abc')
-test('find', u'abcdefghiabc', 9, u'abc', 1)
-test('find', u'abcdefghiabc', -1, u'def', 4)
-
-test('rfind', u'abcdefghiabc', 9, u'abc')
-
-test('lower', u'HeLLo', u'hello')
-test('lower', u'hello', u'hello')
-
-test('upper', u'HeLLo', u'HELLO')
-test('upper', u'HELLO', u'HELLO')
-
-if 0:
- transtable = '\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`xyzdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377'
-
- test('maketrans', u'abc', transtable, u'xyz')
- test('maketrans', u'abc', ValueError, u'xyzq')
-
-test('split', u'this is the split function',
- [u'this', u'is', u'the', u'split', u'function'])
-test('split', u'a|b|c|d', [u'a', u'b', u'c', u'd'], u'|')
-test('split', u'a|b|c|d', [u'a', u'b', u'c|d'], u'|', 2)
-test('split', u'a b c d', [u'a', u'b c d'], None, 1)
-test('split', u'a b c d', [u'a', u'b', u'c d'], None, 2)
-test('split', u'a b c d', [u'a', u'b', u'c', u'd'], None, 3)
-test('split', u'a b c d', [u'a', u'b', u'c', u'd'], None, 4)
-test('split', u'a b c d', [u'a b c d'], None, 0)
-test('split', u'a b c d', [u'a', u'b', u'c d'], None, 2)
-test('split', u'a b c d ', [u'a', u'b', u'c', u'd'])
-
-# join now works with any sequence type
-class Sequence:
- def __init__(self): self.seq = 'wxyz'
- def __len__(self): return len(self.seq)
- def __getitem__(self, i): return self.seq[i]
-
-test('join', u' ', u'a b c d', [u'a', u'b', u'c', u'd'])
-test('join', u'', u'abcd', (u'a', u'b', u'c', u'd'))
-test('join', u' ', u'w x y z', Sequence())
-test('join', u' ', TypeError, 7)
-
-class BadSeq(Sequence):
- def __init__(self): self.seq = [7, u'hello', 123L]
-
-test('join', u' ', TypeError, BadSeq())
-
-result = u''
-for i in range(10):
- if i > 0:
- result = result + u':'
- result = result + u'x'*10
-test('join', u':', result, [u'x' * 10] * 10)
-test('join', u':', result, (u'x' * 10,) * 10)
-
-test('strip', u' hello ', u'hello')
-test('lstrip', u' hello ', u'hello ')
-test('rstrip', u' hello ', u' hello')
-test('strip', u'hello', u'hello')
-
-test('swapcase', u'HeLLo cOmpUteRs', u'hEllO CoMPuTErS')
-
-if 0:
- test('translate', u'xyzabcdef', u'xyzxyz', transtable, u'def')
-
- table = string.maketrans('a', u'A')
- test('translate', u'abc', u'Abc', table)
- test('translate', u'xyz', u'xyz', table)
-
-test('replace', u'one!two!three!', u'one@two!three!', u'!', u'@', 1)
-test('replace', u'one!two!three!', u'onetwothree', '!', '')
-test('replace', u'one!two!three!', u'one@two@three!', u'!', u'@', 2)
-test('replace', u'one!two!three!', u'one@two@three@', u'!', u'@', 3)
-test('replace', u'one!two!three!', u'one@two@three@', u'!', u'@', 4)
-test('replace', u'one!two!three!', u'one!two!three!', u'!', u'@', 0)
-test('replace', u'one!two!three!', u'one@two@three@', u'!', u'@')
-test('replace', u'one!two!three!', u'one!two!three!', u'x', u'@')
-test('replace', u'one!two!three!', u'one!two!three!', u'x', u'@', 2)
-
-test('startswith', u'hello', 1, u'he')
-test('startswith', u'hello', 1, u'hello')
-test('startswith', u'hello', 0, u'hello world')
-test('startswith', u'hello', 1, u'')
-test('startswith', u'hello', 0, u'ello')
-test('startswith', u'hello', 1, u'ello', 1)
-test('startswith', u'hello', 1, u'o', 4)
-test('startswith', u'hello', 0, u'o', 5)
-test('startswith', u'hello', 1, u'', 5)
-test('startswith', u'hello', 0, u'lo', 6)
-test('startswith', u'helloworld', 1, u'lowo', 3)
-test('startswith', u'helloworld', 1, u'lowo', 3, 7)
-test('startswith', u'helloworld', 0, u'lowo', 3, 6)
-
-test('endswith', u'hello', 1, u'lo')
-test('endswith', u'hello', 0, u'he')
-test('endswith', u'hello', 1, u'')
-test('endswith', u'hello', 0, u'hello world')
-test('endswith', u'helloworld', 0, u'worl')
-test('endswith', u'helloworld', 1, u'worl', 3, 9)
-test('endswith', u'helloworld', 1, u'world', 3, 12)
-test('endswith', u'helloworld', 1, u'lowo', 1, 7)
-test('endswith', u'helloworld', 1, u'lowo', 2, 7)
-test('endswith', u'helloworld', 1, u'lowo', 3, 7)
-test('endswith', u'helloworld', 0, u'lowo', 4, 7)
-test('endswith', u'helloworld', 0, u'lowo', 3, 8)
-test('endswith', u'ab', 0, u'ab', 0, 1)
-test('endswith', u'ab', 0, u'ab', 0, 0)
-
-test('expandtabs', u'abc\rab\tdef\ng\thi', u'abc\rab def\ng hi')
-test('expandtabs', u'abc\rab\tdef\ng\thi', u'abc\rab def\ng hi', 8)
-test('expandtabs', u'abc\rab\tdef\ng\thi', u'abc\rab def\ng hi', 4)
-test('expandtabs', u'abc\r\nab\tdef\ng\thi', u'abc\r\nab def\ng hi', 4)
-
-if 0:
- test('capwords', u'abc def ghi', u'Abc Def Ghi')
- test('capwords', u'abc\tdef\nghi', u'Abc Def Ghi')
- test('capwords', u'abc\t def \nghi', u'Abc Def Ghi')
-
-# Comparisons:
-print 'Testing Unicode comparisons...',
-assert u'abc' == 'abc'
-assert 'abc' == u'abc'
-assert u'abc' == u'abc'
-assert u'abcd' > 'abc'
-assert 'abcd' > u'abc'
-assert u'abcd' > u'abc'
-assert u'abc' < 'abcd'
-assert 'abc' < u'abcd'
-assert u'abc' < u'abcd'
-print 'done.'
-
-if 0:
- # Move these tests to a Unicode collation module test...
-
- print 'Testing UTF-16 code point order comparisons...',
- #No surrogates, no fixup required.
- assert u'\u0061' < u'\u20ac'
- # Non surrogate below surrogate value, no fixup required
- assert u'\u0061' < u'\ud800\udc02'
-
- # Non surrogate above surrogate value, fixup required
- def test_lecmp(s, s2):
- assert s < s2 , "comparison failed on %s < %s" % (s, s2)
-
- def test_fixup(s):
- s2 = u'\ud800\udc01'
- test_lecmp(s, s2)
- s2 = u'\ud900\udc01'
- test_lecmp(s, s2)
- s2 = u'\uda00\udc01'
- test_lecmp(s, s2)
- s2 = u'\udb00\udc01'
- test_lecmp(s, s2)
- s2 = u'\ud800\udd01'
- test_lecmp(s, s2)
- s2 = u'\ud900\udd01'
- test_lecmp(s, s2)
- s2 = u'\uda00\udd01'
- test_lecmp(s, s2)
- s2 = u'\udb00\udd01'
- test_lecmp(s, s2)
- s2 = u'\ud800\ude01'
- test_lecmp(s, s2)
- s2 = u'\ud900\ude01'
- test_lecmp(s, s2)
- s2 = u'\uda00\ude01'
- test_lecmp(s, s2)
- s2 = u'\udb00\ude01'
- test_lecmp(s, s2)
- s2 = u'\ud800\udfff'
- test_lecmp(s, s2)
- s2 = u'\ud900\udfff'
- test_lecmp(s, s2)
- s2 = u'\uda00\udfff'
- test_lecmp(s, s2)
- s2 = u'\udb00\udfff'
- test_lecmp(s, s2)
-
- test_fixup(u'\ue000')
- test_fixup(u'\uff61')
-
- # Surrogates on both sides, no fixup required
- assert u'\ud800\udc02' < u'\ud84d\udc56'
- print 'done.'
-
-test('ljust', u'abc', u'abc ', 10)
-test('rjust', u'abc', u' abc', 10)
-test('center', u'abc', u' abc ', 10)
-test('ljust', u'abc', u'abc ', 6)
-test('rjust', u'abc', u' abc', 6)
-test('center', u'abc', u' abc ', 6)
-test('ljust', u'abc', u'abc', 2)
-test('rjust', u'abc', u'abc', 2)
-test('center', u'abc', u'abc', 2)
-
-test('islower', u'a', 1)
-test('islower', u'A', 0)
-test('islower', u'\n', 0)
-test('islower', u'\u1FFc', 0)
-test('islower', u'abc', 1)
-test('islower', u'aBc', 0)
-test('islower', u'abc\n', 1)
-
-test('isupper', u'a', 0)
-test('isupper', u'A', 1)
-test('isupper', u'\n', 0)
-test('isupper', u'\u1FFc', 0)
-test('isupper', u'ABC', 1)
-test('isupper', u'AbC', 0)
-test('isupper', u'ABC\n', 1)
-
-test('istitle', u'a', 0)
-test('istitle', u'A', 1)
-test('istitle', u'\n', 0)
-test('istitle', u'\u1FFc', 1)
-test('istitle', u'A Titlecased Line', 1)
-test('istitle', u'A\nTitlecased Line', 1)
-test('istitle', u'A Titlecased, Line', 1)
-test('istitle', u'Greek \u1FFcitlecases ...', 1)
-test('istitle', u'Not a capitalized String', 0)
-test('istitle', u'Not\ta Titlecase String', 0)
-test('istitle', u'Not--a Titlecase String', 0)
-
-test('isalpha', u'a', 1)
-test('isalpha', u'A', 1)
-test('isalpha', u'\n', 0)
-test('isalpha', u'\u1FFc', 1)
-test('isalpha', u'abc', 1)
-test('isalpha', u'aBc123', 0)
-test('isalpha', u'abc\n', 0)
-
-test('isalnum', u'a', 1)
-test('isalnum', u'A', 1)
-test('isalnum', u'\n', 0)
-test('isalnum', u'123abc456', 1)
-test('isalnum', u'a1b3c', 1)
-test('isalnum', u'aBc000 ', 0)
-test('isalnum', u'abc\n', 0)
-
-test('splitlines', u"abc\ndef\n\rghi", [u'abc', u'def', u'', u'ghi'])
-test('splitlines', u"abc\ndef\n\r\nghi", [u'abc', u'def', u'', u'ghi'])
-test('splitlines', u"abc\ndef\r\nghi", [u'abc', u'def', u'ghi'])
-test('splitlines', u"abc\ndef\r\nghi\n", [u'abc', u'def', u'ghi'])
-test('splitlines', u"abc\ndef\r\nghi\n\r", [u'abc', u'def', u'ghi', u''])
-test('splitlines', u"\nabc\ndef\r\nghi\n\r", [u'', u'abc', u'def', u'ghi', u''])
-test('splitlines', u"\nabc\ndef\r\nghi\n\r", [u'\n', u'abc\n', u'def\r\n', u'ghi\n', u'\r'], 1)
-
-test('translate', u"abababc", u'bbbc', {ord('a'):None})
-test('translate', u"abababc", u'iiic', {ord('a'):None, ord('b'):ord('i')})
-test('translate', u"abababc", u'iiix', {ord('a'):None, ord('b'):ord('i'), ord('c'):u'x'})
-
-# Contains:
-print 'Testing Unicode contains method...',
-assert ('a' in u'abdb') == 1
-assert ('a' in u'bdab') == 1
-assert ('a' in u'bdaba') == 1
-assert ('a' in u'bdba') == 1
-assert ('a' in u'bdba') == 1
-assert (u'a' in u'bdba') == 1
-assert (u'a' in u'bdb') == 0
-assert (u'a' in 'bdb') == 0
-assert (u'a' in 'bdba') == 1
-assert (u'a' in ('a',1,None)) == 1
-assert (u'a' in (1,None,'a')) == 1
-assert (u'a' in (1,None,u'a')) == 1
-assert ('a' in ('a',1,None)) == 1
-assert ('a' in (1,None,'a')) == 1
-assert ('a' in (1,None,u'a')) == 1
-assert ('a' in ('x',1,u'y')) == 0
-assert ('a' in ('x',1,None)) == 0
-print 'done.'
-
-# Formatting:
-print 'Testing Unicode formatting strings...',
-assert u"%s, %s" % (u"abc", "abc") == u'abc, abc'
-assert u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", 1, 2, 3) == u'abc, abc, 1, 2.000000, 3.00'
-assert u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", 1, -2, 3) == u'abc, abc, 1, -2.000000, 3.00'
-assert u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", -1, -2, 3.5) == u'abc, abc, -1, -2.000000, 3.50'
-assert u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", -1, -2, 3.57) == u'abc, abc, -1, -2.000000, 3.57'
-assert u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", -1, -2, 1003.57) == u'abc, abc, -1, -2.000000, 1003.57'
-assert u"%c" % (u"a",) == u'a'
-assert u"%c" % ("a",) == u'a'
-assert u"%c" % (34,) == u'"'
-assert u"%c" % (36,) == u'$'
-value = u"%r, %r" % (u"abc", "abc")
-if value != u"u'abc', 'abc'":
- print '*** formatting failed for "%s"' % 'u"%r, %r" % (u"abc", "abc")'
-
-assert u"%(x)s, %(y)s" % {'x':u"abc", 'y':"def"} == u'abc, def'
-try:
- value = u"%(x)s, %(ä)s" % {'x':u"abc", u'ä'.encode('utf-8'):"def"}
-except KeyError:
- print '*** formatting failed for "%s"' % "u'abc, def'"
-else:
- assert value == u'abc, def'
-
-# formatting jobs delegated from the string implementation:
-assert '...%(foo)s...' % {'foo':u"abc"} == u'...abc...'
-assert '...%(foo)s...' % {'foo':"abc"} == '...abc...'
-assert '...%(foo)s...' % {u'foo':"abc"} == '...abc...'
-assert '...%(foo)s...' % {u'foo':u"abc"} == u'...abc...'
-assert '...%(foo)s...' % {u'foo':u"abc",'def':123} == u'...abc...'
-assert '...%(foo)s...' % {u'foo':u"abc",u'def':123} == u'...abc...'
-assert '...%s...%s...%s...%s...' % (1,2,3,u"abc") == u'...1...2...3...abc...'
-assert '...%%...%%s...%s...%s...%s...%s...' % (1,2,3,u"abc") == u'...%...%s...1...2...3...abc...'
-assert '...%s...' % u"abc" == u'...abc...'
-print 'done.'
-
-# Test builtin codecs
-print 'Testing builtin codecs...',
-
-# UTF-8 specific encoding tests:
-assert u'\u20ac'.encode('utf-8') == \
- ''.join((chr(0xe2), chr(0x82), chr(0xac)))
-assert u'\ud800\udc02'.encode('utf-8') == \
- ''.join((chr(0xf0), chr(0x90), chr(0x80), chr(0x82)))
-assert u'\ud84d\udc56'.encode('utf-8') == \
- ''.join((chr(0xf0), chr(0xa3), chr(0x91), chr(0x96)))
-# UTF-8 specific decoding tests
-assert unicode(''.join((chr(0xf0), chr(0xa3), chr(0x91), chr(0x96))),
- 'utf-8') == u'\ud84d\udc56'
-assert unicode(''.join((chr(0xf0), chr(0x90), chr(0x80), chr(0x82))),
- 'utf-8') == u'\ud800\udc02'
-assert unicode(''.join((chr(0xe2), chr(0x82), chr(0xac))),
- 'utf-8') == u'\u20ac'
-
-# Other possible utf-8 test cases:
-# * strict decoding testing for all of the
-# UTF8_ERROR cases in PyUnicode_DecodeUTF8
-
-
-
-assert unicode('hello','ascii') == u'hello'
-assert unicode('hello','utf-8') == u'hello'
-assert unicode('hello','utf8') == u'hello'
-assert unicode('hello','latin-1') == u'hello'
-
-class String:
- x = ''
- def __str__(self):
- return self.x
-
-o = String()
-
-o.x = 'abc'
-assert unicode(o) == u'abc'
-assert str(o) == 'abc'
-
-o.x = u'abc'
-assert unicode(o) == u'abc'
-assert str(o) == 'abc'
-
-try:
- u'Andr\202 x'.encode('ascii')
- u'Andr\202 x'.encode('ascii','strict')
-except ValueError:
- pass
-else:
- raise AssertionError, "u'Andr\202'.encode('ascii') failed to raise an exception"
-assert u'Andr\202 x'.encode('ascii','ignore') == "Andr x"
-assert u'Andr\202 x'.encode('ascii','replace') == "Andr? x"
-
-try:
- unicode('Andr\202 x','ascii')
- unicode('Andr\202 x','ascii','strict')
-except ValueError:
- pass
-else:
- raise AssertionError, "unicode('Andr\202') failed to raise an exception"
-assert unicode('Andr\202 x','ascii','ignore') == u"Andr x"
-assert unicode('Andr\202 x','ascii','replace') == u'Andr\uFFFD x'
-
-assert u'hello'.encode('ascii') == 'hello'
-assert u'hello'.encode('utf-8') == 'hello'
-assert u'hello'.encode('utf8') == 'hello'
-assert u'hello'.encode('utf-16-le') == 'h\000e\000l\000l\000o\000'
-assert u'hello'.encode('utf-16-be') == '\000h\000e\000l\000l\000o'
-assert u'hello'.encode('latin-1') == 'hello'
-
-u = u''.join(map(unichr, range(1024)))
-for encoding in ('utf-8', 'utf-16', 'utf-16-le', 'utf-16-be',
- 'raw_unicode_escape', 'unicode_escape', 'unicode_internal'):
- assert unicode(u.encode(encoding),encoding) == u
-
-u = u''.join(map(unichr, range(256)))
-for encoding in (
- 'latin-1',
- ):
- try:
- assert unicode(u.encode(encoding),encoding) == u
- except AssertionError:
- print '*** codec "%s" failed round-trip' % encoding
- except ValueError,why:
- print '*** codec for "%s" failed: %s' % (encoding, why)
-
-u = u''.join(map(unichr, range(128)))
-for encoding in (
- 'ascii',
- ):
- try:
- assert unicode(u.encode(encoding),encoding) == u
- except AssertionError:
- print '*** codec "%s" failed round-trip' % encoding
- except ValueError,why:
- print '*** codec for "%s" failed: %s' % (encoding, why)
-
-print 'done.'
-
-print 'Testing standard mapping codecs...',
-
-print '0-127...',
-s = ''.join(map(chr, range(128)))
-for encoding in (
- 'cp037', 'cp1026',
- 'cp437', 'cp500', 'cp737', 'cp775', 'cp850',
- 'cp852', 'cp855', 'cp860', 'cp861', 'cp862',
- 'cp863', 'cp865', 'cp866',
- 'iso8859_10', 'iso8859_13', 'iso8859_14', 'iso8859_15',
- 'iso8859_2', 'iso8859_3', 'iso8859_4', 'iso8859_5', 'iso8859_6',
- 'iso8859_7', 'iso8859_9', 'koi8_r', 'latin_1',
- 'mac_cyrillic', 'mac_latin2',
-
- 'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
- 'cp1256', 'cp1257', 'cp1258',
- 'cp856', 'cp857', 'cp864', 'cp869', 'cp874',
-
- 'mac_greek', 'mac_iceland','mac_roman', 'mac_turkish',
- 'cp1006', 'cp875', 'iso8859_8',
-
- ### These have undefined mappings:
- #'cp424',
-
- ):
- try:
- assert unicode(s,encoding).encode(encoding) == s
- except AssertionError:
- print '*** codec "%s" failed round-trip' % encoding
- except ValueError,why:
- print '*** codec for "%s" failed: %s' % (encoding, why)
-
-print '128-255...',
-s = ''.join(map(chr, range(128,256)))
-for encoding in (
- 'cp037', 'cp1026',
- 'cp437', 'cp500', 'cp737', 'cp775', 'cp850',
- 'cp852', 'cp855', 'cp860', 'cp861', 'cp862',
- 'cp863', 'cp865', 'cp866',
- 'iso8859_10', 'iso8859_13', 'iso8859_14', 'iso8859_15',
- 'iso8859_2', 'iso8859_3', 'iso8859_4', 'iso8859_5', 'iso8859_6',
- 'iso8859_7', 'iso8859_9', 'koi8_r', 'latin_1',
- 'mac_cyrillic', 'mac_latin2',
-
- ### These have undefined mappings:
- #'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
- #'cp1256', 'cp1257', 'cp1258',
- #'cp424', 'cp856', 'cp857', 'cp864', 'cp869', 'cp874',
- #'mac_greek', 'mac_iceland','mac_roman', 'mac_turkish',
-
- ### These fail the round-trip:
- #'cp1006', 'cp875', 'iso8859_8',
-
- ):
- try:
- assert unicode(s,encoding).encode(encoding) == s
- except AssertionError:
- print '*** codec "%s" failed round-trip' % encoding
- except ValueError,why:
- print '*** codec for "%s" failed: %s' % (encoding, why)
-
-print 'done.'
-
-print 'Testing Unicode string concatenation...',
-assert (u"abc" u"def") == u"abcdef"
-assert ("abc" u"def") == u"abcdef"
-assert (u"abc" "def") == u"abcdef"
-assert (u"abc" u"def" "ghi") == u"abcdefghi"
-assert ("abc" "def" u"ghi") == u"abcdefghi"
-print 'done.'
-
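The codec checks above all reduce to one round-trip property: encoding a unicode string and then decoding the result with the same codec must give the original string back. A minimal standalone sketch of that property (the roundtrip helper name is illustrative and not part of the deleted test; the Python 2 builtins unicode/unichr are assumed):

def roundtrip(u, encoding):
    # encode to a byte string, decode it back, compare with the original
    return unicode(u.encode(encoding), encoding) == u

assert roundtrip(u''.join(map(unichr, range(128))), 'ascii')
assert roundtrip(u''.join(map(unichr, range(256))), 'latin-1')
assert roundtrip(u'\u20ac', 'utf-8')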
diff --git a/Lib/dos-8x3/test_unp.py b/Lib/dos-8x3/test_unp.py
deleted file mode 100644
index 25b94e9..0000000
--- a/Lib/dos-8x3/test_unp.py
+++ /dev/null
@@ -1,144 +0,0 @@
-from test_support import *
-
-t = (1, 2, 3)
-l = [4, 5, 6]
-
-class Seq:
- def __getitem__(self, i):
- if i >= 0 and i < 3: return i
- raise IndexError
-
-a = -1
-b = -1
-c = -1
-
-# unpack tuple
-if verbose:
- print 'unpack tuple'
-a, b, c = t
-if a <> 1 or b <> 2 or c <> 3:
- raise TestFailed
-
-# unpack list
-if verbose:
- print 'unpack list'
-a, b, c = l
-if a <> 4 or b <> 5 or c <> 6:
- raise TestFailed
-
-# unpack implied tuple
-if verbose:
- print 'unpack implied tuple'
-a, b, c = 7, 8, 9
-if a <> 7 or b <> 8 or c <> 9:
- raise TestFailed
-
-# unpack string... fun!
-if verbose:
- print 'unpack string'
-a, b, c = 'one'
-if a <> 'o' or b <> 'n' or c <> 'e':
- raise TestFailed
-
-# unpack generic sequence
-if verbose:
- print 'unpack sequence'
-a, b, c = Seq()
-if a <> 0 or b <> 1 or c <> 2:
- raise TestFailed
-
-# single element unpacking, with extra syntax
-if verbose:
- print 'unpack single tuple/list'
-st = (99,)
-sl = [100]
-a, = st
-if a <> 99:
- raise TestFailed
-b, = sl
-if b <> 100:
- raise TestFailed
-
-# now for some failures
-
-# unpacking non-sequence
-if verbose:
- print 'unpack non-sequence'
-try:
- a, b, c = 7
- raise TestFailed
-except TypeError:
- pass
-
-
-# unpacking tuple of wrong size
-if verbose:
- print 'unpack tuple wrong size'
-try:
- a, b = t
- raise TestFailed
-except ValueError:
- pass
-
-# unpacking list of wrong size
-if verbose:
- print 'unpack list wrong size'
-try:
- a, b = l
- raise TestFailed
-except ValueError:
- pass
-
-
-# unpacking sequence too short
-if verbose:
- print 'unpack sequence too short'
-try:
- a, b, c, d = Seq()
- raise TestFailed
-except ValueError:
- pass
-
-
-# unpacking sequence too long
-if verbose:
- print 'unpack sequence too long'
-try:
- a, b = Seq()
- raise TestFailed
-except ValueError:
- pass
-
-
-# unpacking a sequence where the test for too long raises a different
-# kind of error
-class BozoError(Exception):
- pass
-
-class BadSeq:
- def __getitem__(self, i):
- if i >= 0 and i < 3:
- return i
- elif i == 3:
- raise BozoError
- else:
- raise IndexError
-
-
-# trigger code while not expecting an IndexError
-if verbose:
- print 'unpack sequence too long, wrong error'
-try:
- a, b, c, d, e = BadSeq()
- raise TestFailed
-except BozoError:
- pass
-
-# trigger code while expecting an IndexError
-if verbose:
- print 'unpack sequence too short, wrong error'
-try:
- a, b, c = BadSeq()
- raise TestFailed
-except BozoError:
- pass
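Every failure case above follows from a single rule: the right-hand side must produce exactly as many items as there are assignment targets; a length mismatch raises ValueError and a non-sequence raises TypeError. A compressed sketch of the rule (the variable d is only for this illustration):

t = (1, 2, 3)
a, b, c = t            # lengths match: succeeds
try:
    a, b = t           # more values than targets
except ValueError:
    pass
try:
    a, b, c, d = t     # fewer values than targets
except ValueError:
    pass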
diff --git a/Lib/dos-8x3/test_url.py b/Lib/dos-8x3/test_url.py
deleted file mode 100644
index 484acea..0000000
--- a/Lib/dos-8x3/test_url.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# Minimal test of the quote function
-import urllib
-
-chars = 'abcdefghijklmnopqrstuvwxyz'\
- '\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356' \
- '\357\360\361\362\363\364\365\366\370\371\372\373\374\375\376\377' \
- 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' \
- '\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317' \
- '\320\321\322\323\324\325\326\330\331\332\333\334\335\336'
-
-expected = 'abcdefghijklmnopqrstuvwxyz%df%e0%e1%e2%e3%e4%e5%e6%e7%e8%e9%ea%eb%ec%ed%ee%ef%f0%f1%f2%f3%f4%f5%f6%f8%f9%fa%fb%fc%fd%fe%ffABCDEFGHIJKLMNOPQRSTUVWXYZ%c0%c1%c2%c3%c4%c5%c6%c7%c8%c9%ca%cb%cc%cd%ce%cf%d0%d1%d2%d3%d4%d5%d6%d8%d9%da%db%dc%dd%de'
-
-test = urllib.quote(chars)
-assert test == expected, "urllib.quote problem"
-test2 = urllib.unquote(expected)
-assert test2 == chars
-
-in1 = "abc/def"
-out1_1 = "abc/def"
-out1_2 = "abc%2fdef"
-
-assert urllib.quote(in1) == out1_1, "urllib.quote problem"
-assert urllib.quote(in1, '') == out1_2, "urllib.quote problem"
-
-in2 = "abc?def"
-out2_1 = "abc%3fdef"
-out2_2 = "abc?def"
-
-assert urllib.quote(in2) == out2_1, "urllib.quote problem"
-assert urllib.quote(in2, '?') == out2_2, "urllib.quote problem"
-
-
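The two pairs of expectations above differ only in the optional 'safe' argument to urllib.quote(): characters listed in it (default '/') pass through unescaped, while other non-alphanumeric characters are percent-encoded. A short sketch, assuming only the Python 2 urllib module and avoiding any assumption about the case of the hex digits:

import urllib

assert urllib.quote("abc/def") == "abc/def"       # '/' is in the default safe set
assert urllib.quote("abc/def", "") != "abc/def"   # no safe chars: '/' is percent-escaped
assert urllib.unquote(urllib.quote("abc?def")) == "abc?def"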
diff --git a/Lib/dos-8x3/test_use.py b/Lib/dos-8x3/test_use.py
deleted file mode 100644
index 63632f7..0000000
--- a/Lib/dos-8x3/test_use.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# Check every path through every method of UserDict
-
-from UserDict import UserDict
-
-d0 = {}
-d1 = {"one": 1}
-d2 = {"one": 1, "two": 2}
-
-# Test constructors
-
-u = UserDict()
-u0 = UserDict(d0)
-u1 = UserDict(d1)
-u2 = UserDict(d2)
-
-uu = UserDict(u)
-uu0 = UserDict(u0)
-uu1 = UserDict(u1)
-uu2 = UserDict(u2)
-
-# Test __repr__
-
-assert str(u0) == str(d0)
-assert repr(u1) == repr(d1)
-assert `u2` == `d2`
-
-# Test __cmp__ and __len__
-
-all = [d0, d1, d2, u, u0, u1, u2, uu, uu0, uu1, uu2]
-for a in all:
- for b in all:
- assert cmp(a, b) == cmp(len(a), len(b))
-
-# Test __getitem__
-
-assert u2["one"] == 1
-try:
- u1["two"]
-except KeyError:
- pass
-else:
- assert 0, "u1['two'] shouldn't exist"
-
-# Test __setitem__
-
-u3 = UserDict(u2)
-u3["two"] = 2
-u3["three"] = 3
-
-# Test __delitem__
-
-del u3["three"]
-try:
- del u3["three"]
-except KeyError:
- pass
-else:
- assert 0, "u3['three'] shouldn't exist"
-
-# Test clear
-
-u3.clear()
-assert u3 == {}
-
-# Test copy()
-
-u2a = u2.copy()
-assert u2a == u2
-
-class MyUserDict(UserDict):
- def display(self): print self
-
-m2 = MyUserDict(u2)
-m2a = m2.copy()
-assert m2a == m2
-
-# Test keys, items, values
-
-assert u2.keys() == d2.keys()
-assert u2.items() == d2.items()
-assert u2.values() == d2.values()
-
-# Test has_key
-
-for i in u2.keys():
- assert u2.has_key(i) == 1
- assert u1.has_key(i) == d1.has_key(i)
- assert u0.has_key(i) == d0.has_key(i)
-
-# Test update
-
-t = UserDict()
-t.update(u2)
-assert t == u2
-
-# Test get
-
-for i in u2.keys():
- assert u2.get(i) == u2[i]
- assert u1.get(i) == d1.get(i)
- assert u0.get(i) == d0.get(i)
diff --git a/Lib/dos-8x3/test_wav.py b/Lib/dos-8x3/test_wav.py
deleted file mode 100644
index a7a5e24..0000000
--- a/Lib/dos-8x3/test_wav.py
+++ /dev/null
@@ -1,34 +0,0 @@
-from test_support import TestFailed
-import os, tempfile
-import wave
-
-def check(t, msg=None):
- if not t:
- raise TestFailed, msg
-
-nchannels = 2
-sampwidth = 2
-framerate = 8000
-nframes = 100
-
-testfile = tempfile.mktemp()
-
-f = wave.open(testfile, 'w')
-f.setnchannels(nchannels)
-f.setsampwidth(sampwidth)
-f.setframerate(framerate)
-f.setnframes(nframes)
-output = '\0' * nframes * nchannels * sampwidth
-f.writeframes(output)
-f.close()
-
-f = wave.open(testfile, 'r')
-check(nchannels == f.getnchannels(), "nchannels")
-check(sampwidth == f.getsampwidth(), "sampwidth")
-check(framerate == f.getframerate(), "framerate")
-check(nframes == f.getnframes(), "nframes")
-input = f.readframes(nframes)
-check(input == output, "data")
-f.close()
-
-os.remove(testfile)
diff --git a/Lib/dos-8x3/test_win.py b/Lib/dos-8x3/test_win.py
deleted file mode 100644
index 18ce7a7..0000000
--- a/Lib/dos-8x3/test_win.py
+++ /dev/null
@@ -1,147 +0,0 @@
-# Test the Windows-specific _winreg module.
-# Only _winreg functions not hit here: FlushKey, LoadKey and SaveKey
-
-from _winreg import *
-import os, sys
-
-test_key_name = "SOFTWARE\\Python Registry Test Key - Delete Me"
-
-test_data = [
- ("Int Value", 45, REG_DWORD),
- ("String Val", "A string value", REG_SZ,),
- (u"Unicode Val", u"A Unicode value", REG_SZ,),
- ("StringExpand", "The path is %path%", REG_EXPAND_SZ),
- ("UnicodeExpand", u"The path is %path%", REG_EXPAND_SZ),
- ("Multi-string", ["Lots", "of", "string", "values"], REG_MULTI_SZ),
- ("Multi-unicode", [u"Lots", u"of", u"unicode", u"values"], REG_MULTI_SZ),
- ("Multi-mixed", [u"Unicode", u"and", "string", "values"],REG_MULTI_SZ),
- ("Raw Data", ("binary"+chr(0)+"data"), REG_BINARY),
-]
-
-def WriteTestData(root_key):
- # Set the default value for this key.
- SetValue(root_key, test_key_name, REG_SZ, "Default value")
- key = CreateKey(root_key, test_key_name)
- # Create a sub-key
- sub_key = CreateKey(key, "sub_key")
- # Give the sub-key some named values
-
- for value_name, value_data, value_type in test_data:
- SetValueEx(sub_key, value_name, 0, value_type, value_data)
-
- # Check we wrote as many items as we thought.
- nkeys, nvalues, since_mod = QueryInfoKey(key)
- assert nkeys==1, "Not the correct number of sub keys"
- assert nvalues==1, "Not the correct number of values"
- nkeys, nvalues, since_mod = QueryInfoKey(sub_key)
- assert nkeys==0, "Not the correct number of sub keys"
- assert nvalues==len(test_data), "Not the correct number of values"
- # Close this key this way...
- # (but before we do, copy the key as an integer - this allows
- # us to test that the key really gets closed).
- int_sub_key = int(sub_key)
- CloseKey(sub_key)
- try:
- QueryInfoKey(int_sub_key)
- raise RuntimeError, "It appears the CloseKey() function does not close the actual key!"
- except EnvironmentError:
- pass
- # ... and close that key that way :-)
- int_key = int(key)
- key.Close()
- try:
- QueryInfoKey(int_key)
- raise RuntimeError, "It appears the key.Close() function does not close the actual key!"
- except EnvironmentError:
- pass
-
-def ReadTestData(root_key):
- # Check we can get default value for this key.
- val = QueryValue(root_key, test_key_name)
- assert val=="Default value", "Registry didn't give back the correct value"
-
- key = OpenKey(root_key, test_key_name)
- # Read the sub-keys
- sub_key = OpenKey(key, "sub_key")
- # Check I can enumerate over the values.
- index = 0
- while 1:
- try:
- data = EnumValue(sub_key, index)
- except EnvironmentError:
- break
- assert data in test_data, "Didn't read back the correct test data"
- index = index + 1
- assert index==len(test_data), "Didn't read the correct number of items"
- # Check I can directly access each item
- for value_name, value_data, value_type in test_data:
- read_val, read_typ = QueryValueEx(sub_key, value_name)
- assert read_val==value_data and read_typ == value_type, \
- "Could not directly read the value"
- sub_key.Close()
- # Enumerate our main key.
- read_val = EnumKey(key, 0)
- assert read_val == "sub_key", "Read subkey value wrong"
- try:
- EnumKey(key, 1)
- assert 0, "Was able to get a second key when I only have one!"
- except EnvironmentError:
- pass
-
- key.Close()
-
-def DeleteTestData(root_key):
- key = OpenKey(root_key, test_key_name, 0, KEY_ALL_ACCESS)
- sub_key = OpenKey(key, "sub_key", 0, KEY_ALL_ACCESS)
- # It is not necessary to delete the values before deleting
- # the key (although subkeys must not exist). We delete them
- # manually just to prove we can :-)
- for value_name, value_data, value_type in test_data:
- DeleteValue(sub_key, value_name)
-
- nkeys, nvalues, since_mod = QueryInfoKey(sub_key)
- assert nkeys==0 and nvalues==0, "subkey not empty before delete"
- sub_key.Close()
- DeleteKey(key, "sub_key")
-
- try:
-        # Shouldn't be able to delete it twice!
- DeleteKey(key, "sub_key")
- assert 0, "Deleting the key twice succeeded"
- except EnvironmentError:
- pass
- key.Close()
- DeleteKey(root_key, test_key_name)
- # Opening should now fail!
- try:
- key = OpenKey(root_key, test_key_name)
- assert 0, "Could open the non-existent key"
- except WindowsError: # Use this error name this time
- pass
-
-def TestAll(root_key):
- WriteTestData(root_key)
- ReadTestData(root_key)
- DeleteTestData(root_key)
-
-# Test on my local machine.
-TestAll(HKEY_CURRENT_USER)
-print "Local registry tests worked"
-try:
- remote_name = sys.argv[sys.argv.index("--remote")+1]
-except (IndexError, ValueError):
- remote_name = None
-
-if remote_name is not None:
- try:
- remote_key = ConnectRegistry(remote_name, HKEY_CURRENT_USER)
- except EnvironmentError, exc:
- print "Could not connect to the remote machine -", exc.strerror
- remote_key = None
- if remote_key is not None:
- TestAll(remote_key)
- print "Remote registry tests worked"
-else:
- print "Remote registry calls can be tested using",
- print "'test_winreg.py --remote \\\\machine_name'"
-
diff --git a/Lib/dos-8x3/test_xml.py b/Lib/dos-8x3/test_xml.py
deleted file mode 100644
index eb868a3..0000000
--- a/Lib/dos-8x3/test_xml.py
+++ /dev/null
@@ -1,25 +0,0 @@
-'''Test module to test the xmllib module.
- Sjoerd Mullender
-'''
-
-from test_support import verbose
-
-testdoc = """\
-<?xml version="1.0" encoding="UTF-8" standalone='yes' ?>
-<!-- comments aren't allowed before the <?xml?> tag,
- but they are allowed before the <!DOCTYPE> tag -->
-<!DOCTYPE greeting [
- <!ELEMENT greeting (#PCDATA)>
-]>
-<greeting>Hello, world!</greeting>
-"""
-
-import xmllib
-if verbose:
- parser = xmllib.TestXMLParser()
-else:
- parser = xmllib.XMLParser()
-
-for c in testdoc:
- parser.feed(c)
-parser.close()
diff --git a/Lib/dos-8x3/test_zip.py b/Lib/dos-8x3/test_zip.py
deleted file mode 100644
index 3ffdd19..0000000
--- a/Lib/dos-8x3/test_zip.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import zipfile, os
-
-srcname = "junk9630.tmp"
-zipname = "junk9708.tmp"
-
-try:
- fp = open(srcname, "w") # Make a source file with some lines
- for i in range(0, 1000):
- fp.write("Test of zipfile line %d.\n" % i)
- fp.close()
-
- zip = zipfile.ZipFile(zipname, "w") # Create the ZIP archive
- zip.write(srcname, srcname)
- zip.write(srcname, "another.name")
- zip.close()
-
- zip = zipfile.ZipFile(zipname, "r") # Read the ZIP archive
- zip.read("another.name")
- zip.read(srcname)
- zip.close()
-finally:
- if os.path.isfile(srcname): # Remove temporary files
- os.unlink(srcname)
- if os.path.isfile(zipname):
- os.unlink(zipname)
-
diff --git a/Lib/dos-8x3/test_zli.py b/Lib/dos-8x3/test_zli.py
deleted file mode 100644
index d22a2cc..0000000
--- a/Lib/dos-8x3/test_zli.py
+++ /dev/null
@@ -1,161 +0,0 @@
-import zlib
-import sys
-import imp
-import string
-
-try:
- t = imp.find_module('test_zlib')
- file = t[0]
-except ImportError:
- file = open(__file__)
-buf = file.read() * 8
-file.close()
-
-# test the checksums (hex so the test doesn't break on 64-bit machines)
-print hex(zlib.crc32('penguin')), hex(zlib.crc32('penguin', 1))
-print hex(zlib.adler32('penguin')), hex(zlib.adler32('penguin', 1))
-
-# make sure we generate some expected errors
-try:
- zlib.compress('ERROR', zlib.MAX_WBITS + 1)
-except zlib.error, msg:
- print "expecting", msg
-try:
- zlib.compressobj(1, 8, 0)
-except ValueError, msg:
- print "expecting", msg
-try:
- zlib.decompressobj(0)
-except ValueError, msg:
- print "expecting", msg
-
-x = zlib.compress(buf)
-y = zlib.decompress(x)
-if buf != y:
- print "normal compression/decompression failed"
-else:
- print "normal compression/decompression succeeded"
-
-buf = buf * 16
-
-co = zlib.compressobj(8, 8, -15)
-x1 = co.compress(buf)
-x2 = co.flush()
-x = x1 + x2
-
-dc = zlib.decompressobj(-15)
-y1 = dc.decompress(x)
-y2 = dc.flush()
-y = y1 + y2
-if buf != y:
- print "compress/decompression obj failed"
-else:
- print "compress/decompression obj succeeded"
-
-co = zlib.compressobj(2, 8, -12, 9, 1)
-bufs = []
-for i in range(0, len(buf), 256):
- bufs.append(co.compress(buf[i:i+256]))
-bufs.append(co.flush())
-combuf = string.join(bufs, '')
-
-decomp1 = zlib.decompress(combuf, -12, -5)
-if decomp1 != buf:
- print "decompress with init options failed"
-else:
- print "decompress with init options succeeded"
-
-deco = zlib.decompressobj(-12)
-bufs = []
-for i in range(0, len(combuf), 128):
- bufs.append(deco.decompress(combuf[i:i+128]))
-bufs.append(deco.flush())
-decomp2 = string.join(bufs, '')
-if decomp2 != buf:
- print "decompressobj with init options failed"
-else:
- print "decompressobj with init options succeeded"
-
-# Test flush() with the various options, using all the different levels
-# in order to provide more variations.
-for sync in [zlib.Z_NO_FLUSH, zlib.Z_SYNC_FLUSH, zlib.Z_FULL_FLUSH]:
- for level in range(10):
- obj = zlib.compressobj( level )
- d = obj.compress( buf[:3000] )
- d = d + obj.flush( sync )
- d = d + obj.compress( buf[3000:] )
- d = d + obj.flush()
- if zlib.decompress(d) != buf:
- print "Decompress failed: flush mode=%i, level=%i" % (sync,level)
- del obj
-
-def ignore():
- """An empty function with a big string.
-
- Make the compression algorithm work a little harder.
- """
-
- """
-LAERTES
-
- O, fear me not.
- I stay too long: but here my father comes.
-
- Enter POLONIUS
-
- A double blessing is a double grace,
- Occasion smiles upon a second leave.
-
-LORD POLONIUS
-
- Yet here, Laertes! aboard, aboard, for shame!
- The wind sits in the shoulder of your sail,
- And you are stay'd for. There; my blessing with thee!
- And these few precepts in thy memory
- See thou character. Give thy thoughts no tongue,
- Nor any unproportioned thought his act.
- Be thou familiar, but by no means vulgar.
- Those friends thou hast, and their adoption tried,
- Grapple them to thy soul with hoops of steel;
- But do not dull thy palm with entertainment
- Of each new-hatch'd, unfledged comrade. Beware
- Of entrance to a quarrel, but being in,
- Bear't that the opposed may beware of thee.
- Give every man thy ear, but few thy voice;
- Take each man's censure, but reserve thy judgment.
- Costly thy habit as thy purse can buy,
- But not express'd in fancy; rich, not gaudy;
- For the apparel oft proclaims the man,
- And they in France of the best rank and station
- Are of a most select and generous chief in that.
- Neither a borrower nor a lender be;
- For loan oft loses both itself and friend,
- And borrowing dulls the edge of husbandry.
- This above all: to thine ownself be true,
- And it must follow, as the night the day,
- Thou canst not then be false to any man.
- Farewell: my blessing season this in thee!
-
-LAERTES
-
- Most humbly do I take my leave, my lord.
-
-LORD POLONIUS
-
- The time invites you; go; your servants tend.
-
-LAERTES
-
- Farewell, Ophelia; and remember well
- What I have said to you.
-
-OPHELIA
-
- 'Tis in my memory lock'd,
- And you yourself shall keep the key of it.
-
-LAERTES
-
- Farewell.
-"""
-
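Both of the object-based passes above follow the same streaming protocol: feed data to compress()/decompress() in chunks, then call flush() once to drain whatever the object is still buffering. A condensed sketch of that protocol, using only the default options of the standard zlib module (the data string is illustrative):

import zlib

data = "zlib streaming example " * 200
co = zlib.compressobj()
packed = co.compress(data) + co.flush()          # flush() emits the trailing block

do = zlib.decompressobj()
assert do.decompress(packed) + do.flush() == data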
diff --git a/Lib/dos-8x3/threadin.py b/Lib/dos-8x3/threadin.py
deleted file mode 100644
index 4921f0e..0000000
--- a/Lib/dos-8x3/threadin.py
+++ /dev/null
@@ -1,631 +0,0 @@
-"""Proposed new threading module, emulating a subset of Java's threading model."""
-
-import sys
-import time
-import thread
-import traceback
-import StringIO
-
-# Rename some stuff so "from threading import *" is safe
-
-_sys = sys
-del sys
-
-_time = time.time
-_sleep = time.sleep
-del time
-
-_start_new_thread = thread.start_new_thread
-_allocate_lock = thread.allocate_lock
-_get_ident = thread.get_ident
-ThreadError = thread.error
-del thread
-
-_print_exc = traceback.print_exc
-del traceback
-
-_StringIO = StringIO.StringIO
-del StringIO
-
-
-# Debug support (adapted from ihooks.py)
-
-_VERBOSE = 0
-
-if __debug__:
-
- class _Verbose:
-
- def __init__(self, verbose=None):
- if verbose is None:
- verbose = _VERBOSE
- self.__verbose = verbose
-
- def _note(self, format, *args):
- if self.__verbose:
- format = format % args
- format = "%s: %s\n" % (
- currentThread().getName(), format)
- _sys.stderr.write(format)
-
-else:
- # Disable this when using "python -O"
- class _Verbose:
- def __init__(self, verbose=None):
- pass
- def _note(self, *args):
- pass
-
-
-# Synchronization classes
-
-Lock = _allocate_lock
-
-def RLock(*args, **kwargs):
- return apply(_RLock, args, kwargs)
-
-class _RLock(_Verbose):
-
- def __init__(self, verbose=None):
- _Verbose.__init__(self, verbose)
- self.__block = _allocate_lock()
- self.__owner = None
- self.__count = 0
-
- def __repr__(self):
- return "<%s(%s, %d)>" % (
- self.__class__.__name__,
- self.__owner and self.__owner.getName(),
- self.__count)
-
- def acquire(self, blocking=1):
- me = currentThread()
- if self.__owner is me:
- self.__count = self.__count + 1
- if __debug__:
- self._note("%s.acquire(%s): recursive success", self, blocking)
- return 1
- rc = self.__block.acquire(blocking)
- if rc:
- self.__owner = me
- self.__count = 1
- if __debug__:
-                self._note("%s.acquire(%s): initial success", self, blocking)
- else:
- if __debug__:
- self._note("%s.acquire(%s): failure", self, blocking)
- return rc
-
- def release(self):
- me = currentThread()
- assert self.__owner is me, "release() of un-acquire()d lock"
- self.__count = count = self.__count - 1
- if not count:
- self.__owner = None
- self.__block.release()
- if __debug__:
- self._note("%s.release(): final release", self)
- else:
- if __debug__:
- self._note("%s.release(): non-final release", self)
-
- # Internal methods used by condition variables
-
- def _acquire_restore(self, (count, owner)):
- self.__block.acquire()
- self.__count = count
- self.__owner = owner
- if __debug__:
- self._note("%s._acquire_restore()", self)
-
- def _release_save(self):
- if __debug__:
- self._note("%s._release_save()", self)
- count = self.__count
- self.__count = 0
- owner = self.__owner
- self.__owner = None
- self.__block.release()
- return (count, owner)
-
- def _is_owned(self):
- return self.__owner is currentThread()
-
-
-def Condition(*args, **kwargs):
- return apply(_Condition, args, kwargs)
-
-class _Condition(_Verbose):
-
- def __init__(self, lock=None, verbose=None):
- _Verbose.__init__(self, verbose)
- if lock is None:
- lock = RLock()
- self.__lock = lock
- # Export the lock's acquire() and release() methods
- self.acquire = lock.acquire
- self.release = lock.release
- # If the lock defines _release_save() and/or _acquire_restore(),
- # these override the default implementations (which just call
- # release() and acquire() on the lock). Ditto for _is_owned().
- try:
- self._release_save = lock._release_save
- except AttributeError:
- pass
- try:
- self._acquire_restore = lock._acquire_restore
- except AttributeError:
- pass
- try:
- self._is_owned = lock._is_owned
- except AttributeError:
- pass
- self.__waiters = []
-
- def __repr__(self):
- return "<Condition(%s, %d)>" % (self.__lock, len(self.__waiters))
-
- def _release_save(self):
- self.__lock.release() # No state to save
-
- def _acquire_restore(self, x):
- self.__lock.acquire() # Ignore saved state
-
- def _is_owned(self):
- if self.__lock.acquire(0):
- self.__lock.release()
- return 0
- else:
- return 1
-
- def wait(self, timeout=None):
- me = currentThread()
- assert self._is_owned(), "wait() of un-acquire()d lock"
- waiter = _allocate_lock()
- waiter.acquire()
- self.__waiters.append(waiter)
- saved_state = self._release_save()
- if timeout is None:
- waiter.acquire()
- if __debug__:
- self._note("%s.wait(): got it", self)
- else:
- endtime = _time() + timeout
- delay = 0.000001 # 1 usec
- while 1:
- gotit = waiter.acquire(0)
- if gotit or _time() >= endtime:
- break
- _sleep(delay)
- if delay < 1.0:
- delay = delay * 2.0
- if not gotit:
- if __debug__:
- self._note("%s.wait(%s): timed out", self, timeout)
- try:
- self.__waiters.remove(waiter)
- except ValueError:
- pass
- else:
- if __debug__:
- self._note("%s.wait(%s): got it", self, timeout)
- self._acquire_restore(saved_state)
-
- def notify(self, n=1):
- me = currentThread()
- assert self._is_owned(), "notify() of un-acquire()d lock"
- __waiters = self.__waiters
- waiters = __waiters[:n]
- if not waiters:
- if __debug__:
- self._note("%s.notify(): no waiters", self)
- return
- self._note("%s.notify(): notifying %d waiter%s", self, n,
- n!=1 and "s" or "")
- for waiter in waiters:
- waiter.release()
- try:
- __waiters.remove(waiter)
- except ValueError:
- pass
-
- def notifyAll(self):
- self.notify(len(self.__waiters))
-
-
-def Semaphore(*args, **kwargs):
- return apply(_Semaphore, args, kwargs)
-
-class _Semaphore(_Verbose):
-
- # After Tim Peters' semaphore class, but not quite the same (no maximum)
-
- def __init__(self, value=1, verbose=None):
- assert value >= 0, "Semaphore initial value must be >= 0"
- _Verbose.__init__(self, verbose)
- self.__cond = Condition(Lock())
- self.__value = value
-
- def acquire(self, blocking=1):
- rc = 0
- self.__cond.acquire()
- while self.__value == 0:
- if not blocking:
- break
- self.__cond.wait()
- else:
- self.__value = self.__value - 1
- rc = 1
- self.__cond.release()
- return rc
-
- def release(self):
- self.__cond.acquire()
- self.__value = self.__value + 1
- self.__cond.notify()
- self.__cond.release()
-
-
-def Event(*args, **kwargs):
- return apply(_Event, args, kwargs)
-
-class _Event(_Verbose):
-
- # After Tim Peters' event class (without is_posted())
-
- def __init__(self, verbose=None):
- _Verbose.__init__(self, verbose)
- self.__cond = Condition(Lock())
- self.__flag = 0
-
- def isSet(self):
- return self.__flag
-
- def set(self):
- self.__cond.acquire()
- self.__flag = 1
- self.__cond.notifyAll()
- self.__cond.release()
-
- def clear(self):
- self.__cond.acquire()
- self.__flag = 0
- self.__cond.release()
-
- def wait(self, timeout=None):
- self.__cond.acquire()
- if not self.__flag:
- self.__cond.wait(timeout)
- self.__cond.release()
-
-
-# Helper to generate new thread names
-_counter = 0
-def _newname(template="Thread-%d"):
- global _counter
- _counter = _counter + 1
- return template % _counter
-
-# Active thread administration
-_active_limbo_lock = _allocate_lock()
-_active = {}
-_limbo = {}
-
-
-# Main class for threads
-
-class Thread(_Verbose):
-
- __initialized = 0
-
- def __init__(self, group=None, target=None, name=None,
- args=(), kwargs={}, verbose=None):
- assert group is None, "group argument must be None for now"
- _Verbose.__init__(self, verbose)
- self.__target = target
- self.__name = str(name or _newname())
- self.__args = args
- self.__kwargs = kwargs
- self.__daemonic = self._set_daemon()
- self.__started = 0
- self.__stopped = 0
- self.__block = Condition(Lock())
- self.__initialized = 1
-
- def _set_daemon(self):
- # Overridden in _MainThread and _DummyThread
- return currentThread().isDaemon()
-
- def __repr__(self):
- assert self.__initialized, "Thread.__init__() was not called"
- status = "initial"
- if self.__started:
- status = "started"
- if self.__stopped:
- status = "stopped"
- if self.__daemonic:
- status = status + " daemon"
- return "<%s(%s, %s)>" % (self.__class__.__name__, self.__name, status)
-
- def start(self):
- assert self.__initialized, "Thread.__init__() not called"
- assert not self.__started, "thread already started"
- if __debug__:
- self._note("%s.start(): starting thread", self)
- _active_limbo_lock.acquire()
- _limbo[self] = self
- _active_limbo_lock.release()
- _start_new_thread(self.__bootstrap, ())
- self.__started = 1
- _sleep(0.000001) # 1 usec, to let the thread run (Solaris hack)
-
- def run(self):
- if self.__target:
- apply(self.__target, self.__args, self.__kwargs)
-
- def __bootstrap(self):
- try:
- self.__started = 1
- _active_limbo_lock.acquire()
- _active[_get_ident()] = self
- del _limbo[self]
- _active_limbo_lock.release()
- if __debug__:
- self._note("%s.__bootstrap(): thread started", self)
- try:
- self.run()
- except SystemExit:
- if __debug__:
- self._note("%s.__bootstrap(): raised SystemExit", self)
- except:
- if __debug__:
- self._note("%s.__bootstrap(): unhandled exception", self)
- s = _StringIO()
- _print_exc(file=s)
- _sys.stderr.write("Exception in thread %s:\n%s\n" %
- (self.getName(), s.getvalue()))
- else:
- if __debug__:
- self._note("%s.__bootstrap(): normal return", self)
- finally:
- self.__stop()
- self.__delete()
-
- def __stop(self):
- self.__block.acquire()
- self.__stopped = 1
- self.__block.notifyAll()
- self.__block.release()
-
- def __delete(self):
- _active_limbo_lock.acquire()
- del _active[_get_ident()]
- _active_limbo_lock.release()
-
- def join(self, timeout=None):
- assert self.__initialized, "Thread.__init__() not called"
- assert self.__started, "cannot join thread before it is started"
- assert self is not currentThread(), "cannot join current thread"
- if __debug__:
- if not self.__stopped:
- self._note("%s.join(): waiting until thread stops", self)
- self.__block.acquire()
- if timeout is None:
- while not self.__stopped:
- self.__block.wait()
- if __debug__:
- self._note("%s.join(): thread stopped", self)
- else:
- deadline = _time() + timeout
- while not self.__stopped:
- delay = deadline - _time()
- if delay <= 0:
- if __debug__:
- self._note("%s.join(): timed out", self)
- break
- self.__block.wait(delay)
- else:
- if __debug__:
- self._note("%s.join(): thread stopped", self)
- self.__block.release()
-
- def getName(self):
- assert self.__initialized, "Thread.__init__() not called"
- return self.__name
-
- def setName(self, name):
- assert self.__initialized, "Thread.__init__() not called"
- self.__name = str(name)
-
- def isAlive(self):
- assert self.__initialized, "Thread.__init__() not called"
- return self.__started and not self.__stopped
-
- def isDaemon(self):
- assert self.__initialized, "Thread.__init__() not called"
- return self.__daemonic
-
- def setDaemon(self, daemonic):
- assert self.__initialized, "Thread.__init__() not called"
- assert not self.__started, "cannot set daemon status of active thread"
- self.__daemonic = daemonic
-
-
-# Special thread class to represent the main thread
-# This is garbage collected through an exit handler
-
-class _MainThread(Thread):
-
- def __init__(self):
- Thread.__init__(self, name="MainThread")
- self._Thread__started = 1
- _active_limbo_lock.acquire()
- _active[_get_ident()] = self
- _active_limbo_lock.release()
- import atexit
- atexit.register(self.__exitfunc)
-
- def _set_daemon(self):
- return 0
-
- def __exitfunc(self):
- self._Thread__stop()
- t = _pickSomeNonDaemonThread()
- if t:
- if __debug__:
- self._note("%s: waiting for other threads", self)
- while t:
- t.join()
- t = _pickSomeNonDaemonThread()
- if __debug__:
- self._note("%s: exiting", self)
- self._Thread__delete()
-
-def _pickSomeNonDaemonThread():
- for t in enumerate():
- if not t.isDaemon() and t.isAlive():
- return t
- return None
-
-
-# Dummy thread class to represent threads not started here.
-# These aren't garbage collected when they die,
-# nor can they be waited for.
-# Their purpose is to return *something* from currentThread().
-# They are marked as daemon threads so we won't wait for them
-# when we exit (conforming to previous semantics).
-
-class _DummyThread(Thread):
-
- def __init__(self):
- Thread.__init__(self, name=_newname("Dummy-%d"))
- self._Thread__started = 1
- _active_limbo_lock.acquire()
- _active[_get_ident()] = self
- _active_limbo_lock.release()
-
- def _set_daemon(self):
- return 1
-
- def join(self):
- assert 0, "cannot join a dummy thread"
-
-
-# Global API functions
-
-def currentThread():
- try:
- return _active[_get_ident()]
- except KeyError:
- print "currentThread(): no current thread for", _get_ident()
- return _DummyThread()
-
-def activeCount():
- _active_limbo_lock.acquire()
- count = len(_active) + len(_limbo)
- _active_limbo_lock.release()
- return count
-
-def enumerate():
- _active_limbo_lock.acquire()
- active = _active.values() + _limbo.values()
- _active_limbo_lock.release()
- return active
-
-
-# Create the main thread object
-
-_MainThread()
-
-
-# Self-test code
-
-def _test():
-
- import random
-
- class BoundedQueue(_Verbose):
-
- def __init__(self, limit):
- _Verbose.__init__(self)
- self.mon = RLock()
- self.rc = Condition(self.mon)
- self.wc = Condition(self.mon)
- self.limit = limit
- self.queue = []
-
- def put(self, item):
- self.mon.acquire()
- while len(self.queue) >= self.limit:
- self._note("put(%s): queue full", item)
- self.wc.wait()
- self.queue.append(item)
- self._note("put(%s): appended, length now %d",
- item, len(self.queue))
- self.rc.notify()
- self.mon.release()
-
- def get(self):
- self.mon.acquire()
- while not self.queue:
- self._note("get(): queue empty")
- self.rc.wait()
- item = self.queue[0]
- del self.queue[0]
- self._note("get(): got %s, %d left", item, len(self.queue))
- self.wc.notify()
- self.mon.release()
- return item
-
- class ProducerThread(Thread):
-
- def __init__(self, queue, quota):
- Thread.__init__(self, name="Producer")
- self.queue = queue
- self.quota = quota
-
- def run(self):
- from random import random
- counter = 0
- while counter < self.quota:
- counter = counter + 1
- self.queue.put("%s.%d" % (self.getName(), counter))
- _sleep(random() * 0.00001)
-
-
- class ConsumerThread(Thread):
-
- def __init__(self, queue, count):
- Thread.__init__(self, name="Consumer")
- self.queue = queue
- self.count = count
-
- def run(self):
- while self.count > 0:
- item = self.queue.get()
- print item
- self.count = self.count - 1
-
- import time
-
- NP = 3
- QL = 4
- NI = 5
-
- Q = BoundedQueue(QL)
- P = []
- for i in range(NP):
- t = ProducerThread(Q, NI)
- t.setName("Producer-%d" % (i+1))
- P.append(t)
- C = ConsumerThread(Q, NI*NP)
- for t in P:
- t.start()
- _sleep(0.000001)
- C.start()
- for t in P:
- t.join()
- C.join()
-
-if __name__ == '__main__':
- _test()
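BoundedQueue in _test() is the idiomatic use of the Condition class defined above: hold the lock, wait() in a loop while the predicate is false, change the shared state, then notify(). A stripped-down sketch of that pattern against this module's public names (the items list and the put/get helpers are illustrative; the module is assumed importable as threading):

from threading import Condition, RLock   # public names defined above

lock = RLock()
cv = Condition(lock)
items = []

def put(item):
    cv.acquire()
    items.append(item)
    cv.notify()                  # wake one waiter, if any
    cv.release()

def get():
    cv.acquire()
    while not items:             # re-check the predicate after every wait()
        cv.wait()
    item = items.pop(0)
    cv.release()
    return item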
diff --git a/Lib/dos-8x3/tokenize.py b/Lib/dos-8x3/tokenize.py
deleted file mode 100644
index 4883668..0000000
--- a/Lib/dos-8x3/tokenize.py
+++ /dev/null
@@ -1,161 +0,0 @@
-# Tests for the 'tokenize' module.
-# Large bits stolen from test_grammar.py.
-
-# Comments
-"#"
-#'
-#"
-#\
- #
- # abc
-'''#
-#'''
-
-x = 1 #
-
-# Balancing continuation
-
-a = (3, 4,
- 5, 6)
-y = [3, 4,
- 5]
-z = {'a':5,
- 'b':6}
-x = (len(`y`) + 5*x - a[
- 3 ]
- - x + len({
- }
- )
- )
-
-# Backslash means line continuation:
-x = 1 \
-+ 1
-
-# Backslash does not mean continuation in comments :\
-x = 0
-
-# Ordinary integers
-0xff <> 255
-0377 <> 255
-2147483647 != 017777777777
--2147483647-1 != 020000000000
-037777777777 != -1
-0xffffffff != -1
-
-# Long integers
-x = 0L
-x = 0l
-x = 0xffffffffffffffffL
-x = 0xffffffffffffffffl
-x = 077777777777777777L
-x = 077777777777777777l
-x = 123456789012345678901234567890L
-x = 123456789012345678901234567890l
-
-# Floating-point numbers
-x = 3.14
-x = 314.
-x = 0.314
-# XXX x = 000.314
-x = .314
-x = 3e14
-x = 3E14
-x = 3e-14
-x = 3e+14
-x = 3.e14
-x = .3e14
-x = 3.1e4
-
-# String literals
-x = ''; y = "";
-x = '\''; y = "'";
-x = '"'; y = "\"";
-x = "doesn't \"shrink\" does it"
-y = 'doesn\'t "shrink" does it'
-x = "does \"shrink\" doesn't it"
-y = 'does "shrink" doesn\'t it'
-x = """
-The "quick"
-brown fox
-jumps over
-the 'lazy' dog.
-"""
-y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
-y = '''
-The "quick"
-brown fox
-jumps over
-the 'lazy' dog.
-''';
-y = "\n\
-The \"quick\"\n\
-brown fox\n\
-jumps over\n\
-the 'lazy' dog.\n\
-";
-y = '\n\
-The \"quick\"\n\
-brown fox\n\
-jumps over\n\
-the \'lazy\' dog.\n\
-';
-x = r'\\' + R'\\'
-x = r'\'' + ''
-y = r'''
-foo bar \\
-baz''' + R'''
-foo'''
-y = r"""foo
-bar \\ baz
-""" + R'''spam
-'''
-
-# Indentation
-if 1:
- x = 2
-if 1:
- x = 2
-if 1:
- while 0:
- if 0:
- x = 2
- x = 2
-if 0:
- if 2:
- while 0:
- if 1:
- x = 2
-
-# Operators
-
-def d22(a, b, c=1, d=2): pass
-def d01v(a=1, *rest, **rest): pass
-
-(x, y) <> ({'a':1}, {'b':2})
-
-# comparison
-if 1 < 1 > 1 == 1 >= 1 <= 1 <> 1 != 1 in 1 not in 1 is 1 is not 1: pass
-
-# binary
-x = 1 & 1
-x = 1 ^ 1
-x = 1 | 1
-
-# shift
-x = 1 << 1 >> 1
-
-# additive
-x = 1 - 1 + 1 - 1 + 1
-
-# multiplicative
-x = 1 / 1 * 1 % 1
-
-# unary
-x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
-x = -1*1/1 + 1*1 - ---1*1
-
-# selector
-import sys, time
-x = sys.modules['time'].time()
-
diff --git a/Lib/dos-8x3/tracebac.py b/Lib/dos-8x3/tracebac.py
deleted file mode 100755
index b733598..0000000
--- a/Lib/dos-8x3/tracebac.py
+++ /dev/null
@@ -1,274 +0,0 @@
-"""Extract, format and print information about Python stack traces."""
-
-import linecache
-import string
-import sys
-import types
-
-def _print(file, str='', terminator='\n'):
- file.write(str+terminator)
-
-
-def print_list(extracted_list, file=None):
- """Print the list of tuples as returned by extract_tb() or
- extract_stack() as a formatted stack trace to the given file."""
- if not file:
- file = sys.stderr
- for filename, lineno, name, line in extracted_list:
- _print(file,
- ' File "%s", line %d, in %s' % (filename,lineno,name))
- if line:
- _print(file, ' %s' % string.strip(line))
-
-def format_list(extracted_list):
- """Given a list of tuples as returned by extract_tb() or
- extract_stack(), return a list of strings ready for printing.
- Each string in the resulting list corresponds to the item with
- the same index in the argument list. Each string ends in a
- newline; the strings may contain internal newlines as well, for
- those items whose source text line is not None."""
- list = []
- for filename, lineno, name, line in extracted_list:
- item = ' File "%s", line %d, in %s\n' % (filename,lineno,name)
- if line:
- item = item + ' %s\n' % string.strip(line)
- list.append(item)
- return list
-
-
-def print_tb(tb, limit=None, file=None):
- """Print up to 'limit' stack trace entries from the traceback 'tb'.
- If 'limit' is omitted or None, all entries are printed. If 'file' is
- omitted or None, the output goes to sys.stderr; otherwise 'file'
- should be an open file or file-like object with a write() method."""
- if not file:
- file = sys.stderr
- if limit is None:
- if hasattr(sys, 'tracebacklimit'):
- limit = sys.tracebacklimit
- n = 0
- while tb is not None and (limit is None or n < limit):
- f = tb.tb_frame
- lineno = tb_lineno(tb)
- co = f.f_code
- filename = co.co_filename
- name = co.co_name
- _print(file,
- ' File "%s", line %d, in %s' % (filename,lineno,name))
- line = linecache.getline(filename, lineno)
- if line: _print(file, ' ' + string.strip(line))
- tb = tb.tb_next
- n = n+1
-
-def format_tb(tb, limit = None):
-    """A shorthand for 'format_list(extract_tb(tb, limit))'."""
- return format_list(extract_tb(tb, limit))
-
-def extract_tb(tb, limit = None):
- """Return a list of up to 'limit' pre-processed stack trace entries
- extracted from the traceback object 'traceback'. This is useful for
- alternate formatting of stack traces. If 'limit' is omitted or None,
- all entries are extracted. A pre-processed stack trace entry is a
- quadruple (filename, line number, function name, text) representing
- the information that is usually printed for a stack trace. The text
- is a string with leading and trailing whitespace stripped; if the
- source is not available it is None."""
- if limit is None:
- if hasattr(sys, 'tracebacklimit'):
- limit = sys.tracebacklimit
- list = []
- n = 0
- while tb is not None and (limit is None or n < limit):
- f = tb.tb_frame
- lineno = tb_lineno(tb)
- co = f.f_code
- filename = co.co_filename
- name = co.co_name
- line = linecache.getline(filename, lineno)
- if line: line = string.strip(line)
- else: line = None
- list.append((filename, lineno, name, line))
- tb = tb.tb_next
- n = n+1
- return list
-
-
-def print_exception(etype, value, tb, limit=None, file=None):
- """Print exception information and up to 'limit' stack trace entries
- from the traceback 'tb' to 'file'. This differs from print_tb() in
- the following ways: (1) if traceback is not None, it prints a header
- "Traceback (most recent call last):"; (2) it prints the exception type and
- value after the stack trace; (3) if type is SyntaxError and value has
- the appropriate format, it prints the line where the syntax error
- occurred with a caret on the next line indicating the approximate
- position of the error."""
- if not file:
- file = sys.stderr
- if tb:
- _print(file, 'Traceback (most recent call last):')
- print_tb(tb, limit, file)
- lines = format_exception_only(etype, value)
- for line in lines[:-1]:
- _print(file, line, ' ')
- _print(file, lines[-1], '')
-
-def format_exception(etype, value, tb, limit = None):
- """Format a stack trace and the exception information. The arguments
- have the same meaning as the corresponding arguments to
- print_exception(). The return value is a list of strings, each
- ending in a newline and some containing internal newlines. When
- these lines are concatenated and printed, exactly the same text is
- printed as does print_exception()."""
- if tb:
- list = ['Traceback (most recent call last):\n']
- list = list + format_tb(tb, limit)
- else:
- list = []
- list = list + format_exception_only(etype, value)
- return list
-
-def format_exception_only(etype, value):
- """Format the exception part of a traceback. The arguments are the
- exception type and value such as given by sys.last_type and
- sys.last_value. The return value is a list of strings, each ending
- in a newline. Normally, the list contains a single string;
- however, for SyntaxError exceptions, it contains several lines that
- (when printed) display detailed information about where the syntax
- error occurred. The message indicating which exception occurred is
-    always the last string in the list."""
- list = []
- if type(etype) == types.ClassType:
- stype = etype.__name__
- else:
- stype = etype
- if value is None:
- list.append(str(stype) + '\n')
- else:
- if etype is SyntaxError:
- try:
- msg, (filename, lineno, offset, line) = value
- except:
- pass
- else:
- if not filename: filename = "<string>"
- list.append(' File "%s", line %d\n' %
- (filename, lineno))
- i = 0
- while i < len(line) and \
- line[i] in string.whitespace:
- i = i+1
- list.append(' %s\n' % string.strip(line))
- s = ' '
- for c in line[i:offset-1]:
- if c in string.whitespace:
- s = s + c
- else:
- s = s + ' '
- list.append('%s^\n' % s)
- value = msg
- list.append('%s: %s\n' % (str(stype), _some_str(value)))
- return list
-
-def _some_str(value):
- try:
- return str(value)
- except:
- return '<unprintable %s object>' % type(value).__name__
-
-
-def print_exc(limit=None, file=None):
- """This is a shorthand for 'print_exception(sys.exc_type,
- sys.exc_value, sys.exc_traceback, limit, file)'.
- (In fact, it uses sys.exc_info() to retrieve the same information
- in a thread-safe way.)"""
- if not file:
- file = sys.stderr
- try:
- etype, value, tb = sys.exc_info()
- print_exception(etype, value, tb, limit, file)
- finally:
- etype = value = tb = None
-
-def print_last(limit=None, file=None):
- """This is a shorthand for 'print_exception(sys.last_type,
- sys.last_value, sys.last_traceback, limit, file)'."""
- if not file:
- file = sys.stderr
- print_exception(sys.last_type, sys.last_value, sys.last_traceback,
- limit, file)
-
-
-def print_stack(f=None, limit=None, file=None):
- """This function prints a stack trace from its invocation point.
- The optional 'f' argument can be used to specify an alternate stack
- frame at which to start. The optional 'limit' and 'file' arguments
- have the same meaning as for print_exception()."""
- if f is None:
- try:
- raise ZeroDivisionError
- except ZeroDivisionError:
- f = sys.exc_info()[2].tb_frame.f_back
- print_list(extract_stack(f, limit), file)
-
-def format_stack(f=None, limit=None):
- """A shorthand for 'format_list(extract_stack(f, limit))'."""
- if f is None:
- try:
- raise ZeroDivisionError
- except ZeroDivisionError:
- f = sys.exc_info()[2].tb_frame.f_back
- return format_list(extract_stack(f, limit))
-
-def extract_stack(f=None, limit = None):
- """Extract the raw traceback from the current stack frame. The
- return value has the same format as for extract_tb(). The optional
- 'f' and 'limit' arguments have the same meaning as for print_stack().
- Each item in the list is a quadruple (filename, line number,
- function name, text), and the entries are in order from oldest
- to newest stack frame."""
- if f is None:
- try:
- raise ZeroDivisionError
- except ZeroDivisionError:
- f = sys.exc_info()[2].tb_frame.f_back
- if limit is None:
- if hasattr(sys, 'tracebacklimit'):
- limit = sys.tracebacklimit
- list = []
- n = 0
- while f is not None and (limit is None or n < limit):
- lineno = f.f_lineno # XXX Too bad if -O is used
- co = f.f_code
- filename = co.co_filename
- name = co.co_name
- line = linecache.getline(filename, lineno)
- if line: line = string.strip(line)
- else: line = None
- list.append((filename, lineno, name, line))
- f = f.f_back
- n = n+1
- list.reverse()
- return list
-
-def tb_lineno(tb):
- """Calculate the correct line number of the traceback given in tb
- (even with -O on)."""
-
- # Coded by Marc-Andre Lemburg from the example of PyCode_Addr2Line()
- # in compile.c.
- # Revised version by Jim Hugunin to work with JPython too.
-
- c = tb.tb_frame.f_code
- if not hasattr(c, 'co_lnotab'):
- return tb.tb_lineno
-
- tab = c.co_lnotab
- line = c.co_firstlineno
- stopat = tb.tb_lasti
- addr = 0
- for i in range(0, len(tab), 2):
- addr = addr + ord(tab[i])
- if addr > stopat:
- break
- line = line + ord(tab[i+1])
- return line
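
The file removed above is the 8.3-name copy of the standard traceback module. As a minimal orientation sketch of how its public helpers fit together (assuming a Python 2 interpreter, which is what this code targets; the division by zero is only an example error):

    import sys
    import traceback

    try:
        1 / 0
    except ZeroDivisionError:
        # print_exc() is print_exception() applied to sys.exc_info()
        traceback.print_exc()
        etype, value, tb = sys.exc_info()
        # extract_tb() yields (filename, lineno, funcname, text) quadruples
        for filename, lineno, name, text in traceback.extract_tb(tb):
            print '%s:%d in %s: %s' % (filename, lineno, name, text)
        # tb_lineno() decodes co_lnotab, so the number is right even under -O
        print traceback.tb_lineno(tb)
        # format_exception_only() gives just the final "Type: message" line(s)
        sys.stderr.write(''.join(traceback.format_exception_only(etype, value)))
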
diff --git a/Lib/dos-8x3/userdict.py b/Lib/dos-8x3/userdict.py
deleted file mode 100755
index 9b6e73b..0000000
--- a/Lib/dos-8x3/userdict.py
+++ /dev/null
@@ -1,40 +0,0 @@
-"""A more or less complete user-defined wrapper around dictionary objects."""
-
-class UserDict:
- def __init__(self, dict=None):
- self.data = {}
- if dict is not None: self.update(dict)
- def __repr__(self): return repr(self.data)
- def __cmp__(self, dict):
- if isinstance(dict, UserDict):
- return cmp(self.data, dict.data)
- else:
- return cmp(self.data, dict)
- def __len__(self): return len(self.data)
- def __getitem__(self, key): return self.data[key]
- def __setitem__(self, key, item): self.data[key] = item
- def __delitem__(self, key): del self.data[key]
- def clear(self): self.data.clear()
- def copy(self):
- if self.__class__ is UserDict:
- return UserDict(self.data)
- import copy
- return copy.copy(self)
- def keys(self): return self.data.keys()
- def items(self): return self.data.items()
- def values(self): return self.data.values()
- def has_key(self, key): return self.data.has_key(key)
- def update(self, dict):
- if isinstance(dict, UserDict):
- self.data.update(dict.data)
- elif isinstance(dict, type(self.data)):
- self.data.update(dict)
- else:
- for k, v in dict.items():
- self.data[k] = v
- def get(self, key, failobj=None):
- return self.data.get(key, failobj)
- def setdefault(self, key, failobj=None):
- if not self.data.has_key(key):
- self.data[key] = failobj
- return self.data[key]
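
UserDict, removed above, let code subclass a dictionary-like object before built-in types became subclassable in Python 2.2; everything is delegated to the plain dict in self.data. A small illustrative sketch (the CaseInsensitiveDict name is ours, not part of the module):

    from UserDict import UserDict

    class CaseInsensitiveDict(UserDict):
        """Illustrative subclass that lower-cases keys on access."""
        def __setitem__(self, key, item):
            self.data[key.lower()] = item
        def __getitem__(self, key):
            return self.data[key.lower()]

    d = CaseInsensitiveDict()
    d['Content-Type'] = 'text/html'
    print d['content-type']        # -> text/html
    print d.keys()                 # -> ['content-type']
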
diff --git a/Lib/dos-8x3/userlist.py b/Lib/dos-8x3/userlist.py
deleted file mode 100755
index e79faea..0000000
--- a/Lib/dos-8x3/userlist.py
+++ /dev/null
@@ -1,79 +0,0 @@
-"""A more or less complete user-defined wrapper around list objects."""
-
-class UserList:
- def __init__(self, initlist=None):
- self.data = []
- if initlist is not None:
- # XXX should this accept an arbitrary sequence?
- if type(initlist) == type(self.data):
- self.data[:] = initlist
- elif isinstance(initlist, UserList):
- self.data[:] = initlist.data[:]
- else:
- self.data = list(initlist)
- def __repr__(self): return repr(self.data)
- def __cmp__(self, other):
- if isinstance(other, UserList):
- return cmp(self.data, other.data)
- else:
- return cmp(self.data, other)
- def __contains__(self, item): return item in self.data
- def __len__(self): return len(self.data)
- def __getitem__(self, i): return self.data[i]
- def __setitem__(self, i, item): self.data[i] = item
- def __delitem__(self, i): del self.data[i]
- def __getslice__(self, i, j):
- i = max(i, 0); j = max(j, 0)
- return self.__class__(self.data[i:j])
- def __setslice__(self, i, j, other):
- i = max(i, 0); j = max(j, 0)
- if isinstance(other, UserList):
- self.data[i:j] = other.data
- elif isinstance(other, type(self.data)):
- self.data[i:j] = other
- else:
- self.data[i:j] = list(other)
- def __delslice__(self, i, j):
- i = max(i, 0); j = max(j, 0)
- del self.data[i:j]
- def __add__(self, other):
- if isinstance(other, UserList):
- return self.__class__(self.data + other.data)
- elif isinstance(other, type(self.data)):
- return self.__class__(self.data + other)
- else:
- return self.__class__(self.data + list(other))
- def __radd__(self, other):
- if isinstance(other, UserList):
- return self.__class__(other.data + self.data)
- elif isinstance(other, type(self.data)):
- return self.__class__(other + self.data)
- else:
- return self.__class__(list(other) + self.data)
- def __iadd__(self, other):
- if isinstance(other, UserList):
- self.data += other.data
- elif isinstance(other, type(self.data)):
- self.data += other
- else:
- self.data += list(other)
- return self
- def __mul__(self, n):
- return self.__class__(self.data*n)
- __rmul__ = __mul__
- def __imul__(self, n):
- self.data *= n
- return self
- def append(self, item): self.data.append(item)
- def insert(self, i, item): self.data.insert(i, item)
- def pop(self, i=-1): return self.data.pop(i)
- def remove(self, item): self.data.remove(item)
- def count(self, item): return self.data.count(item)
- def index(self, item): return self.data.index(item)
- def reverse(self): self.data.reverse()
- def sort(self, *args): apply(self.data.sort, args)
- def extend(self, other):
- if isinstance(other, UserList):
- self.data.extend(other.data)
- else:
- self.data.extend(other)
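
UserList, removed above, plays the same role for lists: operations are delegated to self.data and results are re-wrapped in self.__class__, so subclasses keep their type across slicing and concatenation. A short sketch (the Stack name is illustrative):

    from UserList import UserList

    class Stack(UserList):
        """Illustrative subclass; UserList already supplies pop()."""
        def push(self, item):
            self.data.append(item)

    s = Stack([1, 2])
    s.push(3)
    print s                    # -> [1, 2, 3]
    top = s.pop()              # -> 3, via the inherited pop()
    print (s + [4]).__class__  # concatenation returns another Stack
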
diff --git a/Lib/dos-8x3/userstri.py b/Lib/dos-8x3/userstri.py
deleted file mode 100644
index ea3d515..0000000
--- a/Lib/dos-8x3/userstri.py
+++ /dev/null
@@ -1,171 +0,0 @@
-#!/usr/bin/env python
-## vim:ts=4:et:nowrap
-"""A user-defined wrapper around string objects
-
-Note: string objects have grown methods in Python 1.6
-This module requires Python 1.6 or later.
-"""
-from types import StringType, UnicodeType
-import sys
-
-class UserString:
- def __init__(self, seq):
- if isinstance(seq, StringType) or isinstance(seq, UnicodeType):
- self.data = seq
- elif isinstance(seq, UserString):
- self.data = seq.data[:]
- else:
- self.data = str(seq)
- def __str__(self): return str(self.data)
- def __repr__(self): return repr(self.data)
- def __int__(self): return int(self.data)
- def __long__(self): return long(self.data)
- def __float__(self): return float(self.data)
- def __complex__(self): return complex(self.data)
- def __hash__(self): return hash(self.data)
-
- def __cmp__(self, string):
- if isinstance(string, UserString):
- return cmp(self.data, string.data)
- else:
- return cmp(self.data, string)
- def __contains__(self, char):
- return char in self.data
-
- def __len__(self): return len(self.data)
- def __getitem__(self, index): return self.__class__(self.data[index])
- def __getslice__(self, start, end):
- start = max(start, 0); end = max(end, 0)
- return self.__class__(self.data[start:end])
-
- def __add__(self, other):
- if isinstance(other, UserString):
- return self.__class__(self.data + other.data)
- elif isinstance(other, StringType) or isinstance(other, UnicodeType):
- return self.__class__(self.data + other)
- else:
- return self.__class__(self.data + str(other))
- def __radd__(self, other):
- if isinstance(other, StringType) or isinstance(other, UnicodeType):
- return self.__class__(other + self.data)
- else:
- return self.__class__(str(other) + self.data)
- def __iadd__(self, other):
- if isinstance(other, UserString):
- self.data += other.data
- elif isinstance(other, StringType) or isinstance(other, UnicodeType):
- self.data += other
- else:
- self.data += str(other)
- return self
- def __mul__(self, n):
- return self.__class__(self.data*n)
- __rmul__ = __mul__
-    def __imul__(self, n):
-        self.data *= n
-        return self
-
- # the following methods are defined in alphabetical order:
- def capitalize(self): return self.__class__(self.data.capitalize())
- def center(self, width): return self.__class__(self.data.center(width))
- def count(self, sub, start=0, end=sys.maxint):
- return self.data.count(sub, start, end)
- def encode(self, encoding=None, errors=None): # XXX improve this?
- if encoding:
- if errors:
- return self.__class__(self.data.encode(encoding, errors))
- else:
- return self.__class__(self.data.encode(encoding))
- else:
- return self.__class__(self.data.encode())
- def endswith(self, suffix, start=0, end=sys.maxint):
- return self.data.endswith(suffix, start, end)
- def expandtabs(self, tabsize=8):
- return self.__class__(self.data.expandtabs(tabsize))
- def find(self, sub, start=0, end=sys.maxint):
- return self.data.find(sub, start, end)
- def index(self, sub, start=0, end=sys.maxint):
- return self.data.index(sub, start, end)
- def isalpha(self): return self.data.isalpha()
- def isalnum(self): return self.data.isalnum()
- def isdecimal(self): return self.data.isdecimal()
- def isdigit(self): return self.data.isdigit()
- def islower(self): return self.data.islower()
- def isnumeric(self): return self.data.isnumeric()
- def isspace(self): return self.data.isspace()
- def istitle(self): return self.data.istitle()
- def isupper(self): return self.data.isupper()
- def join(self, seq): return self.data.join(seq)
- def ljust(self, width): return self.__class__(self.data.ljust(width))
- def lower(self): return self.__class__(self.data.lower())
- def lstrip(self): return self.__class__(self.data.lstrip())
- def replace(self, old, new, maxsplit=-1):
- return self.__class__(self.data.replace(old, new, maxsplit))
- def rfind(self, sub, start=0, end=sys.maxint):
- return self.data.rfind(sub, start, end)
- def rindex(self, sub, start=0, end=sys.maxint):
- return self.data.rindex(sub, start, end)
- def rjust(self, width): return self.__class__(self.data.rjust(width))
- def rstrip(self): return self.__class__(self.data.rstrip())
- def split(self, sep=None, maxsplit=-1):
- return self.data.split(sep, maxsplit)
- def splitlines(self, keepends=0): return self.data.splitlines(keepends)
- def startswith(self, prefix, start=0, end=sys.maxint):
- return self.data.startswith(prefix, start, end)
- def strip(self): return self.__class__(self.data.strip())
- def swapcase(self): return self.__class__(self.data.swapcase())
- def title(self): return self.__class__(self.data.title())
- def translate(self, *args):
- return self.__class__(self.data.translate(*args))
- def upper(self): return self.__class__(self.data.upper())
-
-class MutableString(UserString):
- """mutable string objects
-
-    Python strings are immutable objects. This has the advantage that
-    strings may be used as dictionary keys. If this property isn't needed
- and you insist on changing string values in place instead, you may cheat
- and use MutableString.
-
- But the purpose of this class is an educational one: to prevent
- people from inventing their own mutable string class derived
-    from UserString and then forgetting to remove (override) the
-    __hash__ method inherited from UserString. This would lead to
- errors that would be very hard to track down.
-
- A faster and better solution is to rewrite your program using lists."""
- def __init__(self, string=""):
- self.data = string
- def __hash__(self):
- raise TypeError, "unhashable type (it is mutable)"
- def __setitem__(self, index, sub):
- if index < 0 or index >= len(self.data): raise IndexError
- self.data = self.data[:index] + sub + self.data[index+1:]
- def __delitem__(self, index):
- if index < 0 or index >= len(self.data): raise IndexError
- self.data = self.data[:index] + self.data[index+1:]
- def __setslice__(self, start, end, sub):
- start = max(start, 0); end = max(end, 0)
- if isinstance(sub, UserString):
- self.data = self.data[:start]+sub.data+self.data[end:]
- elif isinstance(sub, StringType) or isinstance(sub, UnicodeType):
- self.data = self.data[:start]+sub+self.data[end:]
- else:
- self.data = self.data[:start]+str(sub)+self.data[end:]
- def __delslice__(self, start, end):
- start = max(start, 0); end = max(end, 0)
- self.data = self.data[:start] + self.data[end:]
- def immutable(self):
- return UserString(self.data)
-
-if __name__ == "__main__":
- # execute the regression test to stdout, if called as a script:
- import os
- called_in_dir, called_as = os.path.split(sys.argv[0])
- called_in_dir = os.path.abspath(called_in_dir)
- called_as, py = os.path.splitext(called_as)
- sys.path.append(os.path.join(called_in_dir, 'test'))
- if '-q' in sys.argv:
- import test_support
- test_support.verbose = 0
- __import__('test_' + called_as.lower())
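
UserString and MutableString, removed above, wrap a real string in self.data, delegate every method, and re-wrap string results. A brief sketch of both (Python 2 era; MutableString is the cautionary class its docstring describes):

    from UserString import UserString, MutableString

    s = UserString('hello')
    print s.upper()            # -> HELLO, still wrapped in a UserString
    print len(s + ', world')   # -> 12; concatenation returns a UserString too

    m = MutableString('hello')
    m[0] = 'H'                 # __setitem__ rebuilds the underlying string
    print m                    # -> Hello
    # hash(m) would raise TypeError: MutableString removes hashability on purpose
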
diff --git a/Lib/dos-8x3/webbrows.py b/Lib/dos-8x3/webbrows.py
deleted file mode 100644
index da70e3b..0000000
--- a/Lib/dos-8x3/webbrows.py
+++ /dev/null
@@ -1,225 +0,0 @@
-"""Remote-control interfaces to some browsers."""
-
-import os
-import sys
-
-
-PROCESS_CREATION_DELAY = 4
-
-
-class Error(Exception):
- pass
-
-
-_browsers = {}
-
-def register(name, klass, instance=None):
- """Register a browser connector and, optionally, connection."""
- _browsers[name.lower()] = [klass, instance]
-
-
-def get(name=None):
- """Retrieve a connection to a browser by type name, or the default
- browser."""
- name = name or DEFAULT_BROWSER
- try:
- L = _browsers[name.lower()]
- except KeyError:
- raise ValueError, "unknown browser type: " + `name`
- if L[1] is None:
- L[1] = L[0]()
- return L[1]
-
-
-# Please note: the following definition hides a builtin function.
-
-def open(url, new=0):
- get().open(url, new)
-
-
-def open_new(url):
- get().open_new(url)
-
-
-def _iscommand(cmd):
- """Return true if cmd can be found on the executable search path."""
- path = os.environ.get("PATH")
- if not path:
- return 0
- for d in path.split(os.pathsep):
- exe = os.path.join(d, cmd)
- if os.path.isfile(exe):
- return 1
- return 0
-
-
-class CommandLineBrowser:
- _browsers = []
- if os.environ.get("DISPLAY"):
- _browsers.extend([
- ("netscape", "netscape %s >/dev/null &"),
- ("mosaic", "mosaic %s >/dev/null &"),
- ])
- _browsers.extend([
- ("lynx", "lynx %s"),
- ("w3m", "w3m %s"),
- ])
-
- def open(self, url, new=0):
- for exe, cmd in self._browsers:
- if _iscommand(exe):
- os.system(cmd % url)
- return
- raise Error("could not locate runnable browser")
-
- def open_new(self, url):
- self.open(url)
-
-register("command-line", CommandLineBrowser)
-
-
-class Netscape:
- autoRaise = 1
-
- def _remote(self, action):
- raise_opt = ("-noraise", "-raise")[self.autoRaise]
- cmd = "netscape %s -remote '%s' >/dev/null 2>&1" % (raise_opt, action)
- rc = os.system(cmd)
- if rc:
- import time
- os.system("netscape -no-about-splash &")
- time.sleep(PROCESS_CREATION_DELAY)
- rc = os.system(cmd)
- return not rc
-
- def open(self, url, new=0):
- if new:
- self.open_new(url)
- else:
- self._remote("openURL(%s)" % url)
-
- def open_new(self, url):
- self._remote("openURL(%s, new-window)" % url)
-
-register("netscape", Netscape)
-
-
-class Konquerer:
-    """Controller for the KDE File Manager (kfm, or Konqueror).
-
-    See http://developer.kde.org/documentation/other/kfmclient.html
-    for more information on the Konqueror remote-control interface.
-
- """
- def _remote(self, action):
- cmd = "kfmclient %s >/dev/null 2>&1" % action
- rc = os.system(cmd)
- if rc:
- import time
- os.system("kfm -d &")
- time.sleep(PROCESS_CREATION_DELAY)
- rc = os.system(cmd)
- return not rc
-
- def open(self, url, new=1):
- # XXX currently I know no way to prevent KFM from opening a new win.
- self.open_new(url)
-
- def open_new(self, url):
- self._remote("openURL %s" % url)
-
-register("kfm", Konquerer)
-
-
-class Grail:
- # There should be a way to maintain a connection to Grail, but the
- # Grail remote control protocol doesn't really allow that at this
- # point. It probably never will!
-
- def _find_grail_rc(self):
- import glob
- import pwd
- import socket
- import tempfile
- tempdir = os.path.join(tempfile.gettempdir(), ".grail-unix")
-        user = pwd.getpwuid(os.getuid())[0]
- filename = os.path.join(tempdir, user + "-*")
- maybes = glob.glob(filename)
- if not maybes:
- return None
- s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
- for fn in maybes:
- # need to PING each one until we find one that's live
- try:
- s.connect(fn)
- except socket.error:
- # no good; attempt to clean it out, but don't fail:
- try:
- os.unlink(fn)
- except IOError:
- pass
- else:
- return s
-
- def _remote(self, action):
- s = self._find_grail_rc()
- if not s:
- return 0
- s.send(action)
- s.close()
- return 1
-
- def open(self, url, new=0):
- if new:
- self.open_new(url)
- else:
- self._remote("LOAD " + url)
-
- def open_new(self, url):
- self._remote("LOADNEW " + url)
-
-register("grail", Grail)
-
-
-class WindowsDefault:
- def open(self, url, new=0):
- os.startfile(url)
-
- def open_new(self, url):
- self.open(url)
-
-
-DEFAULT_BROWSER = "command-line"
-
-if sys.platform[:3] == "win":
- del _browsers["kfm"]
- register("windows-default", WindowsDefault)
- DEFAULT_BROWSER = "windows-default"
-elif os.environ.get("DISPLAY"):
- if _iscommand("netscape"):
- DEFAULT_BROWSER = "netscape"
-
-# If the $BROWSER environment variable is set and true, let that be
-# the name of the browser to use:
-#
-DEFAULT_BROWSER = os.environ.get("BROWSER") or DEFAULT_BROWSER
-
-
-# Now try to support the MacOS world. This is the only supported
-# controller on that platform, so don't mess with the default!
-
-try:
- import ic
-except ImportError:
- pass
-else:
- class InternetConfig:
- def open(self, url, new=0):
-            ic.launchurl(url)
-
- def open_new(self, url):
- self.open(url)
-
- _browsers.clear()
- register("internet-config", InternetConfig)
- DEFAULT_BROWSER = "internet-config"
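
webbrows.py, removed above, is the 8.3-name copy of webbrowser.py. Its controller registry is driven entirely through register() and get(); a minimal sketch of that flow against the Python 2-era module shown here (the LoggingBrowser connector is illustrative, not part of the module):

    import webbrowser

    class LoggingBrowser:
        """Illustrative connector: records URLs instead of launching anything."""
        def __init__(self):
            self.opened = []
        def open(self, url, new=0):
            self.opened.append(url)
        def open_new(self, url):
            self.open(url, 1)

    # register() stores [klass, instance]; get() instantiates lazily on first use
    webbrowser.register('logging', LoggingBrowser)
    browser = webbrowser.get('logging')
    browser.open('http://www.python.org/')
    print browser.opened       # -> ['http://www.python.org/']
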