Diffstat (limited to 'Lib')
-rw-r--r--  Lib/lib-old/Para.py        343
-rw-r--r--  Lib/lib-old/addpack.py      67
-rw-r--r--  Lib/lib-old/cmp.py          63
-rw-r--r--  Lib/lib-old/cmpcache.py     64
-rw-r--r--  Lib/lib-old/codehack.py     81
-rw-r--r--  Lib/lib-old/dircmp.py      202
-rw-r--r--  Lib/lib-old/dump.py         63
-rw-r--r--  Lib/lib-old/find.py         26
-rw-r--r--  Lib/lib-old/fmt.py         623
-rw-r--r--  Lib/lib-old/grep.py         79
-rw-r--r--  Lib/lib-old/lockfile.py     15
-rw-r--r--  Lib/lib-old/newdir.py       73
-rw-r--r--  Lib/lib-old/ni.py          433
-rw-r--r--  Lib/lib-old/packmail.py    111
-rw-r--r--  Lib/lib-old/poly.py         52
-rw-r--r--  Lib/lib-old/rand.py         13
-rw-r--r--  Lib/lib-old/statcache.py    82
-rw-r--r--  Lib/lib-old/tb.py          177
-rw-r--r--  Lib/lib-old/tzparse.py      98
-rw-r--r--  Lib/lib-old/util.py         25
-rw-r--r--  Lib/lib-old/whatsound.py     1
-rw-r--r--  Lib/lib-old/whrandom.py    144
-rw-r--r--  Lib/lib-old/zmod.py         94
-rwxr-xr-x  Lib/reconvert.py           192
-rw-r--r--  Lib/regex_syntax.py         53
-rw-r--r--  Lib/regsub.py              198
-rw-r--r--  Lib/rexec.py                 2
-rw-r--r--  Lib/test/test___all__.py     2
-rw-r--r--  Lib/test/test_regex.py     113
-rw-r--r--  Lib/test/test_sundry.py      1
30 files changed, 1 insertions, 3489 deletions
diff --git a/Lib/lib-old/Para.py b/Lib/lib-old/Para.py
deleted file mode 100644
index 2fd8dc6..0000000
--- a/Lib/lib-old/Para.py
+++ /dev/null
@@ -1,343 +0,0 @@
-# Text formatting abstractions
-# Note -- this module is obsolete, it's too slow anyway
-
-
-# Oft-used type object
-Int = type(0)
-
-
-# Represent a paragraph. This is a list of words with associated
-# font and size information, plus indents and justification for the
-# entire paragraph.
-# Once the words have been added to a paragraph, it can be laid out
-# for different line widths. Once laid out, it can be rendered at
-# different screen locations. Once rendered, it can be queried
-# for mouse hits, and parts of the text can be highlighted
-class Para:
- #
- def __init__(self):
- self.words = [] # The words
- self.just = 'l' # Justification: 'l', 'r', 'lr' or 'c'
- self.indent_left = self.indent_right = self.indent_hang = 0
- # Final lay-out parameters, may change
- self.left = self.top = self.right = self.bottom = \
- self.width = self.height = self.lines = None
- #
- # Add a word, computing size information for it.
- # Words may also be added manually by appending to self.words
- # Each word should be a 7-tuple:
- # (font, text, width, space, stretch, ascent, descent)
- def addword(self, d, font, text, space, stretch):
- if font is not None:
- d.setfont(font)
- width = d.textwidth(text)
- ascent = d.baseline()
- descent = d.lineheight() - ascent
- spw = d.textwidth(' ')
- space = space * spw
- stretch = stretch * spw
- tuple = (font, text, width, space, stretch, ascent, descent)
- self.words.append(tuple)
- #
- # Hooks to begin and end anchors -- insert numbers in the word list!
- def bgn_anchor(self, id):
- self.words.append(id)
- #
- def end_anchor(self, id):
- self.words.append(0)
- #
- # Return the total length (width) of the text added so far, in pixels
- def getlength(self):
- total = 0
- for word in self.words:
- if type(word) is not Int:
- total = total + word[2] + word[3]
- return total
- #
- # Tab to a given position (relative to the current left indent):
- # remove all stretch, add fixed space up to the new indent.
- # If the current position is already at the tab stop,
- # don't add any new space (but still remove the stretch)
- def tabto(self, tab):
- total = 0
- as, de = 1, 0
- for i in range(len(self.words)):
- word = self.words[i]
- if type(word) is Int: continue
- (fo, te, wi, sp, st, as, de) = word
- self.words[i] = (fo, te, wi, sp, 0, as, de)
- total = total + wi + sp
- if total < tab:
- self.words.append((None, '', 0, tab-total, 0, as, de))
- #
- # Make a hanging tag: tab to hang, increment indent_left by hang,
- # and reset indent_hang to -hang
- def makehangingtag(self, hang):
- self.tabto(hang)
- self.indent_left = self.indent_left + hang
- self.indent_hang = -hang
- #
- # Decide where the line breaks will be given some screen width
- def layout(self, linewidth):
- self.width = linewidth
- height = 0
- self.lines = lines = []
- avail1 = self.width - self.indent_left - self.indent_right
- avail = avail1 - self.indent_hang
- words = self.words
- i = 0
- n = len(words)
- lastfont = None
- while i < n:
- firstfont = lastfont
- charcount = 0
- width = 0
- stretch = 0
- ascent = 0
- descent = 0
- lsp = 0
- j = i
- while i < n:
- word = words[i]
- if type(word) is Int:
- if word > 0 and width >= avail:
- break
- i = i+1
- continue
- fo, te, wi, sp, st, as, de = word
- if width + wi > avail and width > 0 and wi > 0:
- break
- if fo is not None:
- lastfont = fo
- if width == 0:
- firstfont = fo
- charcount = charcount + len(te) + (sp > 0)
- width = width + wi + sp
- lsp = sp
- stretch = stretch + st
- lst = st
- ascent = max(ascent, as)
- descent = max(descent, de)
- i = i+1
- while i > j and type(words[i-1]) is Int and \
- words[i-1] > 0: i = i-1
- width = width - lsp
- if i < n:
- stretch = stretch - lst
- else:
- stretch = 0
- tuple = i-j, firstfont, charcount, width, stretch, \
- ascent, descent
- lines.append(tuple)
- height = height + ascent + descent
- avail = avail1
- self.height = height
- #
- # Call a function for all words in a line
- def visit(self, wordfunc, anchorfunc):
- avail1 = self.width - self.indent_left - self.indent_right
- avail = avail1 - self.indent_hang
- v = self.top
- i = 0
- for tuple in self.lines:
- wordcount, firstfont, charcount, width, stretch, \
- ascent, descent = tuple
- h = self.left + self.indent_left
- if i == 0: h = h + self.indent_hang
- extra = 0
- if self.just == 'r': h = h + avail - width
- elif self.just == 'c': h = h + (avail - width) / 2
- elif self.just == 'lr' and stretch > 0:
- extra = avail - width
- v2 = v + ascent + descent
- for j in range(i, i+wordcount):
- word = self.words[j]
- if type(word) is Int:
- ok = anchorfunc(self, tuple, word, \
- h, v)
- if ok is not None: return ok
- continue
- fo, te, wi, sp, st, as, de = word
- if extra > 0 and stretch > 0:
- ex = extra * st / stretch
- extra = extra - ex
- stretch = stretch - st
- else:
- ex = 0
- h2 = h + wi + sp + ex
- ok = wordfunc(self, tuple, word, h, v, \
- h2, v2, (j==i), (j==i+wordcount-1))
- if ok is not None: return ok
- h = h2
- v = v2
- i = i + wordcount
- avail = avail1
- #
- # Render a paragraph in "drawing object" d, using the rectangle
- # given by (left, top, right) with an unspecified bottom.
- # Return the computed bottom of the text.
- def render(self, d, left, top, right):
- if self.width != right-left:
- self.layout(right-left)
- self.left = left
- self.top = top
- self.right = right
- self.bottom = self.top + self.height
- self.anchorid = 0
- try:
- self.d = d
- self.visit(self.__class__._renderword, \
- self.__class__._renderanchor)
- finally:
- self.d = None
- return self.bottom
- #
- def _renderword(self, tuple, word, h, v, h2, v2, isfirst, islast):
- if word[0] is not None: self.d.setfont(word[0])
- baseline = v + tuple[5]
- self.d.text((h, baseline - word[5]), word[1])
- if self.anchorid > 0:
- self.d.line((h, baseline+2), (h2, baseline+2))
- #
- def _renderanchor(self, tuple, word, h, v):
- self.anchorid = word
- #
- # Return which anchor(s) was hit by the mouse
- def hitcheck(self, mouseh, mousev):
- self.mouseh = mouseh
- self.mousev = mousev
- self.anchorid = 0
- self.hits = []
- self.visit(self.__class__._hitcheckword, \
- self.__class__._hitcheckanchor)
- return self.hits
- #
- def _hitcheckword(self, tuple, word, h, v, h2, v2, isfirst, islast):
- if self.anchorid > 0 and h <= self.mouseh <= h2 and \
- v <= self.mousev <= v2:
- self.hits.append(self.anchorid)
- #
- def _hitcheckanchor(self, tuple, word, h, v):
- self.anchorid = word
- #
- # Return whether the given anchor id is present
- def hasanchor(self, id):
- return id in self.words or -id in self.words
- #
- # Extract the raw text from the word list, substituting one space
- # for non-empty inter-word space, and terminating with '\n'
- def extract(self):
- text = ''
- for w in self.words:
- if type(w) is not Int:
- word = w[1]
- if w[3]: word = word + ' '
- text = text + word
- return text + '\n'
- #
- # Return which character position was hit by the mouse, as
- # an offset in the entire text as returned by extract().
- # Return None if the mouse was not in this paragraph
- def whereis(self, d, mouseh, mousev):
- if mousev < self.top or mousev > self.bottom:
- return None
- self.mouseh = mouseh
- self.mousev = mousev
- self.lastfont = None
- self.charcount = 0
- try:
- self.d = d
- return self.visit(self.__class__._whereisword, \
- self.__class__._whereisanchor)
- finally:
- self.d = None
- #
- def _whereisword(self, tuple, word, h1, v1, h2, v2, isfirst, islast):
- fo, te, wi, sp, st, as, de = word
- if fo is not None: self.lastfont = fo
- h = h1
- if isfirst: h1 = 0
- if islast: h2 = 999999
- if not (v1 <= self.mousev <= v2 and h1 <= self.mouseh <= h2):
- self.charcount = self.charcount + len(te) + (sp > 0)
- return
- if self.lastfont is not None:
- self.d.setfont(self.lastfont)
- cc = 0
- for c in te:
- cw = self.d.textwidth(c)
- if self.mouseh <= h + cw/2:
- return self.charcount + cc
- cc = cc+1
- h = h+cw
- self.charcount = self.charcount + cc
- if self.mouseh <= (h+h2) / 2:
- return self.charcount
- else:
- return self.charcount + 1
- #
- def _whereisanchor(self, tuple, word, h, v):
- pass
- #
- # Return screen position corresponding to position in paragraph.
- # Return tuple (h, vtop, vbaseline, vbottom).
- # This is more or less the inverse of whereis()
- def screenpos(self, d, pos):
- if pos < 0:
- ascent, descent = self.lines[0][5:7]
- return self.left, self.top, self.top + ascent, \
- self.top + ascent + descent
- self.pos = pos
- self.lastfont = None
- try:
- self.d = d
- ok = self.visit(self.__class__._screenposword, \
- self.__class__._screenposanchor)
- finally:
- self.d = None
- if ok is None:
- ascent, descent = self.lines[-1][5:7]
- ok = self.right, self.bottom - ascent - descent, \
- self.bottom - descent, self.bottom
- return ok
- #
- def _screenposword(self, tuple, word, h1, v1, h2, v2, isfirst, islast):
- fo, te, wi, sp, st, as, de = word
- if fo is not None: self.lastfont = fo
- cc = len(te) + (sp > 0)
- if self.pos > cc:
- self.pos = self.pos - cc
- return
- if self.pos < cc:
- self.d.setfont(self.lastfont)
- h = h1 + self.d.textwidth(te[:self.pos])
- else:
- h = h2
- ascent, descent = tuple[5:7]
- return h, v1, v1+ascent, v2
- #
- def _screenposanchor(self, tuple, word, h, v):
- pass
- #
- # Invert the stretch of text between pos1 and pos2.
- # If pos1 is None, the beginning is implied;
- # if pos2 is None, the end is implied.
- # Undoes its own effect when called again with the same arguments
- def invert(self, d, pos1, pos2):
- if pos1 is None:
- pos1 = self.left, self.top, self.top, self.top
- else:
- pos1 = self.screenpos(d, pos1)
- if pos2 is None:
- pos2 = self.right, self.bottom,self.bottom,self.bottom
- else:
- pos2 = self.screenpos(d, pos2)
- h1, top1, baseline1, bottom1 = pos1
- h2, top2, baseline2, bottom2 = pos2
- if bottom1 <= top2:
- d.invert((h1, top1), (self.right, bottom1))
- h1 = self.left
- if bottom1 < top2:
- d.invert((h1, bottom1), (self.right, top2))
- top1, bottom1 = top2, bottom2
- d.invert((h1, top1), (h2, bottom2))
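
The comments in Para above describe the intended lifecycle: add words through a measuring "drawing object", lay the paragraph out for a line width, then render or hit-test it. A minimal sketch of that lifecycle under the assumption of a character-cell measurer; the CharCellMeasurer class here is illustrative, modeled on the NullMeasurer defined in fmt.py below, and the usage lines are commented out because the module is removed by this commit:

    class CharCellMeasurer:
        # Measurer interface expected by Para.addword(): one character = one cell.
        def setfont(self, font): pass
        def textwidth(self, text): return len(text)
        def lineheight(self): return 1
        def baseline(self): return 0

    # p = Para()
    # d = CharCellMeasurer()
    # for word in 'one word per call, one space of stretch'.split():
    #     p.addword(d, None, word, 1, 1)
    # p.layout(20)      # choose line breaks for a 20-cell width
    # print(p.height)   # number of laid-out lines, at one cell per line
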
diff --git a/Lib/lib-old/addpack.py b/Lib/lib-old/addpack.py
deleted file mode 100644
index 2fb2601..0000000
--- a/Lib/lib-old/addpack.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# This module provides standard support for "packages".
-#
-# The idea is that large groups of related modules can be placed in
-# their own subdirectory, which can be added to the Python search path
-# in a relatively easy way.
-#
-# The current version takes a package name and searches the Python
-# search path for a directory by that name, and if found adds it to
-# the module search path (sys.path). It maintains a list of packages
-# that have already been added so adding the same package many times
-# is OK.
-#
-# It is intended to be used in a fairly stylized manner: each module
-# that wants to use a particular package, say 'Foo', is supposed to
-# contain the following code:
-#
-# from addpack import addpack
-# addpack('Foo')
-# <import modules from package Foo>
-#
-# Additional arguments, when present, provide additional places where
-# to look for the package before trying sys.path (these may be either
-# strings or lists/tuples of strings). Also, if the package name is a
-# full pathname, first the last component is tried in the usual way,
-# then the full pathname is tried last. If the package name is a
-# *relative* pathname (UNIX: contains a slash but doesn't start with
-# one), then nothing special is done. The packages "/foo/bar/bletch"
-# and "bletch" are considered the same, but unrelated to "bar/bletch".
-#
-# If the algorithm finds more than one suitable subdirectory, all are
-# added to the search path -- this makes it possible to override part
-# of a package. The same path will not be added more than once.
-#
-# If no directory is found, ImportError is raised.
-
-_packs = {} # {pack: [pathname, ...], ...}
-
-def addpack(pack, *locations):
- import os
- if os.path.isabs(pack):
- base = os.path.basename(pack)
- else:
- base = pack
- if _packs.has_key(base):
- return
- import sys
- path = []
- for loc in _flatten(locations) + sys.path:
- fn = os.path.join(loc, base)
- if fn not in path and os.path.isdir(fn):
- path.append(fn)
- if pack != base and pack not in path and os.path.isdir(pack):
- path.append(pack)
- if not path: raise ImportError, 'package ' + pack + ' not found'
- _packs[base] = path
- for fn in path:
- if fn not in sys.path:
- sys.path.append(fn)
-
-def _flatten(locations):
- locs = []
- for loc in locations:
- if type(loc) == type(''):
- locs.append(loc)
- else:
- locs = locs + _flatten(loc)
- return locs
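
The comments above describe addpack()'s job: find a directory named after the package along the existing search path and append it to sys.path. A minimal sketch of that idea using only os and sys; the helper name addpack_sketch and the package name 'Foo' are illustrative:

    import os
    import sys

    def addpack_sketch(pack):
        # Search sys.path for a subdirectory named after the package and
        # append it to the module search path, once.
        for loc in sys.path:
            candidate = os.path.join(loc, pack)
            if os.path.isdir(candidate):
                if candidate not in sys.path:
                    sys.path.append(candidate)
                return
        raise ImportError('package %s not found' % pack)

    # addpack_sketch('Foo')
    # <import modules that live inside the Foo directory>
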
diff --git a/Lib/lib-old/cmp.py b/Lib/lib-old/cmp.py
deleted file mode 100644
index 1146a25..0000000
--- a/Lib/lib-old/cmp.py
+++ /dev/null
@@ -1,63 +0,0 @@
-"""Efficiently compare files, boolean outcome only (equal / not equal).
-
-Tricks (used in this order):
- - Files with identical type, size & mtime are assumed to be clones
- - Files with different type or size cannot be identical
- - We keep a cache of outcomes of earlier comparisons
- - We don't fork a process to run 'cmp' but read the files ourselves
-"""
-
-import os
-
-cache = {}
-
-def cmp(f1, f2, shallow=1):
- """Compare two files, use the cache if possible.
- Return 1 for identical files, 0 for different.
- Raise exceptions if either file could not be statted, read, etc."""
- s1, s2 = sig(os.stat(f1)), sig(os.stat(f2))
- if s1[0] != 8 or s2[0] != 8:
- # Either is a not a plain file -- always report as different
- return 0
- if shallow and s1 == s2:
- # type, size & mtime match -- report same
- return 1
- if s1[:2] != s2[:2]: # Types or sizes differ, don't bother
- # types or sizes differ -- report different
- return 0
- # same type and size -- look in the cache
- key = (f1, f2)
- try:
- cs1, cs2, outcome = cache[key]
- # cache hit
- if s1 == cs1 and s2 == cs2:
- # cached signatures match
- return outcome
- # stale cached signature(s)
- except KeyError:
- # cache miss
- pass
- # really compare
- outcome = do_cmp(f1, f2)
- cache[key] = s1, s2, outcome
- return outcome
-
-def sig(st):
- """Return signature (i.e., type, size, mtime) from raw stat data
- 0-5: st_mode, st_ino, st_dev, st_nlink, st_uid, st_gid
- 6-9: st_size, st_atime, st_mtime, st_ctime"""
- type = st[0] / 4096
- size = st[6]
- mtime = st[8]
- return type, size, mtime
-
-def do_cmp(f1, f2):
- """Compare two files, really."""
- bufsize = 8*1024 # Could be tuned
- fp1 = open(f1, 'rb')
- fp2 = open(f2, 'rb')
- while 1:
- b1 = fp1.read(bufsize)
- b2 = fp2.read(bufsize)
- if b1 != b2: return 0
- if not b1: return 1
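
The strategy documented above (trust matching type, size and mtime, otherwise read both files) is what the standard filecmp module provides. A minimal usage sketch; the file names are placeholders:

    import filecmp

    # shallow=True (the default) trusts matching os.stat() signatures;
    # shallow=False falls back to reading and comparing the contents.
    same = filecmp.cmp('left.txt', 'right.txt', shallow=False)
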
diff --git a/Lib/lib-old/cmpcache.py b/Lib/lib-old/cmpcache.py
deleted file mode 100644
index 11540f8..0000000
--- a/Lib/lib-old/cmpcache.py
+++ /dev/null
@@ -1,64 +0,0 @@
-"""Efficiently compare files, boolean outcome only (equal / not equal).
-
-Tricks (used in this order):
- - Use the statcache module to avoid statting files more than once
- - Files with identical type, size & mtime are assumed to be clones
- - Files with different type or size cannot be identical
- - We keep a cache of outcomes of earlier comparisons
- - We don't fork a process to run 'cmp' but read the files ourselves
-"""
-
-import os
-from stat import *
-import statcache
-
-
-# The cache.
-#
-cache = {}
-
-
-def cmp(f1, f2, shallow=1):
- """Compare two files, use the cache if possible.
- May raise os.error if a stat or open of either fails.
- Return 1 for identical files, 0 for different.
- Raise exceptions if either file could not be statted, read, etc."""
- s1, s2 = sig(statcache.stat(f1)), sig(statcache.stat(f2))
- if not S_ISREG(s1[0]) or not S_ISREG(s2[0]):
- # Either is a not a plain file -- always report as different
- return 0
- if shallow and s1 == s2:
- # type, size & mtime match -- report same
- return 1
- if s1[:2] != s2[:2]: # Types or sizes differ, don't bother
- # types or sizes differ -- report different
- return 0
- # same type and size -- look in the cache
- key = f1 + ' ' + f2
- if cache.has_key(key):
- cs1, cs2, outcome = cache[key]
- # cache hit
- if s1 == cs1 and s2 == cs2:
- # cached signatures match
- return outcome
- # stale cached signature(s)
- # really compare
- outcome = do_cmp(f1, f2)
- cache[key] = s1, s2, outcome
- return outcome
-
-def sig(st):
- """Return signature (i.e., type, size, mtime) from raw stat data."""
- return S_IFMT(st[ST_MODE]), st[ST_SIZE], st[ST_MTIME]
-
-def do_cmp(f1, f2):
- """Compare two files, really."""
- #print ' cmp', f1, f2 # XXX remove when debugged
- bufsize = 8*1024 # Could be tuned
- fp1 = open(f1, 'rb')
- fp2 = open(f2, 'rb')
- while 1:
- b1 = fp1.read(bufsize)
- b2 = fp2.read(bufsize)
- if b1 != b2: return 0
- if not b1: return 1
diff --git a/Lib/lib-old/codehack.py b/Lib/lib-old/codehack.py
deleted file mode 100644
index 0b5e3a1..0000000
--- a/Lib/lib-old/codehack.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# A subroutine for extracting a function name from a code object
-# (with cache)
-
-import sys
-from stat import *
-import string
-import os
-import linecache
-
-# XXX The functions getcodename() and getfuncname() are now obsolete
-# XXX as code and function objects now have a name attribute --
-# XXX co.co_name and f.func_name.
-# XXX getlineno() is now also obsolete because of the new attribute
-# XXX of code objects, co.co_firstlineno.
-
-# Extract the function or class name from a code object.
-# This is a bit of a hack, since a code object doesn't contain
-# the name directly. So what do we do:
-# - get the filename (which *is* in the code object)
-# - look in the code string to find the first SET_LINENO instruction
-# (this must be the first instruction)
-# - get the line from the file
-# - if the line starts with 'class' or 'def' (after possible whitespace),
-# extract the following identifier
-#
-# This breaks apart when the function was read from <stdin>
-# or constructed by exec(), when the file is not accessible,
-# and also when the file has been modified or when a line is
-# continued with a backslash before the function or class name.
-#
-# Because this is a pretty expensive hack, a cache is kept.
-
-SET_LINENO = 127 # The opcode (see "opcode.h" in the Python source)
-identchars = string.ascii_letters + string.digits + '_' # Identifier characters
-
-_namecache = {} # The cache
-
-def getcodename(co):
- try:
- return co.co_name
- except AttributeError:
- pass
- key = `co` # arbitrary but uniquely identifying string
- if _namecache.has_key(key): return _namecache[key]
- filename = co.co_filename
- code = co.co_code
- name = ''
- if ord(code[0]) == SET_LINENO:
- lineno = ord(code[1]) | ord(code[2]) << 8
- line = linecache.getline(filename, lineno)
- words = line.split()
- if len(words) >= 2 and words[0] in ('def', 'class'):
- name = words[1]
- for i in range(len(name)):
- if name[i] not in identchars:
- name = name[:i]
- break
- _namecache[key] = name
- return name
-
-# Use the above routine to find a function's name.
-
-def getfuncname(func):
- try:
- return func.func_name
- except AttributeError:
- pass
- return getcodename(func.func_code)
-
-# A part of the above code to extract just the line number from a code object.
-
-def getlineno(co):
- try:
- return co.co_firstlineno
- except AttributeError:
- pass
- code = co.co_code
- if ord(code[0]) == SET_LINENO:
- return ord(code[1]) | ord(code[2]) << 8
- else:
- return -1
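
As the comments above already note, the hack is obsolete: code objects expose the name and first line number directly. A minimal sketch of reading those attributes; describe() is an illustrative helper name:

    def describe(func):
        code = func.__code__   # spelled func.func_code in older Python 2
        return code.co_name, code.co_filename, code.co_firstlineno

    # describe(describe) returns the function's name, its source file,
    # and the line where its 'def' statement starts.
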
diff --git a/Lib/lib-old/dircmp.py b/Lib/lib-old/dircmp.py
deleted file mode 100644
index 1e7bf2a..0000000
--- a/Lib/lib-old/dircmp.py
+++ /dev/null
@@ -1,202 +0,0 @@
-"""A class to build directory diff tools on."""
-
-import os
-
-import dircache
-import cmpcache
-import statcache
-from stat import *
-
-class dircmp:
- """Directory comparison class."""
-
- def new(self, a, b):
- """Initialize."""
- self.a = a
- self.b = b
- # Properties that caller may change before calling self.run():
- self.hide = [os.curdir, os.pardir] # Names never to be shown
- self.ignore = ['RCS', 'tags'] # Names ignored in comparison
-
- return self
-
- def run(self):
- """Compare everything except common subdirectories."""
- self.a_list = filter(dircache.listdir(self.a), self.hide)
- self.b_list = filter(dircache.listdir(self.b), self.hide)
- self.a_list.sort()
- self.b_list.sort()
- self.phase1()
- self.phase2()
- self.phase3()
-
- def phase1(self):
- """Compute common names."""
- self.a_only = []
- self.common = []
- for x in self.a_list:
- if x in self.b_list:
- self.common.append(x)
- else:
- self.a_only.append(x)
-
- self.b_only = []
- for x in self.b_list:
- if x not in self.common:
- self.b_only.append(x)
-
- def phase2(self):
- """Distinguish files, directories, funnies."""
- self.common_dirs = []
- self.common_files = []
- self.common_funny = []
-
- for x in self.common:
- a_path = os.path.join(self.a, x)
- b_path = os.path.join(self.b, x)
-
- ok = 1
- try:
- a_stat = statcache.stat(a_path)
- except os.error, why:
- # print 'Can\'t stat', a_path, ':', why[1]
- ok = 0
- try:
- b_stat = statcache.stat(b_path)
- except os.error, why:
- # print 'Can\'t stat', b_path, ':', why[1]
- ok = 0
-
- if ok:
- a_type = S_IFMT(a_stat[ST_MODE])
- b_type = S_IFMT(b_stat[ST_MODE])
- if a_type != b_type:
- self.common_funny.append(x)
- elif S_ISDIR(a_type):
- self.common_dirs.append(x)
- elif S_ISREG(a_type):
- self.common_files.append(x)
- else:
- self.common_funny.append(x)
- else:
- self.common_funny.append(x)
-
- def phase3(self):
- """Find out differences between common files."""
- xx = cmpfiles(self.a, self.b, self.common_files)
- self.same_files, self.diff_files, self.funny_files = xx
-
- def phase4(self):
- """Find out differences between common subdirectories.
- A new dircmp object is created for each common subdirectory,
- these are stored in a dictionary indexed by filename.
- The hide and ignore properties are inherited from the parent."""
- self.subdirs = {}
- for x in self.common_dirs:
- a_x = os.path.join(self.a, x)
- b_x = os.path.join(self.b, x)
- self.subdirs[x] = newdd = dircmp().new(a_x, b_x)
- newdd.hide = self.hide
- newdd.ignore = self.ignore
- newdd.run()
-
- def phase4_closure(self):
- """Recursively call phase4() on subdirectories."""
- self.phase4()
- for x in self.subdirs.keys():
- self.subdirs[x].phase4_closure()
-
- def report(self):
- """Print a report on the differences between a and b."""
- # Assume that phases 1 to 3 have been executed
- # Output format is purposely lousy
- print 'diff', self.a, self.b
- if self.a_only:
- print 'Only in', self.a, ':', self.a_only
- if self.b_only:
- print 'Only in', self.b, ':', self.b_only
- if self.same_files:
- print 'Identical files :', self.same_files
- if self.diff_files:
- print 'Differing files :', self.diff_files
- if self.funny_files:
- print 'Trouble with common files :', self.funny_files
- if self.common_dirs:
- print 'Common subdirectories :', self.common_dirs
- if self.common_funny:
- print 'Common funny cases :', self.common_funny
-
- def report_closure(self):
- """Print reports on self and on subdirs.
- If phase 4 hasn't been done, no subdir reports are printed."""
- self.report()
- try:
- x = self.subdirs
- except AttributeError:
- return # No subdirectories computed
- for x in self.subdirs.keys():
- print
- self.subdirs[x].report_closure()
-
- def report_phase4_closure(self):
- """Report and do phase 4 recursively."""
- self.report()
- self.phase4()
- for x in self.subdirs.keys():
- print
- self.subdirs[x].report_phase4_closure()
-
-
-def cmpfiles(a, b, common):
- """Compare common files in two directories.
- Return:
- - files that compare equal
- - files that compare different
- - funny cases (can't stat etc.)"""
-
- res = ([], [], [])
- for x in common:
- res[cmp(os.path.join(a, x), os.path.join(b, x))].append(x)
- return res
-
-
-def cmp(a, b):
- """Compare two files.
- Return:
- 0 for equal
- 1 for different
- 2 for funny cases (can't stat, etc.)"""
-
- try:
- if cmpcache.cmp(a, b): return 0
- return 1
- except os.error:
- return 2
-
-
-def filter(list, skip):
- """Return a copy with items that occur in skip removed."""
-
- result = []
- for item in list:
- if item not in skip: result.append(item)
- return result
-
-
-def demo():
- """Demonstration and testing."""
-
- import sys
- import getopt
- options, args = getopt.getopt(sys.argv[1:], 'r')
- if len(args) != 2:
- raise getopt.error, 'need exactly two args'
- dd = dircmp().new(args[0], args[1])
- dd.run()
- if ('-r', '') in options:
- dd.report_phase4_closure()
- else:
- dd.report()
-
-if __name__ == "__main__":
- demo()
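
The standard filecmp module ships a dircmp class covering the phases implemented above (common names, type classification, file comparison, recursion). A minimal usage sketch; the directory names are placeholders:

    import filecmp

    dd = filecmp.dircmp('release-1.0', 'release-1.1')
    dd.report()                # one-level report, like report() above
    dd.report_full_closure()   # recursive report, like report_phase4_closure()
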
diff --git a/Lib/lib-old/dump.py b/Lib/lib-old/dump.py
deleted file mode 100644
index 60bdba8..0000000
--- a/Lib/lib-old/dump.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Module 'dump'
-#
-# Print python code that reconstructs a variable.
-# This only works in certain cases.
-#
-# It works fine for:
-# - ints and floats (except NaNs and other weird things)
-# - strings
-# - compounds and lists, provided it works for all their elements
-# - imported modules, provided their name is the module name
-#
-# It works for top-level dictionaries but not for dictionaries
-# contained in other objects (could be made to work with some hassle
-# though).
-#
-# It does not work for functions (all sorts), classes, class objects,
-# windows, files etc.
-#
-# Finally, objects referenced by more than one name or contained in more
-# than one other object lose their sharing property (this is bad for
-# strings used as exception identifiers, for instance).
-
-# Dump a whole symbol table
-#
-def dumpsymtab(dict):
- for key in dict.keys():
- dumpvar(key, dict[key])
-
-# Dump a single variable
-#
-def dumpvar(name, x):
- import sys
- t = type(x)
- if t == type({}):
- print name, '= {}'
- for key in x.keys():
- item = x[key]
- if not printable(item):
- print '#',
- print name, '[', `key`, '] =', `item`
- elif t in (type(''), type(0), type(0.0), type([]), type(())):
- if not printable(x):
- print '#',
- print name, '=', `x`
- elif t == type(sys):
- print 'import', name, '#', x
- else:
- print '#', name, '=', x
-
-# check if a value is printable in a way that can be read back with input()
-#
-def printable(x):
- t = type(x)
- if t in (type(''), type(0), type(0.0)):
- return 1
- if t in (type([]), type(())):
- for item in x:
- if not printable(item):
- return 0
- return 1
- if x == {}:
- return 1
- return 0
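
The module above prints Python source that reconstructs a value, which only works for types whose repr() can be read back. A minimal sketch of the same idea with pprint, which also handles nested containers; dumpvar() here is an illustrative stand-in, not the deleted function:

    import pprint

    def dumpvar(name, value):
        # pprint.pformat() yields a repr() that eval() can usually read back
        # for built-in scalars, strings, lists, tuples and dictionaries.
        print('%s = %s' % (name, pprint.pformat(value)))

    # dumpvar('config', {'depth': 3, 'names': ['a', 'b']})
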
diff --git a/Lib/lib-old/find.py b/Lib/lib-old/find.py
deleted file mode 100644
index 39ad771..0000000
--- a/Lib/lib-old/find.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import fnmatch
-import os
-
-_debug = 0
-
-_prune = ['(*)']
-
-def find(pattern, dir = os.curdir):
- list = []
- names = os.listdir(dir)
- names.sort()
- for name in names:
- if name in (os.curdir, os.pardir):
- continue
- fullname = os.path.join(dir, name)
- if fnmatch.fnmatch(name, pattern):
- list.append(fullname)
- if os.path.isdir(fullname) and not os.path.islink(fullname):
- for p in _prune:
- if fnmatch.fnmatch(name, p):
- if _debug: print "skip", `fullname`
- break
- else:
- if _debug: print "descend into", `fullname`
- list = list + find(pattern, fullname)
- return list
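
The recursive pattern search above can be written with os.walk and fnmatch from the standard library. A minimal sketch, without find.py's pruning of '(*)' directories; find_sketch() is an illustrative name:

    import fnmatch
    import os

    def find_sketch(pattern, top=os.curdir):
        hits = []
        for dirpath, dirnames, filenames in os.walk(top):
            for name in sorted(dirnames + filenames):
                if fnmatch.fnmatch(name, pattern):
                    hits.append(os.path.join(dirpath, name))
        return hits

    # find_sketch('*.py', 'Lib')
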
diff --git a/Lib/lib-old/fmt.py b/Lib/lib-old/fmt.py
deleted file mode 100644
index 997d37a..0000000
--- a/Lib/lib-old/fmt.py
+++ /dev/null
@@ -1,623 +0,0 @@
-# Text formatting abstractions
-# Note -- this module is obsolete, it's too slow anyway
-
-
-import string
-import Para
-
-
-# A formatter back-end object has one method that is called by the formatter:
-# addpara(p), where p is a paragraph object. For example:
-
-
-# Formatter back-end to do nothing at all with the paragraphs
-class NullBackEnd:
- #
- def __init__(self):
- pass
- #
- def addpara(self, p):
- pass
- #
- def bgn_anchor(self, id):
- pass
- #
- def end_anchor(self, id):
- pass
-
-
-# Formatter back-end to collect the paragraphs in a list
-class SavingBackEnd(NullBackEnd):
- #
- def __init__(self):
- self.paralist = []
- #
- def addpara(self, p):
- self.paralist.append(p)
- #
- def hitcheck(self, h, v):
- hits = []
- for p in self.paralist:
- if p.top <= v <= p.bottom:
- for id in p.hitcheck(h, v):
- if id not in hits:
- hits.append(id)
- return hits
- #
- def extract(self):
- text = ''
- for p in self.paralist:
- text = text + (p.extract())
- return text
- #
- def extractpart(self, long1, long2):
- if long1 > long2: long1, long2 = long2, long1
- para1, pos1 = long1
- para2, pos2 = long2
- text = ''
- while para1 < para2:
- ptext = self.paralist[para1].extract()
- text = text + ptext[pos1:]
- pos1 = 0
- para1 = para1 + 1
- ptext = self.paralist[para2].extract()
- return text + ptext[pos1:pos2]
- #
- def whereis(self, d, h, v):
- total = 0
- for i in range(len(self.paralist)):
- p = self.paralist[i]
- result = p.whereis(d, h, v)
- if result is not None:
- return i, result
- return None
- #
- def roundtowords(self, long1, long2):
- i, offset = long1
- text = self.paralist[i].extract()
- while offset > 0 and text[offset-1] != ' ': offset = offset-1
- long1 = i, offset
- #
- i, offset = long2
- text = self.paralist[i].extract()
- n = len(text)
- while offset < n-1 and text[offset] != ' ': offset = offset+1
- long2 = i, offset
- #
- return long1, long2
- #
- def roundtoparagraphs(self, long1, long2):
- long1 = long1[0], 0
- long2 = long2[0], len(self.paralist[long2[0]].extract())
- return long1, long2
-
-
-# Formatter back-end to send the text directly to the drawing object
-class WritingBackEnd(NullBackEnd):
- #
- def __init__(self, d, width):
- self.d = d
- self.width = width
- self.lineno = 0
- #
- def addpara(self, p):
- self.lineno = p.render(self.d, 0, self.lineno, self.width)
-
-
-# A formatter receives a stream of formatting instructions and assembles
-# these into a stream of paragraphs on to a back-end. The assembly is
-# parametrized by a text measurement object, which must match the output
-# operations of the back-end. The back-end is responsible for splitting
-# paragraphs up in lines of a given maximum width. (This is done because
-# in a windowing environment, when the window size changes, there is no
-# need to redo the assembly into paragraphs, but the splitting into lines
-# must be done taking the new window size into account.)
-
-
-# Formatter base class. Initialize it with a text measurement object,
-# which is used for text measurements, and a back-end object,
-# which receives the completed paragraphs. The formatting methods are:
-# setfont(font)
-# setleftindent(nspaces)
-# setjust(type) where type is 'l', 'c', 'r', or 'lr'
-# flush()
-# vspace(nlines)
-# needvspace(nlines)
-# addword(word, nspaces)
-class BaseFormatter:
- #
- def __init__(self, d, b):
- # Drawing object used for text measurements
- self.d = d
- #
- # BackEnd object receiving completed paragraphs
- self.b = b
- #
- # Parameters of the formatting model
- self.leftindent = 0
- self.just = 'l'
- self.font = None
- self.blanklines = 0
- #
- # Parameters derived from the current font
- self.space = d.textwidth(' ')
- self.line = d.lineheight()
- self.ascent = d.baseline()
- self.descent = self.line - self.ascent
- #
- # Parameter derived from the default font
- self.n_space = self.space
- #
- # Current paragraph being built
- self.para = None
- self.nospace = 1
- #
- # Font to set on the next word
- self.nextfont = None
- #
- def newpara(self):
- return Para.Para()
- #
- def setfont(self, font):
- if font is None: return
- self.font = self.nextfont = font
- d = self.d
- d.setfont(font)
- self.space = d.textwidth(' ')
- self.line = d.lineheight()
- self.ascent = d.baseline()
- self.descent = self.line - self.ascent
- #
- def setleftindent(self, nspaces):
- self.leftindent = int(self.n_space * nspaces)
- if self.para:
- hang = self.leftindent - self.para.indent_left
- if hang > 0 and self.para.getlength() <= hang:
- self.para.makehangingtag(hang)
- self.nospace = 1
- else:
- self.flush()
- #
- def setrightindent(self, nspaces):
- self.rightindent = int(self.n_space * nspaces)
- if self.para:
- self.para.indent_right = self.rightindent
- self.flush()
- #
- def setjust(self, just):
- self.just = just
- if self.para:
- self.para.just = self.just
- #
- def flush(self):
- if self.para:
- self.b.addpara(self.para)
- self.para = None
- if self.font is not None:
- self.d.setfont(self.font)
- self.nospace = 1
- #
- def vspace(self, nlines):
- self.flush()
- if nlines > 0:
- self.para = self.newpara()
- tuple = None, '', 0, 0, 0, int(nlines*self.line), 0
- self.para.words.append(tuple)
- self.flush()
- self.blanklines = self.blanklines + nlines
- #
- def needvspace(self, nlines):
- self.flush() # Just to be sure
- if nlines > self.blanklines:
- self.vspace(nlines - self.blanklines)
- #
- def addword(self, text, space):
- if self.nospace and not text:
- return
- self.nospace = 0
- self.blanklines = 0
- if not self.para:
- self.para = self.newpara()
- self.para.indent_left = self.leftindent
- self.para.just = self.just
- self.nextfont = self.font
- space = int(space * self.space)
- self.para.words.append((self.nextfont, text,
- self.d.textwidth(text), space, space,
- self.ascent, self.descent))
- self.nextfont = None
- #
- def bgn_anchor(self, id):
- if not self.para:
- self.nospace = 0
- self.addword('', 0)
- self.para.bgn_anchor(id)
- #
- def end_anchor(self, id):
- if not self.para:
- self.nospace = 0
- self.addword('', 0)
- self.para.end_anchor(id)
-
-
-# Measuring object for measuring text as viewed on a tty
-class NullMeasurer:
- #
- def __init__(self):
- pass
- #
- def setfont(self, font):
- pass
- #
- def textwidth(self, text):
- return len(text)
- #
- def lineheight(self):
- return 1
- #
- def baseline(self):
- return 0
-
-
-# Drawing object for writing plain ASCII text to a file
-class FileWriter:
- #
- def __init__(self, fp):
- self.fp = fp
- self.lineno, self.colno = 0, 0
- #
- def setfont(self, font):
- pass
- #
- def text(self, (h, v), str):
- if not str: return
- if '\n' in str:
- raise ValueError, 'can\'t write \\n'
- while self.lineno < v:
- self.fp.write('\n')
- self.colno, self.lineno = 0, self.lineno + 1
- while self.lineno > v:
- # XXX This should never happen...
- self.fp.write('\033[A') # ANSI up arrow
- self.lineno = self.lineno - 1
- if self.colno < h:
- self.fp.write(' ' * (h - self.colno))
- elif self.colno > h:
- self.fp.write('\b' * (self.colno - h))
- self.colno = h
- self.fp.write(str)
- self.colno = h + len(str)
-
-
-# Formatting class to do nothing at all with the data
-class NullFormatter(BaseFormatter):
- #
- def __init__(self):
- d = NullMeasurer()
- b = NullBackEnd()
- BaseFormatter.__init__(self, d, b)
-
-
-# Formatting class to write directly to a file
-class WritingFormatter(BaseFormatter):
- #
- def __init__(self, fp, width):
- dm = NullMeasurer()
- dw = FileWriter(fp)
- b = WritingBackEnd(dw, width)
- BaseFormatter.__init__(self, dm, b)
- self.blanklines = 1
- #
- # Suppress multiple blank lines
- def needvspace(self, nlines):
- BaseFormatter.needvspace(self, min(1, nlines))
-
-
-# A "FunnyFormatter" writes ASCII text with a twist: *bold words*,
-# _italic text_ and _underlined words_, and `quoted text'.
-# It assumes that the fonts are 'r', 'i', 'b', 'u', 'q': (roman,
-# italic, bold, underline, quote).
-# Moreover, if the font is in upper case, the text is converted to
-# UPPER CASE.
-class FunnyFormatter(WritingFormatter):
- #
- def flush(self):
- if self.para: finalize(self.para)
- WritingFormatter.flush(self)
-
-
-# Surrounds *bold words* and _italic text_ in a paragraph with
-# appropriate markers, fixing the size (assuming these characters'
-# width is 1).
-openchar = \
- {'b':'*', 'i':'_', 'u':'_', 'q':'`', 'B':'*', 'I':'_', 'U':'_', 'Q':'`'}
-closechar = \
- {'b':'*', 'i':'_', 'u':'_', 'q':'\'', 'B':'*', 'I':'_', 'U':'_', 'Q':'\''}
-def finalize(para):
- oldfont = curfont = 'r'
- para.words.append(('r', '', 0, 0, 0, 0)) # temporary, deleted at end
- for i in range(len(para.words)):
- fo, te, wi = para.words[i][:3]
- if fo is not None: curfont = fo
- if curfont != oldfont:
- if closechar.has_key(oldfont):
- c = closechar[oldfont]
- j = i-1
- while j > 0 and para.words[j][1] == '': j = j-1
- fo1, te1, wi1 = para.words[j][:3]
- te1 = te1 + c
- wi1 = wi1 + len(c)
- para.words[j] = (fo1, te1, wi1) + \
- para.words[j][3:]
- if openchar.has_key(curfont) and te:
- c = openchar[curfont]
- te = c + te
- wi = len(c) + wi
- para.words[i] = (fo, te, wi) + \
- para.words[i][3:]
- if te: oldfont = curfont
- else: oldfont = 'r'
- if curfont in string.uppercase:
- te = string.upper(te)
- para.words[i] = (fo, te, wi) + para.words[i][3:]
- del para.words[-1]
-
-
-# Formatter back-end to draw the text in a window.
-# This has an option to draw while the paragraphs are being added,
-# to minimize the delay before the user sees anything.
-# This manages the entire "document" of the window.
-class StdwinBackEnd(SavingBackEnd):
- #
- def __init__(self, window, drawnow):
- self.window = window
- self.drawnow = drawnow
- self.width = window.getwinsize()[0]
- self.selection = None
- self.height = 0
- window.setorigin(0, 0)
- window.setdocsize(0, 0)
- self.d = window.begindrawing()
- SavingBackEnd.__init__(self)
- #
- def finish(self):
- self.d.close()
- self.d = None
- self.window.setdocsize(0, self.height)
- #
- def addpara(self, p):
- self.paralist.append(p)
- if self.drawnow:
- self.height = \
- p.render(self.d, 0, self.height, self.width)
- else:
- p.layout(self.width)
- p.left = 0
- p.top = self.height
- p.right = self.width
- p.bottom = self.height + p.height
- self.height = p.bottom
- #
- def resize(self):
- self.window.change((0, 0), (self.width, self.height))
- self.width = self.window.getwinsize()[0]
- self.height = 0
- for p in self.paralist:
- p.layout(self.width)
- p.left = 0
- p.top = self.height
- p.right = self.width
- p.bottom = self.height + p.height
- self.height = p.bottom
- self.window.change((0, 0), (self.width, self.height))
- self.window.setdocsize(0, self.height)
- #
- def redraw(self, area):
- d = self.window.begindrawing()
- (left, top), (right, bottom) = area
- d.erase(area)
- d.cliprect(area)
- for p in self.paralist:
- if top < p.bottom and p.top < bottom:
- v = p.render(d, p.left, p.top, p.right)
- if self.selection:
- self.invert(d, self.selection)
- d.close()
- #
- def setselection(self, new):
- if new:
- long1, long2 = new
- pos1 = long1[:3]
- pos2 = long2[:3]
- new = pos1, pos2
- if new != self.selection:
- d = self.window.begindrawing()
- if self.selection:
- self.invert(d, self.selection)
- if new:
- self.invert(d, new)
- d.close()
- self.selection = new
- #
- def getselection(self):
- return self.selection
- #
- def extractselection(self):
- if self.selection:
- a, b = self.selection
- return self.extractpart(a, b)
- else:
- return None
- #
- def invert(self, d, region):
- long1, long2 = region
- if long1 > long2: long1, long2 = long2, long1
- para1, pos1 = long1
- para2, pos2 = long2
- while para1 < para2:
- self.paralist[para1].invert(d, pos1, None)
- pos1 = None
- para1 = para1 + 1
- self.paralist[para2].invert(d, pos1, pos2)
- #
- def search(self, prog):
- import re, string
- if type(prog) is type(''):
- prog = re.compile(string.lower(prog))
- if self.selection:
- iold = self.selection[0][0]
- else:
- iold = -1
- hit = None
- for i in range(len(self.paralist)):
- if i == iold or i < iold and hit:
- continue
- p = self.paralist[i]
- text = string.lower(p.extract())
- match = prog.search(text)
- if match:
- a, b = match.group(0)
- long1 = i, a
- long2 = i, b
- hit = long1, long2
- if i > iold:
- break
- if hit:
- self.setselection(hit)
- i = hit[0][0]
- p = self.paralist[i]
- self.window.show((p.left, p.top), (p.right, p.bottom))
- return 1
- else:
- return 0
- #
- def showanchor(self, id):
- for i in range(len(self.paralist)):
- p = self.paralist[i]
- if p.hasanchor(id):
- long1 = i, 0
- long2 = i, len(p.extract())
- hit = long1, long2
- self.setselection(hit)
- self.window.show(
- (p.left, p.top), (p.right, p.bottom))
- break
-
-
-# GL extensions
-
-class GLFontCache:
- #
- def __init__(self):
- self.reset()
- self.setfont('')
- #
- def reset(self):
- self.fontkey = None
- self.fonthandle = None
- self.fontinfo = None
- self.fontcache = {}
- #
- def close(self):
- self.reset()
- #
- def setfont(self, fontkey):
- if fontkey == '':
- fontkey = 'Times-Roman 12'
- elif ' ' not in fontkey:
- fontkey = fontkey + ' 12'
- if fontkey == self.fontkey:
- return
- if self.fontcache.has_key(fontkey):
- handle = self.fontcache[fontkey]
- else:
- import string
- i = string.index(fontkey, ' ')
- name, sizestr = fontkey[:i], fontkey[i:]
- size = eval(sizestr)
- key1 = name + ' 1'
- key = name + ' ' + `size`
- # NB key may differ from fontkey!
- if self.fontcache.has_key(key):
- handle = self.fontcache[key]
- else:
- if self.fontcache.has_key(key1):
- handle = self.fontcache[key1]
- else:
- import fm
- handle = fm.findfont(name)
- self.fontcache[key1] = handle
- handle = handle.scalefont(size)
- self.fontcache[fontkey] = \
- self.fontcache[key] = handle
- self.fontkey = fontkey
- if self.fonthandle != handle:
- self.fonthandle = handle
- self.fontinfo = handle.getfontinfo()
- handle.setfont()
-
-
-class GLMeasurer(GLFontCache):
- #
- def textwidth(self, text):
- return self.fonthandle.getstrwidth(text)
- #
- def baseline(self):
- return self.fontinfo[6] - self.fontinfo[3]
- #
- def lineheight(self):
- return self.fontinfo[6]
-
-
-class GLWriter(GLFontCache):
- #
- # NOTES:
- # (1) Use gl.ortho2 to use X pixel coordinates!
- #
- def text(self, (h, v), text):
- import gl, fm
- gl.cmov2i(h, v + self.fontinfo[6] - self.fontinfo[3])
- fm.prstr(text)
- #
- def setfont(self, fontkey):
- oldhandle = self.fonthandle
- GLFontCache.setfont(fontkey)
- if self.fonthandle != oldhandle:
- handle.setfont()
-
-
-class GLMeasurerWriter(GLMeasurer, GLWriter):
- pass
-
-
-class GLBackEnd(SavingBackEnd):
- #
- def __init__(self, wid):
- import gl
- gl.winset(wid)
- self.wid = wid
- self.width = gl.getsize()[1]
- self.height = 0
- self.d = GLMeasurerWriter()
- SavingBackEnd.__init__(self)
- #
- def finish(self):
- pass
- #
- def addpara(self, p):
- self.paralist.append(p)
- self.height = p.render(self.d, 0, self.height, self.width)
- #
- def redraw(self):
- import gl
- gl.winset(self.wid)
- width = gl.getsize()[1]
- if width != self.width:
- setdocsize = 1
- self.width = width
- for p in self.paralist:
- p.top = p.bottom = None
- d = self.d
- v = 0
- for p in self.paralist:
- v = p.render(d, 0, v, width)
diff --git a/Lib/lib-old/grep.py b/Lib/lib-old/grep.py
deleted file mode 100644
index 2926746..0000000
--- a/Lib/lib-old/grep.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# 'grep'
-
-import regex
-from regex_syntax import *
-
-opt_show_where = 0
-opt_show_filename = 0
-opt_show_lineno = 1
-
-def grep(pat, *files):
- return ggrep(RE_SYNTAX_GREP, pat, files)
-
-def egrep(pat, *files):
- return ggrep(RE_SYNTAX_EGREP, pat, files)
-
-def emgrep(pat, *files):
- return ggrep(RE_SYNTAX_EMACS, pat, files)
-
-def ggrep(syntax, pat, files):
- if len(files) == 1 and type(files[0]) == type([]):
- files = files[0]
- global opt_show_filename
- opt_show_filename = (len(files) != 1)
- syntax = regex.set_syntax(syntax)
- try:
- prog = regex.compile(pat)
- finally:
- syntax = regex.set_syntax(syntax)
- for filename in files:
- fp = open(filename, 'r')
- lineno = 0
- while 1:
- line = fp.readline()
- if not line: break
- lineno = lineno + 1
- if prog.search(line) >= 0:
- showline(filename, lineno, line, prog)
- fp.close()
-
-def pgrep(pat, *files):
- if len(files) == 1 and type(files[0]) == type([]):
- files = files[0]
- global opt_show_filename
- opt_show_filename = (len(files) != 1)
- import re
- prog = re.compile(pat)
- for filename in files:
- fp = open(filename, 'r')
- lineno = 0
- while 1:
- line = fp.readline()
- if not line: break
- lineno = lineno + 1
- if prog.search(line):
- showline(filename, lineno, line, prog)
- fp.close()
-
-def showline(filename, lineno, line, prog):
- if line[-1:] == '\n': line = line[:-1]
- if opt_show_lineno:
- prefix = `lineno`.rjust(3) + ': '
- else:
- prefix = ''
- if opt_show_filename:
- prefix = filename + ': ' + prefix
- print prefix + line
- if opt_show_where:
- start, end = prog.regs()[0]
- line = line[:start]
- if '\t' not in line:
- prefix = ' ' * (len(prefix) + start)
- else:
- prefix = ' ' * len(prefix)
- for c in line:
- if c != '\t': c = ' '
- prefix = prefix + c
- if start == end: prefix = prefix + '\\'
- else: prefix = prefix + '^'*(end-start)
- print prefix
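
grep.py depends on the removed regex module; its pgrep() above already shows the re-based variant. A self-contained sketch of the same line-matching loop using re only; grep_sketch() and the example filename are illustrative:

    import re

    def grep_sketch(pattern, filename):
        prog = re.compile(pattern)
        fp = open(filename)
        lineno = 0
        for line in fp:
            lineno = lineno + 1
            if prog.search(line):
                print('%d: %s' % (lineno, line.rstrip('\n')))
        fp.close()

    # grep_sketch(r'def \w+', 'example.py')
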
diff --git a/Lib/lib-old/lockfile.py b/Lib/lib-old/lockfile.py
deleted file mode 100644
index cde9b48..0000000
--- a/Lib/lib-old/lockfile.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import struct, fcntl
-
-def writelock(f):
- _lock(f, fcntl.F_WRLCK)
-
-def readlock(f):
- _lock(f, fcntl.F_RDLCK)
-
-def unlock(f):
- _lock(f, fcntl.F_UNLCK)
-
-def _lock(f, op):
- dummy = fcntl.fcntl(f.fileno(), fcntl.F_SETLKW,
- struct.pack('2h8l', op,
- 0, 0, 0, 0, 0, 0, 0, 0, 0))
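
The struct-packed F_SETLKW call above can be expressed with fcntl.lockf, which builds the lock record itself. A minimal sketch of equivalent blocking advisory locks (POSIX only):

    import fcntl

    def writelock(f):
        fcntl.lockf(f.fileno(), fcntl.LOCK_EX)   # blocking exclusive lock

    def readlock(f):
        fcntl.lockf(f.fileno(), fcntl.LOCK_SH)   # blocking shared lock

    def unlock(f):
        fcntl.lockf(f.fileno(), fcntl.LOCK_UN)
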
diff --git a/Lib/lib-old/newdir.py b/Lib/lib-old/newdir.py
deleted file mode 100644
index 356becc..0000000
--- a/Lib/lib-old/newdir.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# New dir() function
-
-
-# This should be the new dir(), except that it should still list
-# the current local name space by default
-
-def listattrs(x):
- try:
- dictkeys = x.__dict__.keys()
- except (AttributeError, TypeError):
- dictkeys = []
- #
- try:
- methods = x.__methods__
- except (AttributeError, TypeError):
- methods = []
- #
- try:
- members = x.__members__
- except (AttributeError, TypeError):
- members = []
- #
- try:
- the_class = x.__class__
- except (AttributeError, TypeError):
- the_class = None
- #
- try:
- bases = x.__bases__
- except (AttributeError, TypeError):
- bases = ()
- #
- total = dictkeys + methods + members
- if the_class:
- # It's a class instace; add the class's attributes
- # that are functions (methods)...
- class_attrs = listattrs(the_class)
- class_methods = []
- for name in class_attrs:
- if is_function(getattr(the_class, name)):
- class_methods.append(name)
- total = total + class_methods
- elif bases:
- # It's a derived class; add the base class attributes
- for base in bases:
- base_attrs = listattrs(base)
- total = total + base_attrs
- total.sort()
- return total
- i = 0
- while i+1 < len(total):
- if total[i] == total[i+1]:
- del total[i+1]
- else:
- i = i+1
- return total
-
-
-# Helper to recognize functions
-
-def is_function(x):
- return type(x) == type(is_function)
-
-
-# Approximation of builtin dir(); but note that this lists the user's
-# variables by default, not the current local name space.
-
-def dir(x = None):
- if x is not None:
- return listattrs(x)
- else:
- import __main__
- return listattrs(__main__)
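
The behaviour described above (instance attributes plus class and base-class attributes, sorted and de-duplicated) is what the built-in dir() now does, so no helper is needed. A tiny sketch; the Base and Derived classes are illustrative:

    class Base(object):
        def ping(self):
            return 'ping'

    class Derived(Base):
        def pong(self):
            return 'pong'

    names = dir(Derived())
    # 'ping' and 'pong' both appear, alongside inherited attributes,
    # sorted and without duplicates.
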
diff --git a/Lib/lib-old/ni.py b/Lib/lib-old/ni.py
deleted file mode 100644
index 074f989..0000000
--- a/Lib/lib-old/ni.py
+++ /dev/null
@@ -1,433 +0,0 @@
-"""New import scheme with package support.
-
-Quick Reference
----------------
-
-- To enable package support, execute "import ni" before importing any
- packages. Importing this module automatically installs the relevant
- import hooks.
-
-- To create a package named spam containing sub-modules ham, bacon and
- eggs, create a directory spam somewhere on Python's module search
- path (i.e. spam's parent directory must be one of the directories in
- sys.path or $PYTHONPATH); then create files ham.py, bacon.py and
- eggs.py inside spam.
-
-- To import module ham from package spam and use function hamneggs()
- from that module, you can either do
-
- import spam.ham # *not* "import spam" !!!
- spam.ham.hamneggs()
-
- or
-
- from spam import ham
- ham.hamneggs()
-
- or
-
- from spam.ham import hamneggs
- hamneggs()
-
-- Importing just "spam" does not do what you expect: it creates an
- empty package named spam if one does not already exist, but it does
- not import spam's submodules. The only submodule that is guaranteed
- to be imported is spam.__init__, if it exists. Note that
- spam.__init__ is a submodule of package spam. It can reference to
- spam's namespace via the '__.' prefix, for instance
-
- __.spam_inited = 1 # Set a package-level variable
-
-
-
-Theory of Operation
--------------------
-
-A Package is a module that can contain other modules. Packages can be
-nested. Package introduce dotted names for modules, like P.Q.M, which
-could correspond to a file P/Q/M.py found somewhere on sys.path. It
-is possible to import a package itself, though this makes little sense
-unless the package contains a module called __init__.
-
-A package has two variables that control the namespace used for
-packages and modules, both initialized to sensible defaults the first
-time the package is referenced.
-
-(1) A package's *module search path*, contained in the per-package
-variable __path__, defines a list of *directories* where submodules or
-subpackages of the package are searched. It is initialized to the
-directory containing the package. Setting this variable to None makes
-the module search path default to sys.path (this is not quite the same
-as setting it to sys.path, since the latter won't track later
-assignments to sys.path).
-
-(2) A package's *import domain*, contained in the per-package variable
-__domain__, defines a list of *packages* that are searched (using
-their respective module search paths) to satisfy imports. It is
-initialized to the list consisting of the package itself, its parent
-package, its parent's parent, and so on, ending with the root package
-(the nameless package containing all top-level packages and modules,
-whose module search path is None, implying sys.path).
-
-The default domain implements a search algorithm called "expanding
-search". An alternative search algorithm called "explicit search"
-fixes the import search path to contain only the root package,
-requiring the modules in the package to name all imported modules by
-their full name. The convention of using '__' to refer to the current
-package (both as a per-module variable and in module names) can be
-used by packages using explicit search to refer to modules in the same
-package; this combination is known as "explicit-relative search".
-
-The PackageImporter and PackageLoader classes together implement the
-following policies:
-
-- There is a root package, whose name is ''. It cannot be imported
- directly but may be referenced, e.g. by using '__' from a top-level
- module.
-
-- In each module or package, the variable '__' contains a reference to
- the parent package; in the root package, '__' points to itself.
-
-- In the name for imported modules (e.g. M in "import M" or "from M
- import ..."), a leading '__' refers to the current package (i.e.
- the package containing the current module); leading '__.__' and so
- on refer to the current package's parent, and so on. The use of
- '__' elsewhere in the module name is not supported.
-
-- Modules are searched using the "expanding search" algorithm by
- virtue of the default value for __domain__.
-
-- If A.B.C is imported, A is searched using __domain__; then
- subpackage B is searched in A using its __path__, and so on.
-
-- Built-in modules have priority: even if a file sys.py exists in a
- package, "import sys" imports the built-in sys module.
-
-- The same holds for frozen modules, for better or for worse.
-
-- Submodules and subpackages are not automatically loaded when their
- parent packages is loaded.
-
-- The construct "from package import *" is illegal. (It can still be
- used to import names from a module.)
-
-- When "from package import module1, module2, ..." is used, those
- modules are explicitly loaded.
-
-- When a package is loaded, if it has a submodule __init__, that
- module is loaded. This is the place where required submodules can
- be loaded, the __path__ variable extended, etc. The __init__ module
- is loaded even if the package was loaded only in order to create a
- stub for a sub-package: if "import P.Q.R" is the first reference to
- P, and P has a submodule __init__, P.__init__ is loaded before P.Q
- is even searched.
-
-Caveats:
-
-- It is possible to import a package that has no __init__ submodule;
- this is not particularly useful but there may be useful applications
- for it (e.g. to manipulate its search paths from the outside!).
-
-- There are no special provisions for os.chdir(). If you plan to use
- os.chdir() before you have imported all your modules, it is better
- not to have relative pathnames in sys.path. (This could actually be
- fixed by changing the implementation of path_join() in the hook to
- absolutize paths.)
-
-- Packages and modules are introduced in sys.modules as soon as their
- loading is started. When the loading is terminated by an exception,
- the sys.modules entries remain around.
-
-- There are no special measures to support mutually recursive modules,
- but it will work under the same conditions where it works in the
- flat module space system.
-
-- Sometimes dummy entries (whose value is None) are entered in
- sys.modules, to indicate that a particular module does not exist --
- this is done to speed up the expanding search algorithm when a
- module residing at a higher level is repeatedly imported (Python
- promises that importing a previously imported module is cheap!)
-
-- Although dynamically loaded extensions are allowed inside packages,
- the current implementation (hardcoded in the interpreter) of their
- initialization may cause problems if an extension invokes the
- interpreter during its initialization.
-
-- reload() may find another version of the module only if it occurs on
- the package search path. Thus, it keeps the connection to the
- package to which the module belongs, but may find a different file.
-
-XXX Need to have an explicit name for '', e.g. '__root__'.
-
-"""
-
-
-import imp
-import sys
-import __builtin__
-
-import ihooks
-from ihooks import ModuleLoader, ModuleImporter
-
-
-class PackageLoader(ModuleLoader):
-
- """A subclass of ModuleLoader with package support.
-
- find_module_in_dir() will succeed if there's a subdirectory with
- the given name; load_module() will create a stub for a package and
- load its __init__ module if it exists.
-
- """
-
- def find_module_in_dir(self, name, dir):
- if dir is not None:
- dirname = self.hooks.path_join(dir, name)
- if self.hooks.path_isdir(dirname):
- return None, dirname, ('', '', 'PACKAGE')
- return ModuleLoader.find_module_in_dir(self, name, dir)
-
- def load_module(self, name, stuff):
- file, filename, info = stuff
- suff, mode, type = info
- if type == 'PACKAGE':
- return self.load_package(name, stuff)
- if sys.modules.has_key(name):
- m = sys.modules[name]
- else:
- sys.modules[name] = m = imp.new_module(name)
- self.set_parent(m)
- if type == imp.C_EXTENSION and '.' in name:
- return self.load_dynamic(name, stuff)
- else:
- return ModuleLoader.load_module(self, name, stuff)
-
- def load_dynamic(self, name, stuff):
- file, filename, (suff, mode, type) = stuff
- # Hack around restriction in imp.load_dynamic()
- i = name.rfind('.')
- tail = name[i+1:]
- if sys.modules.has_key(tail):
- save = sys.modules[tail]
- else:
- save = None
- sys.modules[tail] = imp.new_module(name)
- try:
- m = imp.load_dynamic(tail, filename, file)
- finally:
- if save:
- sys.modules[tail] = save
- else:
- del sys.modules[tail]
- sys.modules[name] = m
- return m
-
- def load_package(self, name, stuff):
- file, filename, info = stuff
- if sys.modules.has_key(name):
- package = sys.modules[name]
- else:
- sys.modules[name] = package = imp.new_module(name)
- package.__path__ = [filename]
- self.init_package(package)
- return package
-
- def init_package(self, package):
- self.set_parent(package)
- self.set_domain(package)
- self.call_init_module(package)
-
- def set_parent(self, m):
- name = m.__name__
- if '.' in name:
- name = name[:name.rfind('.')]
- else:
- name = ''
- m.__ = sys.modules[name]
-
- def set_domain(self, package):
- name = package.__name__
- package.__domain__ = domain = [name]
- while '.' in name:
- name = name[:name.rfind('.')]
- domain.append(name)
- if name:
- domain.append('')
-
- def call_init_module(self, package):
- stuff = self.find_module('__init__', package.__path__)
- if stuff:
- m = self.load_module(package.__name__ + '.__init__', stuff)
- package.__init__ = m
-
-
-class PackageImporter(ModuleImporter):
-
- """Importer that understands packages and '__'."""
-
- def __init__(self, loader = None, verbose = 0):
- ModuleImporter.__init__(self,
- loader or PackageLoader(None, verbose), verbose)
-
- def import_module(self, name, globals={}, locals={}, fromlist=[]):
- if globals.has_key('__'):
- package = globals['__']
- else:
- # No calling context, assume in root package
- package = sys.modules['']
- if name[:3] in ('__.', '__'):
- p = package
- name = name[3:]
- while name[:3] in ('__.', '__'):
- p = p.__
- name = name[3:]
- if not name:
- return self.finish(package, p, '', fromlist)
- if '.' in name:
- i = name.find('.')
- name, tail = name[:i], name[i:]
- else:
- tail = ''
- mname = p.__name__ and p.__name__+'.'+name or name
- m = self.get1(mname)
- return self.finish(package, m, tail, fromlist)
- if '.' in name:
- i = name.find('.')
- name, tail = name[:i], name[i:]
- else:
- tail = ''
- for pname in package.__domain__:
- mname = pname and pname+'.'+name or name
- m = self.get0(mname)
- if m: break
- else:
- raise ImportError, "No such module %s" % name
- return self.finish(m, m, tail, fromlist)
-
- def finish(self, module, m, tail, fromlist):
- # Got ....A; now get ....A.B.C.D
- yname = m.__name__
- if tail and sys.modules.has_key(yname + tail): # Fast path
- yname, tail = yname + tail, ''
- m = self.get1(yname)
- while tail:
- i = tail.find('.', 1)
- if i > 0:
- head, tail = tail[:i], tail[i:]
- else:
- head, tail = tail, ''
- yname = yname + head
- m = self.get1(yname)
-
- # Got ....A.B.C.D; now finalize things depending on fromlist
- if not fromlist:
- return module
- if '__' in fromlist:
- raise ImportError, "Can't import __ from anywhere"
- if not hasattr(m, '__path__'): return m
- if '*' in fromlist:
- raise ImportError, "Can't import * from a package"
- for f in fromlist:
- if hasattr(m, f): continue
- fname = yname + '.' + f
- self.get1(fname)
- return m
-
- def get1(self, name):
- m = self.get(name)
- if not m:
- raise ImportError, "No module named %s" % name
- return m
-
- def get0(self, name):
- m = self.get(name)
- if not m:
- sys.modules[name] = None
- return m
-
- def get(self, name):
- # Internal routine to get or load a module when its parent exists
- if sys.modules.has_key(name):
- return sys.modules[name]
- if '.' in name:
- i = name.rfind('.')
- head, tail = name[:i], name[i+1:]
- else:
- head, tail = '', name
- path = sys.modules[head].__path__
- stuff = self.loader.find_module(tail, path)
- if not stuff:
- return None
- sys.modules[name] = m = self.loader.load_module(name, stuff)
- if head:
- setattr(sys.modules[head], tail, m)
- return m
-
- def reload(self, module):
- name = module.__name__
- if '.' in name:
- i = name.rfind('.')
- head, tail = name[:i], name[i+1:]
- path = sys.modules[head].__path__
- else:
- tail = name
- path = sys.modules[''].__path__
- stuff = self.loader.find_module(tail, path)
- if not stuff:
- raise ImportError, "No module named %s" % name
- return self.loader.load_module(name, stuff)
-
- def unload(self, module):
- if hasattr(module, '__path__'):
- raise ImportError, "don't know how to unload packages yet"
- PackageImporter.unload(self, module)
-
- def install(self):
- if not sys.modules.has_key(''):
- sys.modules[''] = package = imp.new_module('')
- package.__path__ = None
- self.loader.init_package(package)
- for m in sys.modules.values():
- if not m: continue
- if not hasattr(m, '__'):
- self.loader.set_parent(m)
- ModuleImporter.install(self)
-
-
-def install(v = 0):
- ihooks.install(PackageImporter(None, v))
-
-def uninstall():
- ihooks.uninstall()
-
-def ni(v = 0):
- install(v)
-
-def no():
- uninstall()
-
-def test():
- import pdb
- try:
- testproper()
- except:
- sys.last_type, sys.last_value, sys.last_traceback = sys.exc_info()
- print
- print sys.last_type, ':', sys.last_value
- print
- pdb.pm()
-
-def testproper():
- install(1)
- try:
- import mactest
- print dir(mactest)
- raw_input('OK?')
- finally:
- uninstall()
-
-
-if __name__ == '__main__':
- test()
-else:
- install()
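
For reference, the package semantics that ni.py bolted on via ihooks are what the interpreter's built-in import machinery now provides natively: a directory with an __init__ module is a package, and submodules are addressed by dotted names. A minimal sketch, assuming a hypothetical package "spam" somewhere on sys.path:

    # Hypothetical layout:
    #   spam/
    #       __init__.py    # marks the directory as a package; runs on first import
    #       eggs.py        # a submodule
    import spam            # executes spam/__init__.py once
    import spam.eggs       # loads the submodule and binds it as an attribute of spam
    print spam.eggs.__name__       # prints 'spam.eggs'

Unlike ni, there is no '__' parent alias; intra-package references are written with full dotted names.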
diff --git a/Lib/lib-old/packmail.py b/Lib/lib-old/packmail.py
deleted file mode 100644
index e569108..0000000
--- a/Lib/lib-old/packmail.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# Module 'packmail' -- create a self-unpacking shell archive.
-
-# This module works on UNIX and on the Mac; the archives can unpack
-# themselves only on UNIX.
-
-import os
-from stat import ST_MTIME
-
-# Print help
-def help():
- print 'All fns have a file open for writing as first parameter'
- print 'pack(f, fullname, name): pack fullname as name'
- print 'packsome(f, directory, namelist): selected files from directory'
- print 'packall(f, directory): pack all files from directory'
- print 'packnotolder(f, directory, name): pack all files from directory'
- print ' that are not older than a file there'
- print 'packtree(f, directory): pack entire directory tree'
-
-# Pack one file
-def pack(outfp, file, name):
- fp = open(file, 'r')
- outfp.write('echo ' + name + '\n')
- outfp.write('sed "s/^X//" >"' + name + '" <<"!"\n')
- while 1:
- line = fp.readline()
- if not line: break
- if line[-1:] != '\n':
- line = line + '\n'
- outfp.write('X' + line)
- outfp.write('!\n')
- fp.close()
-
-# Pack some files from a directory
-def packsome(outfp, dirname, names):
- for name in names:
- print name
- file = os.path.join(dirname, name)
- pack(outfp, file, name)
-
-# Pack all files from a directory
-def packall(outfp, dirname):
- names = os.listdir(dirname)
- try:
- names.remove('.')
- except:
- pass
- try:
- names.remove('..')
- except:
- pass
- names.sort()
- packsome(outfp, dirname, names)
-
-# Pack all files from a directory that are not older than a given one
-def packnotolder(outfp, dirname, oldest):
- names = os.listdir(dirname)
- try:
- names.remove('.')
- except:
- pass
- try:
- names.remove('..')
- except:
- pass
- oldest = os.path.join(dirname, oldest)
- st = os.stat(oldest)
- mtime = st[ST_MTIME]
- todo = []
- for name in names:
- print name, '...',
- st = os.stat(os.path.join(dirname, name))
- if st[ST_MTIME] >= mtime:
- print 'Yes.'
- todo.append(name)
- else:
- print 'No.'
- todo.sort()
- packsome(outfp, dirname, todo)
-
-# Pack a whole tree (no exceptions)
-def packtree(outfp, dirname):
- print 'packtree', dirname
- outfp.write('mkdir ' + unixfix(dirname) + '\n')
- names = os.listdir(dirname)
- try:
- names.remove('.')
- except:
- pass
- try:
- names.remove('..')
- except:
- pass
- subdirs = []
- for name in names:
- fullname = os.path.join(dirname, name)
- if os.path.isdir(fullname):
- subdirs.append(fullname)
- else:
- print 'pack', fullname
- pack(outfp, fullname, unixfix(fullname))
- for subdirname in subdirs:
- packtree(outfp, subdirname)
-
-def unixfix(name):
- comps = name.split(os.sep)
- res = ''
- for comp in comps:
- if comp:
- if res: res = res + '/'
- res = res + comp
- return res
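
packmail emitted sed-based shell archives that only unpack on UNIX. A rough replacement can be sketched with the standard tarfile module (the archive and directory names here are hypothetical):

    import tarfile

    def pack_directory(archive_name, dirname):
        # Pack an entire directory tree into a gzip-compressed tar archive.
        tar = tarfile.open(archive_name, 'w:gz')
        try:
            tar.add(dirname)
        finally:
            tar.close()

    # pack_directory('lib-old.tar.gz', 'Lib/lib-old')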
diff --git a/Lib/lib-old/poly.py b/Lib/lib-old/poly.py
deleted file mode 100644
index fe6a1dc..0000000
--- a/Lib/lib-old/poly.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# module 'poly' -- Polynomials
-
-# A polynomial is represented by a list of coefficients, e.g.,
-# [1, 10, 5] represents 1*x**0 + 10*x**1 + 5*x**2 (or 1 + 10x + 5x**2).
-# There is no way to suppress internal zeros; trailing zeros are
-# taken out by normalize().
-
-def normalize(p): # Strip unnecessary zero coefficients
- n = len(p)
- while n:
- if p[n-1]: return p[:n]
- n = n-1
- return []
-
-def plus(a, b):
- if len(a) < len(b): a, b = b, a # make sure a is the longest
- res = a[:] # make a copy
- for i in range(len(b)):
- res[i] = res[i] + b[i]
- return normalize(res)
-
-def minus(a, b):
- neg_b = map(lambda x: -x, b[:])
- return plus(a, neg_b)
-
-def one(power, coeff): # Representation of coeff * x**power
- res = []
- for i in range(power): res.append(0)
- return res + [coeff]
-
-def times(a, b):
- res = []
- for i in range(len(a)):
- for j in range(len(b)):
- res = plus(res, one(i+j, a[i]*b[j]))
- return res
-
-def power(a, n): # Raise polynomial a to the positive integral power n
- if n == 0: return [1]
- if n == 1: return a
- if n/2*2 == n:
- b = power(a, n/2)
- return times(b, b)
- return times(power(a, n-1), a)
-
-def der(a): # First derivative
- res = a[1:]
- for i in range(len(res)):
- res[i] = res[i] * (i+1)
- return res
-
-# Computing a primitive function would require rational arithmetic...
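
A short worked example of the coefficient-list convention described above, assuming the removed module were still importable as 'poly':

    import poly

    p = poly.plus([1], poly.one(1, 1))   # 1 + x            -> [1, 1]
    sq = poly.times(p, p)                # (1 + x)**2       -> [1, 2, 1]
    print sq                             # [1, 2, 1]
    print poly.power(p, 2)               # same result via power()
    print poly.der(sq)                   # d/dx (1+2x+x**2) -> [2, 2]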
diff --git a/Lib/lib-old/rand.py b/Lib/lib-old/rand.py
deleted file mode 100644
index a557b69..0000000
--- a/Lib/lib-old/rand.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Module 'rand'
-# Don't use unless you want compatibility with C's rand()!
-
-import whrandom
-
-def srand(seed):
- whrandom.seed(seed%256, seed/256%256, seed/65536%256)
-
-def rand():
- return int(whrandom.random() * 32768.0) % 32768
-
-def choice(seq):
- return seq[rand() % len(seq)]
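
The standard random module covers the same ground without the C-rand() compatibility quirks; a minimal sketch:

    import random

    random.seed(12345)                     # seed the generator
    print random.randrange(32768)          # an int in [0, 32768), like rand()
    print random.choice(['a', 'b', 'c'])   # pick a random element of a sequence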
diff --git a/Lib/lib-old/statcache.py b/Lib/lib-old/statcache.py
deleted file mode 100644
index d478393..0000000
--- a/Lib/lib-old/statcache.py
+++ /dev/null
@@ -1,82 +0,0 @@
-"""Maintain a cache of stat() information on files.
-
-There are functions to reset the cache or to selectively remove items.
-"""
-
-import warnings
-warnings.warn("The statcache module is obsolete. Use os.stat() instead.",
- DeprecationWarning)
-del warnings
-
-import os as _os
-from stat import *
-
-__all__ = ["stat","reset","forget","forget_prefix","forget_dir",
- "forget_except_prefix","isdir"]
-
-# The cache. Keys are pathnames, values are os.stat outcomes.
-# Remember that multiple threads may be calling this! So, e.g., the fact
-# that 'path in cache' returns 1 doesn't mean the cache will still
-# contain path on the next line. Code defensively.
-
-cache = {}
-
-def stat(path):
- """Stat a file, possibly out of the cache."""
- ret = cache.get(path, None)
- if ret is None:
- cache[path] = ret = _os.stat(path)
- return ret
-
-def reset():
- """Clear the cache."""
- cache.clear()
-
-# For thread safety, always use forget() internally too.
-def forget(path):
- """Remove a given item from the cache, if it exists."""
- try:
- del cache[path]
- except KeyError:
- pass
-
-def forget_prefix(prefix):
- """Remove all pathnames with a given prefix."""
- for path in cache.keys():
- if path.startswith(prefix):
- forget(path)
-
-def forget_dir(prefix):
- """Forget a directory and all entries except for entries in subdirs."""
-
-    # Remove trailing separator, if any. This is tricky to do in an
- # x-platform way. For example, Windows accepts both / and \ as
- # separators, and if there's nothing *but* a separator we want to
- # preserve that this is the root. Only os.path has the platform
- # knowledge we need.
- from os.path import split, join
- prefix = split(join(prefix, "xxx"))[0]
- forget(prefix)
- for path in cache.keys():
- # First check that the path at least starts with the prefix, so
- # that when it doesn't we can avoid paying for split().
- if path.startswith(prefix) and split(path)[0] == prefix:
- forget(path)
-
-def forget_except_prefix(prefix):
- """Remove all pathnames except with a given prefix.
-
- Normally used with prefix = '/' after a chdir().
- """
-
- for path in cache.keys():
- if not path.startswith(prefix):
- forget(path)
-
-def isdir(path):
- """Return True if directory, else False."""
- try:
- st = stat(path)
- except _os.error:
- return False
- return S_ISDIR(st.st_mode)
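
As the deprecation warning above says, plain os.stat() replaces the cache. A minimal uncached sketch of the same isdir() check:

    import os
    from stat import S_ISDIR

    def isdir(path):
        # Same test as above, but hitting the filesystem on every call.
        try:
            st = os.stat(path)
        except OSError:
            return False
        return S_ISDIR(st.st_mode)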
diff --git a/Lib/lib-old/tb.py b/Lib/lib-old/tb.py
deleted file mode 100644
index 9063559..0000000
--- a/Lib/lib-old/tb.py
+++ /dev/null
@@ -1,177 +0,0 @@
-# Print tracebacks, with a dump of local variables.
-# Also an interactive stack trace browser.
-# Note -- this module is obsolete -- use pdb.pm() instead.
-
-import sys
-import os
-from stat import *
-import linecache
-
-def br(): browser(sys.last_traceback)
-
-def tb(): printtb(sys.last_traceback)
-
-def browser(tb):
- if not tb:
- print 'No traceback.'
- return
- tblist = []
- while tb:
- tblist.append(tb)
- tb = tb.tb_next
- ptr = len(tblist)-1
- tb = tblist[ptr]
- while 1:
- if tb != tblist[ptr]:
- tb = tblist[ptr]
- print `ptr` + ':',
- printtbheader(tb)
- try:
- line = raw_input('TB: ')
- except KeyboardInterrupt:
- print '\n[Interrupted]'
- break
- except EOFError:
- print '\n[EOF]'
- break
- cmd = line.strip()
- if cmd:
- if cmd == 'quit':
- break
- elif cmd == 'list':
- browserlist(tb)
- elif cmd == 'up':
- if ptr-1 >= 0: ptr = ptr-1
- else: print 'Bottom of stack.'
- elif cmd == 'down':
- if ptr+1 < len(tblist): ptr = ptr+1
- else: print 'Top of stack.'
- elif cmd == 'locals':
- printsymbols(tb.tb_frame.f_locals)
- elif cmd == 'globals':
- printsymbols(tb.tb_frame.f_globals)
- elif cmd in ('?', 'help'):
- browserhelp()
- else:
- browserexec(tb, cmd)
-
-def browserlist(tb):
- filename = tb.tb_frame.f_code.co_filename
- lineno = tb.tb_lineno
- last = lineno
- first = max(1, last-10)
- for i in range(first, last+1):
- if i == lineno: prefix = '***' + `i`.rjust(4) + ':'
- else: prefix = `i`.rjust(7) + ':'
- line = linecache.getline(filename, i)
- if line[-1:] == '\n': line = line[:-1]
- print prefix + line
-
-def browserexec(tb, cmd):
- locals = tb.tb_frame.f_locals
- globals = tb.tb_frame.f_globals
- try:
- exec cmd+'\n' in globals, locals
- except:
- t, v = sys.exc_info()[:2]
- print '*** Exception:',
- if type(t) is type(''):
- print t,
- else:
- print t.__name__,
- if v is not None:
- print ':', v,
- print
- print 'Type help to get help.'
-
-def browserhelp():
- print
- print ' This is the traceback browser. Commands are:'
- print ' up : move one level up in the call stack'
- print ' down : move one level down in the call stack'
- print ' locals : print all local variables at this level'
- print ' globals : print all global variables at this level'
- print ' list : list source code around the failure'
- print ' help : print help (what you are reading now)'
- print ' quit : back to command interpreter'
- print ' Typing any other 1-line statement will execute it'
- print ' using the current level\'s symbol tables'
- print
-
-def printtb(tb):
- while tb:
- print1tb(tb)
- tb = tb.tb_next
-
-def print1tb(tb):
- printtbheader(tb)
- if tb.tb_frame.f_locals is not tb.tb_frame.f_globals:
- printsymbols(tb.tb_frame.f_locals)
-
-def printtbheader(tb):
- filename = tb.tb_frame.f_code.co_filename
- lineno = tb.tb_lineno
- info = '"' + filename + '"(' + `lineno` + ')'
- line = linecache.getline(filename, lineno)
- if line:
- info = info + ': ' + line.strip()
- print info
-
-def printsymbols(d):
- keys = d.keys()
- keys.sort()
- for name in keys:
- print ' ' + name.ljust(12) + ':',
- printobject(d[name], 4)
- print
-
-def printobject(v, maxlevel):
- if v is None:
- print 'None',
- elif type(v) in (type(0), type(0.0)):
- print v,
- elif type(v) is type(''):
- if len(v) > 20:
- print `v[:17] + '...'`,
- else:
- print `v`,
- elif type(v) is type(()):
- print '(',
- printlist(v, maxlevel)
- print ')',
- elif type(v) is type([]):
- print '[',
- printlist(v, maxlevel)
- print ']',
- elif type(v) is type({}):
- print '{',
- printdict(v, maxlevel)
- print '}',
- else:
- print v,
-
-def printlist(v, maxlevel):
- n = len(v)
- if n == 0: return
- if maxlevel <= 0:
- print '...',
- return
- for i in range(min(6, n)):
- printobject(v[i], maxlevel-1)
- if i+1 < n: print ',',
- if n > 6: print '...',
-
-def printdict(v, maxlevel):
- keys = v.keys()
- n = len(keys)
- if n == 0: return
- if maxlevel <= 0:
- print '...',
- return
- keys.sort()
- for i in range(min(6, n)):
- key = keys[i]
- print `key` + ':',
- printobject(v[key], maxlevel-1)
- if i+1 < n: print ',',
- if n > 6: print '...',
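
The header above points at pdb.pm() as the replacement; a minimal sketch of post-mortem browsing with pdb (the failing expression is just an illustration):

    import pdb, sys

    def post_mortem_demo():
        try:
            1 / 0
        except ZeroDivisionError:
            tb = sys.exc_info()[2]
            pdb.post_mortem(tb)   # then use: up, down, list, p <expr>, quit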
diff --git a/Lib/lib-old/tzparse.py b/Lib/lib-old/tzparse.py
deleted file mode 100644
index 12468b5..0000000
--- a/Lib/lib-old/tzparse.py
+++ /dev/null
@@ -1,98 +0,0 @@
-"""Parse a timezone specification."""
-
-# XXX Unfinished.
-# XXX Only the typical form "XXXhhYYY;ddd/hh,ddd/hh" is currently supported.
-
-import warnings
-warnings.warn(
- "The tzparse module is obsolete and will disappear in the future",
- DeprecationWarning)
-
-tzpat = ('^([A-Z][A-Z][A-Z])([-+]?[0-9]+)([A-Z][A-Z][A-Z]);'
- '([0-9]+)/([0-9]+),([0-9]+)/([0-9]+)$')
-
-tzprog = None
-
-def tzparse(tzstr):
- """Given a timezone spec, return a tuple of information
- (tzname, delta, dstname, daystart, hourstart, dayend, hourend),
- where 'tzname' is the name of the timezone, 'delta' is the offset
- in hours from GMT, 'dstname' is the name of the daylight-saving
- timezone, and 'daystart'/'hourstart' and 'dayend'/'hourend'
- specify the starting and ending points for daylight saving time."""
- global tzprog
- if tzprog is None:
- import re
- tzprog = re.compile(tzpat)
- match = tzprog.match(tzstr)
- if not match:
- raise ValueError, 'not the TZ syntax I understand'
- subs = []
- for i in range(1, 8):
- subs.append(match.group(i))
- for i in (1, 3, 4, 5, 6):
- subs[i] = eval(subs[i])
- [tzname, delta, dstname, daystart, hourstart, dayend, hourend] = subs
- return (tzname, delta, dstname, daystart, hourstart, dayend, hourend)
-
-def tzlocaltime(secs, params):
- """Given a Unix time in seconds and a tuple of information about
- a timezone as returned by tzparse(), return the local time in the
- form (year, month, day, hour, min, sec, yday, wday, tzname)."""
- import time
- (tzname, delta, dstname, daystart, hourstart, dayend, hourend) = params
- year, month, days, hours, mins, secs, yday, wday, isdst = \
- time.gmtime(secs - delta*3600)
- if (daystart, hourstart) <= (yday+1, hours) < (dayend, hourend):
- tzname = dstname
- hours = hours + 1
- return year, month, days, hours, mins, secs, yday, wday, tzname
-
-def tzset():
- """Determine the current timezone from the "TZ" environment variable."""
- global tzparams, timezone, altzone, daylight, tzname
- import os
- tzstr = os.environ['TZ']
- tzparams = tzparse(tzstr)
- timezone = tzparams[1] * 3600
- altzone = timezone - 3600
- daylight = 1
- tzname = tzparams[0], tzparams[2]
-
-def isdst(secs):
- """Return true if daylight-saving time is in effect for the given
- Unix time in the current timezone."""
- import time
- (tzname, delta, dstname, daystart, hourstart, dayend, hourend) = \
- tzparams
- year, month, days, hours, mins, secs, yday, wday, isdst = \
- time.gmtime(secs - delta*3600)
- return (daystart, hourstart) <= (yday+1, hours) < (dayend, hourend)
-
-tzset()
-
-def localtime(secs):
- """Get the local time in the current timezone."""
- return tzlocaltime(secs, tzparams)
-
-def test():
- from time import asctime, gmtime
- import time, sys
- now = time.time()
- x = localtime(now)
- tm = x[:-1] + (0,)
- print 'now =', now, '=', asctime(tm), x[-1]
- now = now - now % (24*3600)
- if sys.argv[1:]: now = now + eval(sys.argv[1])
- x = gmtime(now)
- tm = x[:-1] + (0,)
- print 'gmtime =', now, '=', asctime(tm), 'yday =', x[-2]
- jan1 = now - x[-2]*24*3600
- x = localtime(jan1)
- tm = x[:-1] + (0,)
- print 'jan1 =', jan1, '=', asctime(tm), x[-1]
- for d in range(85, 95) + range(265, 275):
- t = jan1 + d*24*3600
- x = localtime(t)
- tm = x[:-1] + (0,)
- print 'd =', d, 't =', t, '=', asctime(tm), x[-1]
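
On Unix the time module can hand the TZ string to the C library instead of parsing it by hand; a minimal sketch (the zone string is a hypothetical example, and time.tzset() is Unix-only):

    import os, time

    os.environ['TZ'] = 'EST5EDT'        # hypothetical TZ value
    time.tzset()
    print time.tzname                   # e.g. ('EST', 'EDT')
    print time.timezone / 3600          # standard-time offset west of UTC, in hours
    print time.daylight                 # nonzero if a DST zone is defined
    print time.localtime()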
diff --git a/Lib/lib-old/util.py b/Lib/lib-old/util.py
deleted file mode 100644
index 104af1e..0000000
--- a/Lib/lib-old/util.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Module 'util' -- some useful functions that don't fit elsewhere
-
-# NB: These are now built-in functions, but this module is provided
-# for compatibility. Don't use in new programs unless you need backward
-# compatibility (i.e. need to run with old interpreters).
-
-
-# Remove an item from a list.
-# No complaints if it isn't in the list at all.
-# If it occurs more than once, remove the first occurrence.
-#
-def remove(item, list):
- if item in list: list.remove(item)
-
-
-# Return a string containing a file's contents.
-#
-def readfile(fn):
- return readopenfile(open(fn, 'r'))
-
-
-# Read an open file until EOF.
-#
-def readopenfile(fp):
- return fp.read()
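
The built-in equivalents the note above refers to, in a short sketch (the filename is hypothetical):

    lst = [1, 2, 3, 2]
    if 2 in lst:
        lst.remove(2)              # removes only the first occurrence

    fp = open('somefile.txt')      # hypothetical file
    contents = fp.read()           # read the whole file until EOF
    fp.close()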
diff --git a/Lib/lib-old/whatsound.py b/Lib/lib-old/whatsound.py
deleted file mode 100644
index 1b1df23..0000000
--- a/Lib/lib-old/whatsound.py
+++ /dev/null
@@ -1 +0,0 @@
-from sndhdr import *
diff --git a/Lib/lib-old/whrandom.py b/Lib/lib-old/whrandom.py
deleted file mode 100644
index bc0d1a4..0000000
--- a/Lib/lib-old/whrandom.py
+++ /dev/null
@@ -1,144 +0,0 @@
-"""Wichman-Hill random number generator.
-
-Wichmann, B. A. & Hill, I. D. (1982)
-Algorithm AS 183:
-An efficient and portable pseudo-random number generator
-Applied Statistics 31 (1982) 188-190
-
-see also:
- Correction to Algorithm AS 183
- Applied Statistics 33 (1984) 123
-
- McLeod, A. I. (1985)
- A remark on Algorithm AS 183
- Applied Statistics 34 (1985),198-200
-
-
-USE:
-whrandom.random() yields double precision random numbers
- uniformly distributed between 0 and 1.
-
-whrandom.seed(x, y, z) must be called before whrandom.random()
- to seed the generator
-
-There is also an interface to create multiple independent
-random generators, and to choose from other ranges.
-
-
-
-Multi-threading note: the random number generator used here is not
-thread-safe; it is possible that nearly simultaneous calls in
-different threads return the same random value. To avoid this, you
-have to use a lock around all calls. (I didn't want to slow this
-down in the serial case by using a lock here.)
-"""
-
-import warnings
-warnings.warn("the whrandom module is deprecated; please use the random module",
- DeprecationWarning)
-
-# Translated by Guido van Rossum from C source provided by
-# Adrian Baddeley.
-
-
-class whrandom:
- def __init__(self, x = 0, y = 0, z = 0):
- """Initialize an instance.
- Without arguments, initialize from current time.
- With arguments (x, y, z), initialize from them."""
- self.seed(x, y, z)
-
- def seed(self, x = 0, y = 0, z = 0):
- """Set the seed from (x, y, z).
- These must be integers in the range [0, 256)."""
- if not type(x) == type(y) == type(z) == type(0):
- raise TypeError, 'seeds must be integers'
- if not (0 <= x < 256 and 0 <= y < 256 and 0 <= z < 256):
- raise ValueError, 'seeds must be in range(0, 256)'
- if 0 == x == y == z:
- # Initialize from current time
- import time
- t = long(time.time() * 256)
- t = int((t&0xffffff) ^ (t>>24))
- t, x = divmod(t, 256)
- t, y = divmod(t, 256)
- t, z = divmod(t, 256)
- # Zero is a poor seed, so substitute 1
- self._seed = (x or 1, y or 1, z or 1)
-
- def random(self):
- """Get the next random number in the range [0.0, 1.0)."""
- # This part is thread-unsafe:
- # BEGIN CRITICAL SECTION
- x, y, z = self._seed
- #
- x = (171 * x) % 30269
- y = (172 * y) % 30307
- z = (170 * z) % 30323
- #
- self._seed = x, y, z
- # END CRITICAL SECTION
- #
- return (x/30269.0 + y/30307.0 + z/30323.0) % 1.0
-
- def uniform(self, a, b):
- """Get a random number in the range [a, b)."""
- return a + (b-a) * self.random()
-
- def randint(self, a, b):
- """Get a random integer in the range [a, b] including
- both end points.
-
- (Deprecated; use randrange below.)"""
- return self.randrange(a, b+1)
-
- def choice(self, seq):
- """Choose a random element from a non-empty sequence."""
- return seq[int(self.random() * len(seq))]
-
- def randrange(self, start, stop=None, step=1, int=int, default=None):
- """Choose a random item from range(start, stop[, step]).
-
- This fixes the problem with randint() which includes the
- endpoint; in Python this is usually not what you want.
- Do not supply the 'int' and 'default' arguments."""
- # This code is a bit messy to make it fast for the
- # common case while still doing adequate error checking
- istart = int(start)
- if istart != start:
- raise ValueError, "non-integer arg 1 for randrange()"
- if stop is default:
- if istart > 0:
- return int(self.random() * istart)
- raise ValueError, "empty range for randrange()"
- istop = int(stop)
- if istop != stop:
- raise ValueError, "non-integer stop for randrange()"
- if step == 1:
- if istart < istop:
- return istart + int(self.random() *
- (istop - istart))
- raise ValueError, "empty range for randrange()"
- istep = int(step)
- if istep != step:
- raise ValueError, "non-integer step for randrange()"
- if istep > 0:
- n = (istop - istart + istep - 1) / istep
- elif istep < 0:
- n = (istop - istart + istep + 1) / istep
- else:
- raise ValueError, "zero step for randrange()"
-
- if n <= 0:
- raise ValueError, "empty range for randrange()"
- return istart + istep*int(self.random() * n)
-
-
-# Initialize from the current time
-_inst = whrandom()
-seed = _inst.seed
-random = _inst.random
-uniform = _inst.uniform
-randint = _inst.randint
-choice = _inst.choice
-randrange = _inst.randrange
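
The standard random module is the supported replacement; for reference, the AS 183 core used above fits in a few lines as a standalone sketch:

    def wh_step(state):
        # state is (x, y, z): small positive integers, e.g. from range(1, 256)
        x, y, z = state
        x = (171 * x) % 30269
        y = (172 * y) % 30307
        z = (170 * z) % 30323
        return (x, y, z), (x/30269.0 + y/30307.0 + z/30323.0) % 1.0

    state = (1, 2, 3)
    for i in range(3):
        state, r = wh_step(state)
        print r                    # three floats in [0.0, 1.0)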
diff --git a/Lib/lib-old/zmod.py b/Lib/lib-old/zmod.py
deleted file mode 100644
index 55f49df..0000000
--- a/Lib/lib-old/zmod.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# module 'zmod'
-
-# Compute properties of mathematical "fields" formed by taking
-# Z/n (the whole numbers modulo some whole number n) and an
-# irreducible polynomial (i.e., a polynomial with only complex zeros),
-# e.g., Z/5 and X**2 + 2.
-#
-# The field is formed by taking all possible linear combinations of
-# a set of d base vectors (where d is the degree of the polynomial).
-#
-# Note that this procedure doesn't yield a field for all combinations
-# of n and p: it may well be that some numbers have more than one
-# inverse and others have none. This is what we check.
-#
-# Remember that a field is a ring where each element has an inverse.
-# A ring has commutative addition and multiplication, a zero and a one:
-# 0*x = x*0 = 0, 0+x = x+0 = x, 1*x = x*1 = x. Also, the distributive
-# property holds: a*(b+c) = a*b + a*c.
-# (XXX I forget if this is an axiom or follows from the rules.)
-
-import poly
-
-
-# Example N and polynomial
-
-N = 5
-P = poly.plus(poly.one(0, 2), poly.one(2, 1)) # 2 + x**2
-
-
-# Return x modulo y. Returns >= 0 even if x < 0.
-
-def mod(x, y):
- return divmod(x, y)[1]
-
-
-# Normalize a polynomial modulo n and modulo p.
-
-def norm(a, n, p):
- a = poly.modulo(a, p)
- a = a[:]
- for i in range(len(a)): a[i] = mod(a[i], n)
- a = poly.normalize(a)
- return a
-
-
-# Make a list of all n^d elements of the proposed field.
-
-def make_all(mat):
- all = []
- for row in mat:
- for a in row:
- all.append(a)
- return all
-
-def make_elements(n, d):
- if d == 0: return [poly.one(0, 0)]
- sub = make_elements(n, d-1)
- all = []
- for a in sub:
- for i in range(n):
- all.append(poly.plus(a, poly.one(d-1, i)))
- return all
-
-def make_inv(all, n, p):
- x = poly.one(1, 1)
- inv = []
- for a in all:
- inv.append(norm(poly.times(a, x), n, p))
- return inv
-
-def checkfield(n, p):
- all = make_elements(n, len(p)-1)
- inv = make_inv(all, n, p)
- all1 = all[:]
- inv1 = inv[:]
- all1.sort()
- inv1.sort()
- if all1 == inv1: print 'BINGO!'
- else:
- print 'Sorry:', n, p
- print all
- print inv
-
-def rj(s, width):
- if type(s) is not type(''): s = `s`
- n = len(s)
- if n >= width: return s
- return ' '*(width - n) + s
-
-def lj(s, width):
- if type(s) is not type(''): s = `s`
- n = len(s)
- if n >= width: return s
- return s + ' '*(width - n)
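
The "every nonzero element has an inverse" check described above, sketched for plain Z/n (the degree-0 case); it succeeds exactly when n is prime:

    def is_field(n):
        # Z/n is a field iff each a in 1..n-1 has some b with a*b == 1 (mod n).
        for a in range(1, n):
            has_inverse = False
            for b in range(1, n):
                if (a * b) % n == 1:
                    has_inverse = True
                    break
            if not has_inverse:
                return False
        return n > 1

    print [n for n in range(2, 12) if is_field(n)]   # prints [2, 3, 5, 7, 11]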
diff --git a/Lib/reconvert.py b/Lib/reconvert.py
deleted file mode 100755
index 64bab5b..0000000
--- a/Lib/reconvert.py
+++ /dev/null
@@ -1,192 +0,0 @@
-#! /usr/bin/env python
-
-r"""Convert old ("regex") regular expressions to new syntax ("re").
-
-When imported as a module, there are two functions, with their own
-strings:
-
- convert(s, syntax=None) -- convert a regex regular expression to re syntax
-
- quote(s) -- return a quoted string literal
-
-When used as a script, read a Python string literal (or any other
-expression evaluating to a string) from stdin, and write the
-translated expression to stdout as a string literal. Unless stdout is
-a tty, no trailing \n is written to stdout. This is done so that it
-can be used with Emacs C-U M-| (shell-command-on-region with argument
-which filters the region through the shell command).
-
-No attempt has been made at coding for performance.
-
-Translation table...
-
- \( ( (unless RE_NO_BK_PARENS set)
- \) ) (unless RE_NO_BK_PARENS set)
- \| | (unless RE_NO_BK_VBAR set)
- \< \b (not quite the same, but alla...)
- \> \b (not quite the same, but alla...)
- \` \A
- \' \Z
-
-Not translated...
-
- .
- ^
- $
- *
- + (unless RE_BK_PLUS_QM set, then to \+)
- ? (unless RE_BK_PLUS_QM set, then to \?)
- \
- \b
- \B
- \w
- \W
- \1 ... \9
-
-Special cases...
-
- Non-printable characters are always replaced by their 3-digit
- escape code (except \t, \n, \r, which use mnemonic escapes)
-
- Newline is turned into | when RE_NEWLINE_OR is set
-
-XXX To be done...
-
- [...] (different treatment of backslashed items?)
- [^...] (different treatment of backslashed items?)
- ^ $ * + ? (in some error contexts these are probably treated differently)
- \vDD \DD (in the regex docs but only works when RE_ANSI_HEX set)
-
-"""
-
-
-import warnings
-warnings.filterwarnings("ignore", ".* regex .*", DeprecationWarning, __name__,
- append=1)
-
-import regex
-from regex_syntax import * # RE_*
-
-__all__ = ["convert","quote"]
-
-# Default translation table
-mastertable = {
- r'\<': r'\b',
- r'\>': r'\b',
- r'\`': r'\A',
- r'\'': r'\Z',
- r'\(': '(',
- r'\)': ')',
- r'\|': '|',
- '(': r'\(',
- ')': r'\)',
- '|': r'\|',
- '\t': r'\t',
- '\n': r'\n',
- '\r': r'\r',
-}
-
-
-def convert(s, syntax=None):
- """Convert a regex regular expression to re syntax.
-
- The first argument is the regular expression, as a string object,
- just like it would be passed to regex.compile(). (I.e., pass the
- actual string object -- string quotes must already have been
- removed and the standard escape processing has already been done,
- e.g. by eval().)
-
- The optional second argument is the regex syntax variant to be
- used. This is an integer mask as passed to regex.set_syntax();
- the flag bits are defined in regex_syntax. When not specified, or
- when None is given, the current regex syntax mask (as retrieved by
- regex.get_syntax()) is used -- which is 0 by default.
-
- The return value is a regular expression, as a string object that
- could be passed to re.compile(). (I.e., no string quotes have
- been added -- use quote() below, or repr().)
-
- The conversion is not always guaranteed to be correct. More
- syntactical analysis should be performed to detect borderline
- cases and decide what to do with them. For example, 'x*?' is not
- translated correctly.
-
- """
- table = mastertable.copy()
- if syntax is None:
- syntax = regex.get_syntax()
- if syntax & RE_NO_BK_PARENS:
- del table[r'\('], table[r'\)']
- del table['('], table[')']
- if syntax & RE_NO_BK_VBAR:
- del table[r'\|']
- del table['|']
- if syntax & RE_BK_PLUS_QM:
- table['+'] = r'\+'
- table['?'] = r'\?'
- table[r'\+'] = '+'
- table[r'\?'] = '?'
- if syntax & RE_NEWLINE_OR:
- table['\n'] = '|'
- res = ""
-
- i = 0
- end = len(s)
- while i < end:
- c = s[i]
- i = i+1
- if c == '\\':
- c = s[i]
- i = i+1
- key = '\\' + c
- key = table.get(key, key)
- res = res + key
- else:
- c = table.get(c, c)
- res = res + c
- return res
-
-
-def quote(s, quote=None):
- """Convert a string object to a quoted string literal.
-
- This is similar to repr() but will return a "raw" string (r'...'
- or r"...") when the string contains backslashes, instead of
- doubling all backslashes. The resulting string does *not* always
- evaluate to the same string as the original; however it will do
- just the right thing when passed into re.compile().
-
- The optional second argument forces the string quote; it must be
- a single character which is a valid Python string quote.
-
- """
- if quote is None:
- q = "'"
- altq = "'"
- if q in s and altq not in s:
- q = altq
- else:
- assert quote in ('"', "'", '"""', "'''")
- q = quote
- res = q
- for c in s:
- if c == q: c = '\\' + c
- elif c < ' ' or c > '~': c = "\\%03o" % ord(c)
- res = res + c
- res = res + q
- if '\\' in res:
- res = 'r' + res
- return res
-
-
-def main():
- """Main program -- called when run as a script."""
- import sys
- s = eval(sys.stdin.read())
- sys.stdout.write(quote(convert(s)))
- if sys.stdout.isatty():
- sys.stdout.write("\n")
-
-
-if __name__ == '__main__':
- main()
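
A small usage example of the converter above, assuming the removed reconvert and regex modules were still importable and the default syntax mask (0) were in effect:

    import reconvert

    old = r'\(foo\|bar\)x+'          # regex-style grouping and alternation
    new = reconvert.convert(old)
    print new                        # prints (foo|bar)x+
    print reconvert.quote(new)       # prints '(foo|bar)x+'  (a quoted literal)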
diff --git a/Lib/regex_syntax.py b/Lib/regex_syntax.py
deleted file mode 100644
index b0a0dbf..0000000
--- a/Lib/regex_syntax.py
+++ /dev/null
@@ -1,53 +0,0 @@
-"""Constants for selecting regexp syntaxes for the obsolete regex module.
-
-This module is only for backward compatibility. "regex" has now
-been replaced by the new regular expression module, "re".
-
-These bits are passed to regex.set_syntax() to choose among
-alternative regexp syntaxes.
-"""
-
-# 1 means plain parentheses serve as grouping, and backslash
-# parentheses are needed for literal searching.
-# 0 means backslash-parentheses are grouping, and plain parentheses
-# are for literal searching.
-RE_NO_BK_PARENS = 1
-
-# 1 means plain | serves as the "or"-operator, and \| is a literal.
-# 0 means \| serves as the "or"-operator, and | is a literal.
-RE_NO_BK_VBAR = 2
-
-# 0 means plain + or ? serves as an operator, and \+, \? are literals.
-# 1 means \+, \? are operators and plain +, ? are literals.
-RE_BK_PLUS_QM = 4
-
-# 1 means | binds tighter than ^ or $.
-# 0 means the contrary.
-RE_TIGHT_VBAR = 8
-
-# 1 means treat \n as an _OR operator
-# 0 means treat it as a normal character
-RE_NEWLINE_OR = 16
-
-# 0 means that special characters (such as *, ^, and $) always have
-# their special meaning regardless of the surrounding context.
-# 1 means that special characters may act as normal characters in some
-# contexts. Specifically, this applies to:
-# ^ - only special at the beginning, or after ( or |
-# $ - only special at the end, or before ) or |
-# *, +, ? - only special when not after the beginning, (, or |
-RE_CONTEXT_INDEP_OPS = 32
-
-# ANSI sequences (\n etc) and \xhh
-RE_ANSI_HEX = 64
-
-# No GNU extensions
-RE_NO_GNU_EXTENSIONS = 128
-
-# Now define combinations of bits for the standard possibilities.
-RE_SYNTAX_AWK = (RE_NO_BK_PARENS | RE_NO_BK_VBAR | RE_CONTEXT_INDEP_OPS)
-RE_SYNTAX_EGREP = (RE_SYNTAX_AWK | RE_NEWLINE_OR)
-RE_SYNTAX_GREP = (RE_BK_PLUS_QM | RE_NEWLINE_OR)
-RE_SYNTAX_EMACS = 0
-
-# (Python's obsolete "regexp" module used a syntax similar to awk.)
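
The bits above are combined with bitwise OR and handed to regex.set_syntax(); a small usage sketch, assuming the removed regex module were still importable:

    import regex
    from regex_syntax import RE_SYNTAX_AWK

    prev = regex.set_syntax(RE_SYNTAX_AWK)   # returns the previous mask
    print regex.search('(a+)|(b+)', 'cdb')   # plain parens now group
    regex.set_syntax(prev)                   # restore the old syntax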
diff --git a/Lib/regsub.py b/Lib/regsub.py
deleted file mode 100644
index 0fc10a5..0000000
--- a/Lib/regsub.py
+++ /dev/null
@@ -1,198 +0,0 @@
-"""Regexp-based split and replace using the obsolete regex module.
-
-This module is only for backward compatibility. These operations
-are now provided by the new regular expression module, "re".
-
-sub(pat, repl, str): replace first occurrence of pattern in string
-gsub(pat, repl, str): replace all occurrences of pattern in string
-split(str, pat, maxsplit): split string using pattern as delimiter
-splitx(str, pat, maxsplit): split string using pattern as delimiter plus
- return delimiters
-"""
-
-import warnings
-warnings.warn("the regsub module is deprecated; please use re.sub()",
- DeprecationWarning)
-
-# Ignore further deprecation warnings about this module
-warnings.filterwarnings("ignore", "", DeprecationWarning, __name__)
-
-import regex
-
-__all__ = ["sub","gsub","split","splitx","capwords"]
-
-# Replace first occurrence of pattern pat in string str by replacement
-# repl. If the pattern isn't found, the string is returned unchanged.
-# The replacement may contain references \digit to subpatterns and
-# escaped backslashes. The pattern may be a string or an already
-# compiled pattern.
-
-def sub(pat, repl, str):
- prog = compile(pat)
- if prog.search(str) >= 0:
- regs = prog.regs
- a, b = regs[0]
- str = str[:a] + expand(repl, regs, str) + str[b:]
- return str
-
-
-# Replace all (non-overlapping) occurrences of pattern pat in string
-# str by replacement repl. The same rules as for sub() apply.
-# Empty matches for the pattern are replaced only when not adjacent to
-# a previous match, so e.g. gsub('', '-', 'abc') returns '-a-b-c-'.
-
-def gsub(pat, repl, str):
- prog = compile(pat)
- new = ''
- start = 0
- first = 1
- while prog.search(str, start) >= 0:
- regs = prog.regs
- a, b = regs[0]
- if a == b == start and not first:
- if start >= len(str) or prog.search(str, start+1) < 0:
- break
- regs = prog.regs
- a, b = regs[0]
- new = new + str[start:a] + expand(repl, regs, str)
- start = b
- first = 0
- new = new + str[start:]
- return new
-
-
-# Split string str in fields separated by delimiters matching pattern
-# pat. Only non-empty matches for the pattern are considered, so e.g.
-# split('abc', '') returns ['abc'].
-# The optional 3rd argument sets the number of splits that are performed.
-
-def split(str, pat, maxsplit = 0):
- return intsplit(str, pat, maxsplit, 0)
-
-# Split string str in fields separated by delimiters matching pattern
-# pat. Only non-empty matches for the pattern are considered, so e.g.
-# split('abc', '') returns ['abc']. The delimiters are also included
-# in the list.
-# The optional 3rd argument sets the number of splits that are performed.
-
-
-def splitx(str, pat, maxsplit = 0):
- return intsplit(str, pat, maxsplit, 1)
-
-# Internal function used to implement split() and splitx().
-
-def intsplit(str, pat, maxsplit, retain):
- prog = compile(pat)
- res = []
- start = next = 0
- splitcount = 0
- while prog.search(str, next) >= 0:
- regs = prog.regs
- a, b = regs[0]
- if a == b:
- next = next + 1
- if next >= len(str):
- break
- else:
- res.append(str[start:a])
- if retain:
- res.append(str[a:b])
- start = next = b
- splitcount = splitcount + 1
- if (maxsplit and (splitcount >= maxsplit)):
- break
- res.append(str[start:])
- return res
-
-
-# Capitalize words split using a pattern
-
-def capwords(str, pat='[^a-zA-Z0-9_]+'):
- words = splitx(str, pat)
- for i in range(0, len(words), 2):
- words[i] = words[i].capitalize()
- return "".join(words)
-
-
-# Internal subroutines:
-# compile(pat): compile a pattern, caching already compiled patterns
-# expand(repl, regs, str): expand \digit escapes in replacement string
-
-
-# Manage a cache of compiled regular expressions.
-#
-# If the pattern is a string a compiled version of it is returned. If
-# the pattern has been used before we return an already compiled
-# version from the cache; otherwise we compile it now and save the
-# compiled version in the cache, along with the syntax it was compiled
-# with. Instead of a string, a compiled regular expression can also
-# be passed.
-
-cache = {}
-
-def compile(pat):
- if type(pat) != type(''):
- return pat # Assume it is a compiled regex
- key = (pat, regex.get_syntax())
- if key in cache:
- prog = cache[key] # Get it from the cache
- else:
- prog = cache[key] = regex.compile(pat)
- return prog
-
-
-def clear_cache():
- global cache
- cache = {}
-
-
-# Expand \digit in the replacement.
-# Each occurrence of \digit is replaced by the substring of str
-# indicated by regs[digit]. To include a literal \ in the
-# replacement, double it; other \ escapes are left unchanged (i.e.
-# the \ and the following character are both copied).
-
-def expand(repl, regs, str):
- if '\\' not in repl:
- return repl
- new = ''
- i = 0
- ord0 = ord('0')
- while i < len(repl):
- c = repl[i]; i = i+1
- if c != '\\' or i >= len(repl):
- new = new + c
- else:
- c = repl[i]; i = i+1
- if '0' <= c <= '9':
- a, b = regs[ord(c)-ord0]
- new = new + str[a:b]
- elif c == '\\':
- new = new + c
- else:
- new = new + '\\' + c
- return new
-
-
-# Test program, reads sequences "pat repl str" from stdin.
-# Optional argument specifies pattern used to split lines.
-
-def test():
- import sys
- if sys.argv[1:]:
- delpat = sys.argv[1]
- else:
- delpat = '[ \t\n]+'
- while 1:
- if sys.stdin.isatty(): sys.stderr.write('--> ')
- line = sys.stdin.readline()
- if not line: break
- if line[-1] == '\n': line = line[:-1]
- fields = split(line, delpat)
- if len(fields) != 3:
- print 'Sorry, not three fields'
- print 'split:', repr(fields)
- continue
- [pat, repl, str] = split(line, delpat)
- print 'sub :', repr(sub(pat, repl, str))
- print 'gsub:', repr(gsub(pat, repl, str))
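
Rough re-module equivalents of the helpers above, as the deprecation warning suggests (behaviour differs in corner cases, e.g. around empty matches):

    import re

    s = 'one two  three'
    print re.sub('[ \t]+', '-', s, 1)      # sub():  first occurrence only
    print re.sub('[ \t]+', '-', s)         # gsub(): all occurrences
    print re.split('[ \t]+', s)            # split()
    print re.split('([ \t]+)', s)          # splitx(): delimiters kept
    print re.sub(r'\w+', lambda m: m.group(0).capitalize(), s)   # capwords()-like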
diff --git a/Lib/rexec.py b/Lib/rexec.py
index 89ff509..d289d6a 100644
--- a/Lib/rexec.py
+++ b/Lib/rexec.py
@@ -136,7 +136,7 @@ class RExec(ihooks._Verbose):
ok_builtin_modules = ('audioop', 'array', 'binascii',
'cmath', 'errno', 'imageop',
'marshal', 'math', 'md5', 'operator',
- 'parser', 'regex', 'select',
+ 'parser', 'select',
'sha', '_sre', 'strop', 'struct', 'time',
'_weakref')
diff --git a/Lib/test/test___all__.py b/Lib/test/test___all__.py
index 0b2e7da..0e17830 100644
--- a/Lib/test/test___all__.py
+++ b/Lib/test/test___all__.py
@@ -128,8 +128,6 @@ class AllTest(unittest.TestCase):
self.check_all("quopri")
self.check_all("random")
self.check_all("re")
- self.check_all("reconvert")
- self.check_all("regsub")
self.check_all("repr")
self.check_all("rexec")
self.check_all("rfc822")
diff --git a/Lib/test/test_regex.py b/Lib/test/test_regex.py
deleted file mode 100644
index 2e2c8f65..0000000
--- a/Lib/test/test_regex.py
+++ /dev/null
@@ -1,113 +0,0 @@
-from test.test_support import verbose, sortdict
-import warnings
-warnings.filterwarnings("ignore", "the regex module is deprecated",
- DeprecationWarning, __name__)
-import regex
-from regex_syntax import *
-
-re = 'a+b+c+'
-print 'no match:', regex.match(re, 'hello aaaabcccc world')
-print 'successful search:', regex.search(re, 'hello aaaabcccc world')
-try:
- cre = regex.compile('\(' + re)
-except regex.error:
- print 'caught expected exception'
-else:
- print 'expected regex.error not raised'
-
-print 'failed awk syntax:', regex.search('(a+)|(b+)', 'cdb')
-prev = regex.set_syntax(RE_SYNTAX_AWK)
-print 'successful awk syntax:', regex.search('(a+)|(b+)', 'cdb')
-regex.set_syntax(prev)
-print 'failed awk syntax:', regex.search('(a+)|(b+)', 'cdb')
-
-re = '\(<one>[0-9]+\) *\(<two>[0-9]+\)'
-print 'matching with group names and compile()'
-cre = regex.compile(re)
-print cre.match('801 999')
-try:
- print cre.group('one')
-except regex.error:
- print 'caught expected exception'
-else:
- print 'expected regex.error not raised'
-
-print 'matching with group names and symcomp()'
-cre = regex.symcomp(re)
-print cre.match('801 999')
-print cre.group(0)
-print cre.group('one')
-print cre.group(1, 2)
-print cre.group('one', 'two')
-print 'realpat:', cre.realpat
-print 'groupindex:', sortdict(cre.groupindex)
-
-re = 'world'
-cre = regex.compile(re)
-print 'not case folded search:', cre.search('HELLO WORLD')
-cre = regex.compile(re, regex.casefold)
-print 'case folded search:', cre.search('HELLO WORLD')
-
-print '__members__:', cre.__members__
-print 'regs:', cre.regs
-print 'last:', cre.last
-print 'translate:', len(cre.translate)
-print 'givenpat:', cre.givenpat
-
-print 'match with pos:', cre.match('hello world', 7)
-print 'search with pos:', cre.search('hello world there world', 7)
-print 'bogus group:', cre.group(0, 1, 3)
-try:
- print 'no name:', cre.group('one')
-except regex.error:
- print 'caught expected exception'
-else:
- print 'expected regex.error not raised'
-
-from regex_tests import *
-if verbose: print 'Running regex_tests test suite'
-
-for t in tests:
- pattern=s=outcome=repl=expected=None
- if len(t)==5:
- pattern, s, outcome, repl, expected = t
- elif len(t)==3:
- pattern, s, outcome = t
- else:
- raise ValueError, ('Test tuples should have 3 or 5 fields',t)
-
- try:
- obj=regex.compile(pattern)
- except regex.error:
- if outcome==SYNTAX_ERROR: pass # Expected a syntax error
- else:
- # Regex syntax errors aren't yet reported, so for
- # the official test suite they'll be quietly ignored.
- pass
- #print '=== Syntax error:', t
- else:
- try:
- result=obj.search(s)
- except regex.error, msg:
- print '=== Unexpected exception', t, repr(msg)
- if outcome==SYNTAX_ERROR:
- # This should have been a syntax error; forget it.
- pass
- elif outcome==FAIL:
- if result==-1: pass # No match, as expected
- else: print '=== Succeeded incorrectly', t
- elif outcome==SUCCEED:
- if result!=-1:
- # Matched, as expected, so now we compute the
- # result string and compare it to our expected result.
- start, end = obj.regs[0]
- found=s[start:end]
- groups=obj.group(1,2,3,4,5,6,7,8,9,10)
- vardict=vars()
- for i in range(len(groups)):
- vardict['g'+str(i+1)]=str(groups[i])
- repl=eval(repl)
- if repl!=expected:
- print '=== grouping error', t, repr(repl)+' should be '+repr(expected)
- else:
- print '=== Failed incorrectly', t
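
The first few checks of the removed test, restated against re for comparison:

    import re

    pat = 'a+b+c+'
    print re.match(pat, 'hello aaaabcccc world')    # None: match() anchors at the start
    print re.search(pat, 'hello aaaabcccc world')   # a match object covering 'aaaabcccc'
    try:
        re.compile('(' + pat)                       # unbalanced parenthesis
    except re.error:
        print 'caught expected exception'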
diff --git a/Lib/test/test_sundry.py b/Lib/test/test_sundry.py
index fd10b68..90610e0 100644
--- a/Lib/test/test_sundry.py
+++ b/Lib/test/test_sundry.py
@@ -68,7 +68,6 @@ import posixfile
import profile
import pstats
import py_compile
-#import reconvert
import repr
try:
import rlcompleter # not available on Windows