author     Thomas Wouters <thomas@python.org>   2006-04-21 10:40:58 (GMT)
committer  Thomas Wouters <thomas@python.org>   2006-04-21 10:40:58 (GMT)
commit     49fd7fa4431da299196d74087df4a04f99f9c46f (patch)
tree       35ace5fe78d3d52c7a9ab356ab9f6dbf8d4b71f4 /Lib
parent     9ada3d6e29d5165dadacbe6be07bcd35cfbef59d (diff)
Merge p3yk branch with the trunk up to revision 45595. This breaks a fair
number of tests, all because of the codecs/_multibytecodecs issue described
here (it's not a Py3K issue, just something Py3K discovers):

    http://mail.python.org/pipermail/python-dev/2006-April/064051.html

Hye-Shik Chang promised to look for a fix, so no need to fix it here. The
tests that are expected to break are:

    test_codecencodings_cn
    test_codecencodings_hk
    test_codecencodings_jp
    test_codecencodings_kr
    test_codecencodings_tw
    test_codecs
    test_multibytecodec

This merge fixes an actual test failure (test_weakref) in this branch,
though, so I believe merging is the right thing to do anyway.
Diffstat (limited to 'Lib')
-rw-r--r--  Lib/Queue.py  46
-rw-r--r--  Lib/SimpleXMLRPCServer.py  8
-rw-r--r--  Lib/__future__.py  4
-rw-r--r--  Lib/_threading_local.py  72
-rw-r--r--  Lib/bdb.py  6
-rw-r--r--  Lib/bsddb/__init__.py  18
-rw-r--r--  Lib/bsddb/test/test_all.py  1
-rw-r--r--  Lib/bsddb/test/test_pickle.py  75
-rw-r--r--  Lib/calendar.py  709
-rw-r--r--  Lib/codecs.py  36
-rw-r--r--  Lib/contextlib.py  20
-rw-r--r--  Lib/copy_reg.py  15
-rw-r--r--  Lib/ctypes/__init__.py  31
-rw-r--r--  Lib/ctypes/_loader.py  12
-rw-r--r--  Lib/ctypes/test/test_byteswap.py  70
-rw-r--r--  Lib/ctypes/test/test_cfuncs.py  2
-rw-r--r--  Lib/ctypes/test/test_keeprefs.py  5
-rw-r--r--  Lib/ctypes/test/test_loading.py  80
-rw-r--r--  Lib/ctypes/test/test_pointers.py  12
-rw-r--r--  Lib/ctypes/test/test_posix.py  6
-rw-r--r--  Lib/ctypes/test/test_prototypes.py  16
-rw-r--r--  Lib/ctypes/test/test_random_things.py  10
-rw-r--r--  Lib/ctypes/test/test_sizes.py  3
-rw-r--r--  Lib/ctypes/test/test_unaligned_structures.py  45
-rw-r--r--  Lib/distutils/command/build_ext.py  13
-rw-r--r--  Lib/distutils/command/install.py  1
-rw-r--r--  Lib/distutils/command/install_egg_info.py  75
-rw-r--r--  Lib/distutils/command/upload.py  11
-rw-r--r--  Lib/distutils/log.py  7
-rw-r--r--  Lib/distutils/sysconfig.py  21
-rw-r--r--  Lib/doctest.py  26
-rw-r--r--  Lib/dummy_thread.py  8
-rw-r--r--  Lib/easy_install.py  5
-rw-r--r--  Lib/email/__init__.py  77
-rw-r--r--  Lib/email/_parseaddr.py  10
-rw-r--r--  Lib/email/base64mime.py (renamed from Lib/email/base64MIME.py)  16
-rw-r--r--  Lib/email/charset.py (renamed from Lib/email/Charset.py)  52
-rw-r--r--  Lib/email/encoders.py (renamed from Lib/email/Encoders.py)  12
-rw-r--r--  Lib/email/errors.py (renamed from Lib/email/Errors.py)  6
-rw-r--r--  Lib/email/feedparser.py (renamed from Lib/email/FeedParser.py)  21
-rw-r--r--  Lib/email/generator.py (renamed from Lib/email/Generator.py)  12
-rw-r--r--  Lib/email/header.py (renamed from Lib/email/Header.py)  23
-rw-r--r--  Lib/email/iterators.py (renamed from Lib/email/Iterators.py)  10
-rw-r--r--  Lib/email/message.py (renamed from Lib/email/Message.py)  79
-rw-r--r--  Lib/email/mime/__init__.py  0
-rw-r--r--  Lib/email/mime/application.py  36
-rw-r--r--  Lib/email/mime/audio.py (renamed from Lib/email/MIMEAudio.py)  13
-rw-r--r--  Lib/email/mime/base.py (renamed from Lib/email/MIMEBase.py)  10
-rw-r--r--  Lib/email/mime/image.py (renamed from Lib/email/MIMEImage.py)  11
-rw-r--r--  Lib/email/mime/message.py (renamed from Lib/email/MIMEMessage.py)  12
-rw-r--r--  Lib/email/mime/multipart.py (renamed from Lib/email/MIMEMultipart.py)  10
-rw-r--r--  Lib/email/mime/nonmultipart.py (renamed from Lib/email/MIMENonMultipart.py)  12
-rw-r--r--  Lib/email/mime/text.py (renamed from Lib/email/MIMEText.py)  8
-rw-r--r--  Lib/email/parser.py (renamed from Lib/email/Parser.py)  9
-rw-r--r--  Lib/email/quoprimime.py (renamed from Lib/email/quopriMIME.py)  22
-rw-r--r--  Lib/email/test/test_email.py  129
-rw-r--r--  Lib/email/test/test_email_codecs.py  7
-rw-r--r--  Lib/email/test/test_email_codecs_renamed.py  77
-rw-r--r--  Lib/email/test/test_email_renamed.py  3078
-rw-r--r--  Lib/email/utils.py (renamed from Lib/email/Utils.py)  19
-rw-r--r--  Lib/encodings/big5.py  41
-rw-r--r--  Lib/encodings/big5hkscs.py  41
-rw-r--r--  Lib/encodings/cp932.py  41
-rw-r--r--  Lib/encodings/cp949.py  41
-rw-r--r--  Lib/encodings/cp950.py  41
-rw-r--r--  Lib/encodings/euc_jis_2004.py  41
-rw-r--r--  Lib/encodings/euc_jisx0213.py  41
-rw-r--r--  Lib/encodings/euc_jp.py  41
-rw-r--r--  Lib/encodings/euc_kr.py  41
-rw-r--r--  Lib/encodings/gb18030.py  41
-rw-r--r--  Lib/encodings/gb2312.py  41
-rw-r--r--  Lib/encodings/gbk.py  41
-rw-r--r--  Lib/encodings/hz.py  41
-rw-r--r--  Lib/encodings/idna.py  100
-rw-r--r--  Lib/encodings/iso2022_jp.py  41
-rw-r--r--  Lib/encodings/iso2022_jp_1.py  41
-rw-r--r--  Lib/encodings/iso2022_jp_2.py  41
-rw-r--r--  Lib/encodings/iso2022_jp_2004.py  41
-rw-r--r--  Lib/encodings/iso2022_jp_3.py  41
-rw-r--r--  Lib/encodings/iso2022_jp_ext.py  41
-rw-r--r--  Lib/encodings/iso2022_kr.py  41
-rw-r--r--  Lib/encodings/johab.py  41
-rw-r--r--  Lib/encodings/shift_jis.py  41
-rw-r--r--  Lib/encodings/shift_jis_2004.py  41
-rw-r--r--  Lib/encodings/shift_jisx0213.py  41
-rw-r--r--  Lib/getpass.py  25
-rw-r--r--  Lib/glob.py  2
-rw-r--r--  Lib/idlelib/IOBinding.py  1
-rw-r--r--  Lib/idlelib/NEWS.txt  11
-rw-r--r--  Lib/idlelib/idlever.py  2
-rw-r--r--  Lib/inspect.py  6
-rw-r--r--  Lib/lib-old/Para.py  343
-rw-r--r--  Lib/lib-old/addpack.py  67
-rw-r--r--  Lib/lib-old/cmp.py  63
-rw-r--r--  Lib/lib-old/cmpcache.py  64
-rw-r--r--  Lib/lib-old/codehack.py  81
-rw-r--r--  Lib/lib-old/dircmp.py  202
-rw-r--r--  Lib/lib-old/dump.py  63
-rw-r--r--  Lib/lib-old/find.py  26
-rw-r--r--  Lib/lib-old/fmt.py  623
-rw-r--r--  Lib/lib-old/grep.py  79
-rw-r--r--  Lib/lib-old/lockfile.py  15
-rw-r--r--  Lib/lib-old/newdir.py  73
-rw-r--r--  Lib/lib-old/ni.py  433
-rw-r--r--  Lib/lib-old/packmail.py  111
-rw-r--r--  Lib/lib-old/poly.py  52
-rw-r--r--  Lib/lib-old/rand.py  13
-rw-r--r--  Lib/lib-old/statcache.py  82
-rw-r--r--  Lib/lib-old/tb.py  177
-rw-r--r--  Lib/lib-old/tzparse.py  98
-rw-r--r--  Lib/lib-old/util.py  25
-rw-r--r--  Lib/lib-old/whatsound.py  1
-rw-r--r--  Lib/lib-old/whrandom.py  144
-rw-r--r--  Lib/lib-old/zmod.py  94
-rwxr-xr-x  Lib/lib-tk/Tix.py  98
-rw-r--r--  Lib/lib-tk/Tkinter.py  24
-rw-r--r--  Lib/lib-tk/tkFont.py  4
-rw-r--r--  Lib/linecache.py  36
-rw-r--r--  Lib/logging/__init__.py  2
-rw-r--r--  Lib/mimetools.py  5
-rw-r--r--  Lib/mimetypes.py  321
-rwxr-xr-x  Lib/pdb.py  125
-rw-r--r--  Lib/pkg_resources.py  2377
-rw-r--r--  Lib/pkgutil.py  425
-rw-r--r--  Lib/plat-mac/applesingle.py  2
-rwxr-xr-x  Lib/platform.py  3
-rw-r--r--  Lib/popen2.py  37
-rw-r--r--  Lib/pstats.py  144
-rwxr-xr-x  Lib/pydoc.py  182
-rw-r--r--  Lib/random.py  34
-rwxr-xr-x  Lib/reconvert.py  192
-rw-r--r--  Lib/regex_syntax.py  53
-rw-r--r--  Lib/regsub.py  198
-rw-r--r--  Lib/rexec.py  2
-rwxr-xr-x  Lib/runpy.py  377
-rw-r--r--  Lib/setuptools.egg-info/PKG-INFO  89
-rwxr-xr-x  Lib/setuptools.egg-info/entry_points.txt  51
-rw-r--r--  Lib/setuptools.egg-info/top_level.txt  3
-rw-r--r--  Lib/setuptools.egg-info/zip-safe  0
-rw-r--r--  Lib/setuptools/__init__.py  64
-rwxr-xr-x  Lib/setuptools/archive_util.py  200
-rwxr-xr-x  Lib/setuptools/cli.exe  bin 0 -> 6144 bytes
-rw-r--r--  Lib/setuptools/command/__init__.py  19
-rwxr-xr-x  Lib/setuptools/command/alias.py  79
-rw-r--r--  Lib/setuptools/command/bdist_egg.py  449
-rwxr-xr-x  Lib/setuptools/command/bdist_rpm.py  37
-rw-r--r--  Lib/setuptools/command/build_ext.py  285
-rw-r--r--  Lib/setuptools/command/build_py.py  192
-rwxr-xr-x  Lib/setuptools/command/develop.py  116
-rwxr-xr-x  Lib/setuptools/command/easy_install.py  1555
-rwxr-xr-x  Lib/setuptools/command/egg_info.py  365
-rw-r--r--  Lib/setuptools/command/install.py  101
-rwxr-xr-x  Lib/setuptools/command/install_egg_info.py  81
-rw-r--r--  Lib/setuptools/command/install_lib.py  76
-rwxr-xr-x  Lib/setuptools/command/install_scripts.py  56
-rwxr-xr-x  Lib/setuptools/command/rotate.py  57
-rwxr-xr-x  Lib/setuptools/command/saveopts.py  24
-rwxr-xr-x  Lib/setuptools/command/sdist.py  163
-rwxr-xr-x  Lib/setuptools/command/setopt.py  158
-rw-r--r--  Lib/setuptools/command/test.py  119
-rwxr-xr-x  Lib/setuptools/command/upload.py  178
-rw-r--r--  Lib/setuptools/depends.py  239
-rw-r--r--  Lib/setuptools/dist.py  798
-rw-r--r--  Lib/setuptools/extension.py  35
-rwxr-xr-x  Lib/setuptools/gui.exe  bin 0 -> 6144 bytes
-rwxr-xr-x  Lib/setuptools/package_index.py  674
-rwxr-xr-x  Lib/setuptools/sandbox.py  203
-rwxr-xr-x  Lib/setuptools/site-patch.py  74
-rw-r--r--  Lib/setuptools/tests/__init__.py  364
-rwxr-xr-x  Lib/setuptools/tests/api_tests.txt  330
-rw-r--r--  Lib/setuptools/tests/test_resources.py  483
-rw-r--r--  Lib/sgmllib.py  34
-rw-r--r--  Lib/site.py  2
-rwxr-xr-x  Lib/smtplib.py  6
-rw-r--r--  Lib/socket.py  12
-rw-r--r--  Lib/sqlite3/__init__.py  24
-rw-r--r--  Lib/sqlite3/dbapi2.py  84
-rw-r--r--  Lib/sqlite3/test/__init__.py  0
-rw-r--r--  Lib/sqlite3/test/dbapi.py  732
-rw-r--r--  Lib/sqlite3/test/factory.py  164
-rw-r--r--  Lib/sqlite3/test/hooks.py  115
-rw-r--r--  Lib/sqlite3/test/regression.py  48
-rw-r--r--  Lib/sqlite3/test/transactions.py  156
-rw-r--r--  Lib/sqlite3/test/types.py  339
-rw-r--r--  Lib/sqlite3/test/userfunctions.py  330
-rw-r--r--  Lib/sre.py  10
-rw-r--r--  Lib/subprocess.py  36
-rw-r--r--  Lib/telnetlib.py  2
-rw-r--r--  Lib/test/check_soundcard.vbs  13
-rw-r--r--  Lib/test/crashers/README  5
-rw-r--r--  Lib/test/crashers/dictresize_attack.py  32
-rw-r--r--  Lib/test/crashers/nasty_eq_vs_dict.py  47
-rw-r--r--  Lib/test/empty.vbs  1
-rw-r--r--  Lib/test/fork_wait.py  71
-rw-r--r--  Lib/test/leakers/README.txt  13
-rw-r--r--  Lib/test/leakers/test_ctypes.py  16
-rw-r--r--  Lib/test/leakers/test_selftype.py  13
-rw-r--r--  Lib/test/leakers/test_tee.py  19
-rw-r--r--  Lib/test/output/test_augassign  54
-rw-r--r--  Lib/test/output/test_coercion  1054
-rw-r--r--  Lib/test/output/test_compare  101
-rwxr-xr-x  Lib/test/regrtest.py  67
-rw-r--r--  Lib/test/test___all__.py  4
-rw-r--r--  Lib/test/test_applesingle.py  4
-rwxr-xr-x  Lib/test/test_array.py  2
-rw-r--r--  Lib/test/test_ast.py  11
-rw-r--r--  Lib/test/test_audioop.py  24
-rw-r--r--  Lib/test/test_augassign.py  566
-rwxr-xr-x  Lib/test/test_bsddb.py  9
-rw-r--r--  Lib/test/test_builtin.py  4
-rw-r--r--  Lib/test/test_calendar.py  265
-rw-r--r--  Lib/test/test_capi.py  87
-rw-r--r--  Lib/test/test_cmd_line.py  3
-rw-r--r--  Lib/test/test_codecs.py  93
-rw-r--r--  Lib/test/test_coercion.py  324
-rw-r--r--  Lib/test/test_compare.py  51
-rw-r--r--  Lib/test/test_compile.py  4
-rw-r--r--  Lib/test/test_compiler.py  13
-rw-r--r--  Lib/test/test_contextlib.py  69
-rw-r--r--  Lib/test/test_copy_reg.py  29
-rw-r--r--  Lib/test/test_curses.py  15
-rw-r--r--  Lib/test/test_datetime.py  11
-rw-r--r--  Lib/test/test_decimal.py  50
-rw-r--r--  Lib/test/test_descr.py  12
-rw-r--r--  Lib/test/test_difflib.py  8
-rwxr-xr-x  Lib/test/test_dl.py  1
-rw-r--r--  Lib/test/test_doctest.py  46
-rw-r--r--  Lib/test/test_email_renamed.py  13
-rw-r--r--  Lib/test/test_file.py  16
-rw-r--r--  Lib/test/test_fileinput.py  5
-rw-r--r--  Lib/test/test_fork1.py  76
-rw-r--r--  Lib/test/test_generators.py  98
-rw-r--r--  Lib/test/test_genexps.py  2
-rw-r--r--  Lib/test/test_getargs2.py  19
-rw-r--r--  Lib/test/test_glob.py  8
-rw-r--r--  Lib/test/test_grammar.py  9
-rw-r--r--  Lib/test/test_index.py  137
-rw-r--r--  Lib/test/test_inspect.py  6
-rw-r--r--  Lib/test/test_mimetypes.py  1
-rw-r--r--  Lib/test/test_multibytecodec.py  135
-rw-r--r--  Lib/test/test_multibytecodec_support.py  210
-rw-r--r--  Lib/test/test_optparse.py  7
-rw-r--r--  Lib/test/test_parser.py  4
-rw-r--r--  Lib/test/test_platform.py  7
-rw-r--r--  Lib/test/test_popen2.py  4
-rw-r--r--  Lib/test/test_posix.py  5
-rw-r--r--  Lib/test/test_pty.py  28
-rw-r--r--  Lib/test/test_pyclbr.py  5
-rw-r--r--  Lib/test/test_queue.py  44
-rw-r--r--  Lib/test/test_quopri.py  21
-rw-r--r--  Lib/test/test_random.py  21
-rw-r--r--  Lib/test/test_regex.py  113
-rw-r--r--  Lib/test/test_set.py  4
-rw-r--r--  Lib/test/test_setuptools.py  16
-rw-r--r--  Lib/test/test_sgmllib.py  14
-rw-r--r--  Lib/test/test_socket.py  40
-rw-r--r--  Lib/test/test_socket_ssl.py  39
-rw-r--r--  Lib/test/test_sqlite.py  16
-rw-r--r--  Lib/test/test_startfile.py  37
-rw-r--r--  Lib/test/test_sundry.py  54
-rw-r--r--  Lib/test/test_sys.py  5
-rw-r--r--  Lib/test/test_timeout.py  5
-rw-r--r--  Lib/test/test_tokenize.py  134
-rw-r--r--  Lib/test/test_trace.py  62
-rw-r--r--  Lib/test/test_traceback.py  10
-rw-r--r--  Lib/test/test_unicode.py  16
-rw-r--r--  Lib/test/test_urllib2.py  38
-rw-r--r--  Lib/test/test_urlparse.py  101
-rw-r--r--  Lib/test/test_wait3.py  32
-rw-r--r--  Lib/test/test_wait4.py  29
-rw-r--r--  Lib/test/test_warnings.py  4
-rw-r--r--  Lib/test/test_winsound.py  149
-rw-r--r--  Lib/test/test_with.py  56
-rw-r--r--  Lib/test/test_xrange.py  7
-rw-r--r--  Lib/test/test_zipimport.py  83
-rw-r--r--  Lib/tokenize.py  2
-rw-r--r--  Lib/traceback.py  10
-rw-r--r--  Lib/urllib.py  4
-rw-r--r--  Lib/urllib2.py  205
-rw-r--r--  Lib/urlparse.py  131
-rwxr-xr-x  Lib/uu.py  4
-rw-r--r--  Lib/warnings.py  10
-rw-r--r--  Lib/xmlcore/dom/expatbuilder.py  14
-rw-r--r--  Lib/xmlcore/dom/minicompat.py  175
-rw-r--r--  Lib/xmlcore/dom/minidom.py  16
-rw-r--r--  Lib/xmlcore/dom/xmlbuilder.py  6
286 files changed, 21939 insertions, 7546 deletions
diff --git a/Lib/Queue.py b/Lib/Queue.py
index c6c608b..51ad354 100644
--- a/Lib/Queue.py
+++ b/Lib/Queue.py
@@ -35,6 +35,51 @@ class Queue:
# Notify not_full whenever an item is removed from the queue;
# a thread waiting to put is notified then.
self.not_full = threading.Condition(self.mutex)
+ # Notify all_tasks_done whenever the number of unfinished tasks
+ # drops to zero; thread waiting to join() is notified to resume
+ self.all_tasks_done = threading.Condition(self.mutex)
+ self.unfinished_tasks = 0
+
+ def task_done(self):
+ """Indicate that a formerly enqueued task is complete.
+
+ Used by Queue consumer threads. For each get() used to fetch a task,
+ a subsequent call to task_done() tells the queue that the processing
+ on the task is complete.
+
+ If a join() is currently blocking, it will resume when all items
+ have been processed (meaning that a task_done() call was received
+ for every item that had been put() into the queue).
+
+ Raises a ValueError if called more times than there were items
+ placed in the queue.
+ """
+ self.all_tasks_done.acquire()
+ try:
+ unfinished = self.unfinished_tasks - 1
+ if unfinished <= 0:
+ if unfinished < 0:
+ raise ValueError('task_done() called too many times')
+ self.all_tasks_done.notifyAll()
+ self.unfinished_tasks = unfinished
+ finally:
+ self.all_tasks_done.release()
+
+ def join(self):
+ """Blocks until all items in the Queue have been gotten and processed.
+
+ The count of unfinished tasks goes up whenever an item is added to the
+ queue. The count goes down whenever a consumer thread calls task_done()
+ to indicate the item was retrieved and all work on it is complete.
+
+ When the count of unfinished tasks drops to zero, join() unblocks.
+ """
+ self.all_tasks_done.acquire()
+ try:
+ while self.unfinished_tasks:
+ self.all_tasks_done.wait()
+ finally:
+ self.all_tasks_done.release()
def qsize(self):
"""Return the approximate size of the queue (not reliable!)."""
@@ -86,6 +131,7 @@ class Queue:
raise Full
self.not_full.wait(remaining)
self._put(item)
+ self.unfinished_tasks += 1
self.not_empty.notify()
finally:
self.not_full.release()
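A minimal usage sketch of the task_done()/join() protocol added above; the
worker function and item counts are illustrative, not part of the patch:

    import threading
    from Queue import Queue

    def worker(q):
        while True:
            item = q.get()
            # ... process item ...
            q.task_done()            # one task_done() per completed get()

    q = Queue()
    for i in range(4):
        t = threading.Thread(target=worker, args=(q,))
        t.setDaemon(True)            # workers die with the main thread
        t.start()

    for item in range(10):
        q.put(item)                  # each put() bumps unfinished_tasks

    q.join()                         # blocks until every item is task_done()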
diff --git a/Lib/SimpleXMLRPCServer.py b/Lib/SimpleXMLRPCServer.py
index 156c2ba..a0b44e1 100644
--- a/Lib/SimpleXMLRPCServer.py
+++ b/Lib/SimpleXMLRPCServer.py
@@ -104,7 +104,11 @@ from xmlrpclib import Fault
import SocketServer
import BaseHTTPServer
import sys
-import os, fcntl
+import os
+try:
+ import fcntl
+except ImportError:
+ fcntl = None
def resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
"""resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d
@@ -493,7 +497,7 @@ class SimpleXMLRPCServer(SocketServer.TCPServer,
# [Bug #1222790] If possible, set close-on-exec flag; if a
# method spawns a subprocess, the subprocess shouldn't have
# the listening socket open.
- if hasattr(fcntl, 'FD_CLOEXEC'):
+ if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
diff --git a/Lib/__future__.py b/Lib/__future__.py
index d95ce5f..d8e14d1 100644
--- a/Lib/__future__.py
+++ b/Lib/__future__.py
@@ -64,7 +64,7 @@ __all__ = ["all_feature_names"] + all_feature_names
CO_NESTED = 0x0010 # nested_scopes
CO_GENERATOR_ALLOWED = 0 # generators (obsolete, was 0x1000)
CO_FUTURE_DIVISION = 0x2000 # division
-CO_FUTURE_ABSIMPORT = 0x4000 # absolute_import
+CO_FUTURE_ABSOLUTE_IMPORT = 0x4000 # perform absolute imports by default
CO_FUTURE_WITH_STATEMENT = 0x8000 # with statement
class _Feature:
@@ -109,7 +109,7 @@ division = _Feature((2, 2, 0, "alpha", 2),
absolute_import = _Feature((2, 5, 0, "alpha", 1),
(2, 7, 0, "alpha", 0),
- CO_FUTURE_ABSIMPORT)
+ CO_FUTURE_ABSOLUTE_IMPORT)
with_statement = _Feature((2, 5, 0, "alpha", 1),
(2, 6, 0, "alpha", 0),
diff --git a/Lib/_threading_local.py b/Lib/_threading_local.py
index 90717a8..f0ce857 100644
--- a/Lib/_threading_local.py
+++ b/Lib/_threading_local.py
@@ -1,9 +1,9 @@
-"""Thread-local objects
+"""Thread-local objects.
-(Note that this module provides a Python version of thread
- threading.local class. Depending on the version of Python you're
- using, there may be a faster one available. You should always import
- the local class from threading.)
+(Note that this module provides a Python version of the threading.local
+ class. Depending on the version of Python you're using, there may be a
+ faster one available. You should always import the `local` class from
+ `threading`.)
Thread-local objects support the management of thread-local data.
If you have data that you want to be local to a thread, simply create
@@ -133,7 +133,17 @@ affects what we see:
>>> del mydata
"""
-# Threading import is at end
+__all__ = ["local"]
+
+# We need to use objects from the threading module, but the threading
+# module may also want to use our `local` class, if support for locals
+# isn't compiled in to the `thread` module. This creates potential problems
+# with circular imports. For that reason, we don't import `threading`
+# until the bottom of this file (a hack sufficient to worm around the
+# potential problems). Note that almost all platforms do have support for
+# locals in the `thread` module, and there is no circular import problem
+# then, so problems introduced by fiddling the order of imports here won't
+# manifest on most boxes.
class _localbase(object):
__slots__ = '_local__key', '_local__args', '_local__lock'
@@ -202,36 +212,30 @@ class local(_localbase):
finally:
lock.release()
+ def __del__(self):
+ import threading
- def __del__():
- threading_enumerate = enumerate
- __getattribute__ = object.__getattribute__
-
- def __del__(self):
- key = __getattribute__(self, '_local__key')
+ key = object.__getattribute__(self, '_local__key')
+ try:
+ threads = list(threading.enumerate())
+ except:
+ # If enumerate fails, as it seems to do during
+ # shutdown, we'll skip cleanup under the assumption
+ # that there is nothing to clean up.
+ return
+
+ for thread in threads:
try:
- threads = list(threading_enumerate())
- except:
- # if enumerate fails, as it seems to do during
- # shutdown, we'll skip cleanup under the assumption
- # that there is nothing to clean up
- return
-
- for thread in threads:
- try:
- __dict__ = thread.__dict__
- except AttributeError:
- # Thread is dying, rest in peace
- continue
-
- if key in __dict__:
- try:
- del __dict__[key]
- except KeyError:
- pass # didn't have anything in this thread
+ __dict__ = thread.__dict__
+ except AttributeError:
+ # Thread is dying, rest in peace.
+ continue
- return __del__
- __del__ = __del__()
+ if key in __dict__:
+ try:
+ del __dict__[key]
+ except KeyError:
+ pass # didn't have anything in this thread
-from threading import currentThread, enumerate, RLock
+from threading import currentThread, RLock
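A short usage sketch of the `local` class this module provides (the attribute
names are arbitrary): each thread sees its own copy of the data, which is why
__del__ above must walk every thread's __dict__ to clean up.

    import threading
    from _threading_local import local

    data = local()
    data.x = 'main'

    def show():
        data.x = 'worker'        # rebinding here is invisible to other threads
        print 'worker sees', data.x

    t = threading.Thread(target=show)
    t.start()
    t.join()
    print 'main still sees', data.x      # prints 'main'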
diff --git a/Lib/bdb.py b/Lib/bdb.py
index 8f808cc..08b48c3 100644
--- a/Lib/bdb.py
+++ b/Lib/bdb.py
@@ -479,10 +479,10 @@ class Breakpoint:
else:
disp = 'keep '
if self.enabled:
- disp = disp + 'yes'
+ disp = disp + 'yes '
else:
- disp = disp + 'no '
- print '%-4dbreakpoint %s at %s:%d' % (self.number, disp,
+ disp = disp + 'no '
+ print '%-4dbreakpoint %s at %s:%d' % (self.number, disp,
self.file, self.line)
if self.cond:
print '\tstop only if %s' % (self.cond,)
diff --git a/Lib/bsddb/__init__.py b/Lib/bsddb/__init__.py
index d3ee773..c004c08 100644
--- a/Lib/bsddb/__init__.py
+++ b/Lib/bsddb/__init__.py
@@ -287,10 +287,9 @@ def hashopen(file, flag='c', mode=0666, pgsize=None, ffactor=None, nelem=None,
cachesize=None, lorder=None, hflags=0):
flags = _checkflag(flag, file)
- e = _openDBEnv()
+ e = _openDBEnv(cachesize)
d = db.DB(e)
d.set_flags(hflags)
- if cachesize is not None: d.set_cachesize(0, cachesize)
if pgsize is not None: d.set_pagesize(pgsize)
if lorder is not None: d.set_lorder(lorder)
if ffactor is not None: d.set_h_ffactor(ffactor)
@@ -305,9 +304,8 @@ def btopen(file, flag='c', mode=0666,
pgsize=None, lorder=None):
flags = _checkflag(flag, file)
- e = _openDBEnv()
+ e = _openDBEnv(cachesize)
d = db.DB(e)
- if cachesize is not None: d.set_cachesize(0, cachesize)
if pgsize is not None: d.set_pagesize(pgsize)
if lorder is not None: d.set_lorder(lorder)
d.set_flags(btflags)
@@ -324,9 +322,8 @@ def rnopen(file, flag='c', mode=0666,
rlen=None, delim=None, source=None, pad=None):
flags = _checkflag(flag, file)
- e = _openDBEnv()
+ e = _openDBEnv(cachesize)
d = db.DB(e)
- if cachesize is not None: d.set_cachesize(0, cachesize)
if pgsize is not None: d.set_pagesize(pgsize)
if lorder is not None: d.set_lorder(lorder)
d.set_flags(rnflags)
@@ -339,8 +336,13 @@ def rnopen(file, flag='c', mode=0666,
#----------------------------------------------------------------------
-def _openDBEnv():
+def _openDBEnv(cachesize):
e = db.DBEnv()
+ if cachesize is not None:
+ if cachesize >= 20480:
+ e.set_cachesize(0, cachesize)
+ else:
+ raise error, "cachesize must be >= 20480"
e.open('.', db.DB_PRIVATE | db.DB_CREATE | db.DB_THREAD | db.DB_INIT_LOCK | db.DB_INIT_MPOOL)
return e
@@ -358,7 +360,7 @@ def _checkflag(flag, file):
#flags = db.DB_CREATE | db.DB_TRUNCATE
# we used db.DB_TRUNCATE flag for this before but BerkeleyDB
# 4.2.52 changed to disallowed truncate with txn environments.
- if os.path.isfile(file):
+ if file is not None and os.path.isfile(file):
os.unlink(file)
else:
raise error, "flags should be one of 'r', 'w', 'c' or 'n'"
diff --git a/Lib/bsddb/test/test_all.py b/Lib/bsddb/test/test_all.py
index 972cd06..abfaf47 100644
--- a/Lib/bsddb/test/test_all.py
+++ b/Lib/bsddb/test/test_all.py
@@ -65,6 +65,7 @@ def suite():
'test_join',
'test_lock',
'test_misc',
+ 'test_pickle',
'test_queue',
'test_recno',
'test_thread',
diff --git a/Lib/bsddb/test/test_pickle.py b/Lib/bsddb/test/test_pickle.py
new file mode 100644
index 0000000..3916e5c
--- /dev/null
+++ b/Lib/bsddb/test/test_pickle.py
@@ -0,0 +1,75 @@
+
+import sys, os, string
+import pickle
+try:
+ import cPickle
+except ImportError:
+ cPickle = None
+import unittest
+import glob
+
+try:
+ # For Pythons w/distutils pybsddb
+ from bsddb3 import db
+except ImportError, e:
+ # For Python 2.3
+ from bsddb import db
+
+
+#----------------------------------------------------------------------
+
+class pickleTestCase(unittest.TestCase):
+ """Verify that DBError can be pickled and unpickled"""
+ db_home = 'db_home'
+ db_name = 'test-dbobj.db'
+
+ def setUp(self):
+ homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home')
+ self.homeDir = homeDir
+ try: os.mkdir(homeDir)
+ except os.error: pass
+
+ def tearDown(self):
+ if hasattr(self, 'db'):
+ del self.db
+ if hasattr(self, 'env'):
+ del self.env
+ files = glob.glob(os.path.join(self.homeDir, '*'))
+ for file in files:
+ os.remove(file)
+
+ def _base_test_pickle_DBError(self, pickle):
+ self.env = db.DBEnv()
+ self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
+ self.db = db.DB(self.env)
+ self.db.open(self.db_name, db.DB_HASH, db.DB_CREATE)
+ self.db.put('spam', 'eggs')
+ assert self.db['spam'] == 'eggs'
+ try:
+ self.db.put('spam', 'ham', flags=db.DB_NOOVERWRITE)
+ except db.DBError, egg:
+ pickledEgg = pickle.dumps(egg)
+ #print repr(pickledEgg)
+ rottenEgg = pickle.loads(pickledEgg)
+ if rottenEgg.args != egg.args or type(rottenEgg) != type(egg):
+ raise Exception, (rottenEgg, '!=', egg)
+ else:
+ raise Exception, "where's my DBError exception?!?"
+
+ self.db.close()
+ self.env.close()
+
+ def test01_pickle_DBError(self):
+ self._base_test_pickle_DBError(pickle=pickle)
+
+ if cPickle:
+ def test02_cPickle_DBError(self):
+ self._base_test_pickle_DBError(pickle=cPickle)
+
+#----------------------------------------------------------------------
+
+def test_suite():
+ return unittest.makeSuite(pickleTestCase)
+
+if __name__ == '__main__':
+ unittest.main(defaultTest='test_suite')
diff --git a/Lib/calendar.py b/Lib/calendar.py
index 3ffcff5..7800aae 100644
--- a/Lib/calendar.py
+++ b/Lib/calendar.py
@@ -5,17 +5,32 @@ default, these calendars have Monday as the first day of the week, and
Sunday as the last (the European convention). Use setfirstweekday() to
set the first day of the week (0=Monday, 6=Sunday)."""
-import datetime
+from __future__ import with_statement
+import sys, datetime, locale
-__all__ = ["error","setfirstweekday","firstweekday","isleap",
- "leapdays","weekday","monthrange","monthcalendar",
- "prmonth","month","prcal","calendar","timegm",
- "month_name", "month_abbr", "day_name", "day_abbr",
- "weekheader"]
+__all__ = ["IllegalMonthError", "IllegalWeekdayError", "setfirstweekday",
+ "firstweekday", "isleap", "leapdays", "weekday", "monthrange",
+ "monthcalendar", "prmonth", "month", "prcal", "calendar",
+ "timegm", "month_name", "month_abbr", "day_name", "day_abbr"]
# Exception raised for bad input (with string parameter for details)
error = ValueError
+# Exceptions raised for bad input
+class IllegalMonthError(ValueError):
+ def __init__(self, month):
+ self.month = month
+ def __str__(self):
+ return "bad month number %r; must be 1-12" % self.month
+
+
+class IllegalWeekdayError(ValueError):
+ def __init__(self, weekday):
+ self.weekday = weekday
+ def __str__(self):
+ return "bad weekday number %r; must be 0 (Monday) to 6 (Sunday)" % self.weekday
+
+
# Constants for months referenced later
January = 1
February = 2
@@ -30,7 +45,7 @@ mdays = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
class _localized_month:
- _months = [datetime.date(2001, i+1, 1).strftime for i in range(12)]
+ _months = [datetime.date(2001, i+1, 1).strftime for i in xrange(12)]
_months.insert(0, lambda x: "")
def __init__(self, format):
@@ -46,10 +61,11 @@ class _localized_month:
def __len__(self):
return 13
+
class _localized_day:
# January 1, 2001, was a Monday.
- _days = [datetime.date(2001, 1, i+1).strftime for i in range(7)]
+ _days = [datetime.date(2001, 1, i+1).strftime for i in xrange(7)]
def __init__(self, format):
self.format = format
@@ -64,6 +80,7 @@ class _localized_day:
def __len__(self):
return 7
+
# Full and abbreviated names of weekdays
day_name = _localized_day('%A')
day_abbr = _localized_day('%a')
@@ -75,23 +92,12 @@ month_abbr = _localized_month('%b')
# Constants for weekdays
(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY) = range(7)
-_firstweekday = 0 # 0 = Monday, 6 = Sunday
-
-def firstweekday():
- return _firstweekday
-
-def setfirstweekday(weekday):
- """Set weekday (Monday=0, Sunday=6) to start each week."""
- global _firstweekday
- if not MONDAY <= weekday <= SUNDAY:
- raise ValueError, \
- 'bad weekday number; must be 0 (Monday) to 6 (Sunday)'
- _firstweekday = weekday
def isleap(year):
"""Return 1 for leap years, 0 for non-leap years."""
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
+
def leapdays(y1, y2):
"""Return number of leap years in range [y1, y2).
Assume y1 <= y2."""
@@ -99,128 +105,501 @@ def leapdays(y1, y2):
y2 -= 1
return (y2//4 - y1//4) - (y2//100 - y1//100) + (y2//400 - y1//400)
+
def weekday(year, month, day):
"""Return weekday (0-6 ~ Mon-Sun) for year (1970-...), month (1-12),
day (1-31)."""
return datetime.date(year, month, day).weekday()
+
def monthrange(year, month):
"""Return weekday (0-6 ~ Mon-Sun) and number of days (28-31) for
year, month."""
if not 1 <= month <= 12:
- raise ValueError, 'bad month number'
+ raise IllegalMonthError(month)
day1 = weekday(year, month, 1)
ndays = mdays[month] + (month == February and isleap(year))
return day1, ndays
-def monthcalendar(year, month):
- """Return a matrix representing a month's calendar.
- Each row represents a week; days outside this month are zero."""
- day1, ndays = monthrange(year, month)
- rows = []
- r7 = range(7)
- day = (_firstweekday - day1 + 6) % 7 - 5 # for leading 0's in first week
- while day <= ndays:
- row = [0, 0, 0, 0, 0, 0, 0]
- for i in r7:
- if 1 <= day <= ndays: row[i] = day
- day = day + 1
- rows.append(row)
- return rows
-
-def prweek(theweek, width):
- """Print a single week (no newline)."""
- print week(theweek, width),
-
-def week(theweek, width):
- """Returns a single week in a string (no newline)."""
- days = []
- for day in theweek:
+
+class Calendar(object):
+ """
+ Base calendar class. This class doesn't do any formatting. It simply
+ provides data to subclasses.
+ """
+
+ def __init__(self, firstweekday=0):
+ self.firstweekday = firstweekday # 0 = Monday, 6 = Sunday
+
+ def getfirstweekday(self):
+ return self._firstweekday % 7
+
+ def setfirstweekday(self, firstweekday):
+ self._firstweekday = firstweekday
+
+ firstweekday = property(getfirstweekday, setfirstweekday)
+
+ def iterweekdays(self):
+ """
+ Return an iterator for one week of weekday numbers starting with the
+ configured first one.
+ """
+ for i in xrange(self.firstweekday, self.firstweekday + 7):
+ yield i%7
+
+ def itermonthdates(self, year, month):
+ """
+ Return an iterator for one month. The iterator will yield datetime.date
+ values and will always iterate through complete weeks, so it will yield
+ dates outside the specified month.
+ """
+ date = datetime.date(year, month, 1)
+ # Go back to the beginning of the week
+ days = (date.weekday() - self.firstweekday) % 7
+ date -= datetime.timedelta(days=days)
+ oneday = datetime.timedelta(days=1)
+ while True:
+ yield date
+ date += oneday
+ if date.month != month and date.weekday() == self.firstweekday:
+ break
+
+ def itermonthdays2(self, year, month):
+ """
+ Like itermonthdates(), but will yield (day number, weekday number)
+ tuples. For days outside the specified month the day number is 0.
+ """
+ for date in self.itermonthdates(year, month):
+ if date.month != month:
+ yield (0, date.weekday())
+ else:
+ yield (date.day, date.weekday())
+
+ def itermonthdays(self, year, month):
+ """
+ Like itermonthdates(), but will yield day numbers. For days
+ outside the specified month the day number is 0.
+ """
+ for date in self.itermonthdates(year, month):
+ if date.month != month:
+ yield 0
+ else:
+ yield date.day
+
+ def monthdatescalendar(self, year, month):
+ """
+ Return a matrix (list of lists) representing a month's calendar.
+ Each row represents a week; week entries are datetime.date values.
+ """
+ dates = list(self.itermonthdates(year, month))
+ return [ dates[i:i+7] for i in xrange(0, len(dates), 7) ]
+
+ def monthdays2calendar(self, year, month):
+ """
+ Return a matrix representing a month's calendar.
+ Each row represents a week; week entries are
+ (day number, weekday number) tuples. Day numbers outside this month
+ are zero.
+ """
+ days = list(self.itermonthdays2(year, month))
+ return [ days[i:i+7] for i in xrange(0, len(days), 7) ]
+
+ def monthdayscalendar(self, year, month):
+ """
+ Return a matrix representing a month's calendar.
+ Each row represents a week; days outside this month are zero.
+ """
+ days = list(self.itermonthdays(year, month))
+ return [ days[i:i+7] for i in xrange(0, len(days), 7) ]
+
+ def yeardatescalendar(self, year, width=3):
+ """
+ Return the data for the specified year ready for formatting. The return
+ value is a list of month rows. Each month row contains up to width months.
+ Each month contains between 4 and 6 weeks and each week contains 1-7
+ days. Days are datetime.date objects.
+ """
+ months = [
+ self.monthdatescalendar(year, i)
+ for i in xrange(January, January+12)
+ ]
+ return [months[i:i+width] for i in xrange(0, len(months), width) ]
+
+ def yeardays2calendar(self, year, width=3):
+ """
+ Return the data for the specified year ready for formatting (similar to
+ yeardatescalendar()). Entries in the week lists are
+ (day number, weekday number) tuples. Day numbers outside this month are
+ zero.
+ """
+ months = [
+ self.monthdays2calendar(year, i)
+ for i in xrange(January, January+12)
+ ]
+ return [months[i:i+width] for i in xrange(0, len(months), width) ]
+
+ def yeardayscalendar(self, year, width=3):
+ """
+ Return the data for the specified year ready for formatting (similar to
+ yeardatescalendar()). Entries in the week lists are day numbers.
+ Day numbers outside this month are zero.
+ """
+ months = [
+ self.monthdayscalendar(year, i)
+ for i in xrange(January, January+12)
+ ]
+ return [months[i:i+width] for i in xrange(0, len(months), width) ]
+
+
+class TextCalendar(Calendar):
+ """
+ Subclass of Calendar that outputs a calendar as simple plain text,
+ similar to the UNIX program cal.
+ """
+
+ def prweek(self, theweek, width):
+ """
+ Print a single week (no newline).
+ """
+ print self.week(theweek, width),
+
+ def formatday(self, day, weekday, width):
+ """
+ Returns a formatted day.
+ """
if day == 0:
s = ''
else:
s = '%2i' % day # right-align single-digit days
- days.append(s.center(width))
- return ' '.join(days)
-
-def weekheader(width):
- """Return a header for a week."""
- if width >= 9:
- names = day_name
- else:
- names = day_abbr
- days = []
- for i in range(_firstweekday, _firstweekday + 7):
- days.append(names[i%7][:width].center(width))
- return ' '.join(days)
-
-def prmonth(theyear, themonth, w=0, l=0):
- """Print a month's calendar."""
- print month(theyear, themonth, w, l),
-
-def month(theyear, themonth, w=0, l=0):
- """Return a month's calendar string (multi-line)."""
- w = max(2, w)
- l = max(1, l)
- s = ("%s %r" % (month_name[themonth], theyear)).center(
- 7 * (w + 1) - 1).rstrip() + \
- '\n' * l + weekheader(w).rstrip() + '\n' * l
- for aweek in monthcalendar(theyear, themonth):
- s = s + week(aweek, w).rstrip() + '\n' * l
- return s[:-l] + '\n'
-
-# Spacing of month columns for 3-column year calendar
+ return s.center(width)
+
+ def formatweek(self, theweek, width):
+ """
+ Returns a single week in a string (no newline).
+ """
+ return ' '.join(self.formatday(d, wd, width) for (d, wd) in theweek)
+
+ def formatweekday(self, day, width):
+ """
+ Returns a formatted week day name.
+ """
+ if width >= 9:
+ names = day_name
+ else:
+ names = day_abbr
+ return names[day][:width].center(width)
+
+ def formatweekheader(self, width):
+ """
+ Return a header for a week.
+ """
+ return ' '.join(self.formatweekday(i, width) for i in self.iterweekdays())
+
+ def formatmonthname(self, theyear, themonth, width, withyear=True):
+ """
+ Return a formatted month name.
+ """
+ s = month_name[themonth]
+ if withyear:
+ s = "%s %r" % (s, theyear)
+ return s.center(width)
+
+ def prmonth(self, theyear, themonth, w=0, l=0):
+ """
+ Print a month's calendar.
+ """
+ print self.formatmonth(theyear, themonth, w, l),
+
+ def formatmonth(self, theyear, themonth, w=0, l=0):
+ """
+ Return a month's calendar string (multi-line).
+ """
+ w = max(2, w)
+ l = max(1, l)
+ s = self.formatmonthname(theyear, themonth, 7 * (w + 1) - 1)
+ s = s.rstrip()
+ s += '\n' * l
+ s += self.formatweekheader(w).rstrip()
+ s += '\n' * l
+ for week in self.monthdays2calendar(theyear, themonth):
+ s += self.formatweek(week, w).rstrip()
+ s += '\n' * l
+ return s
+
+ def formatyear(self, theyear, w=2, l=1, c=6, m=3):
+ """
+ Returns a year's calendar as a multi-line string.
+ """
+ w = max(2, w)
+ l = max(1, l)
+ c = max(2, c)
+ colwidth = (w + 1) * 7 - 1
+ v = []
+ a = v.append
+ a(repr(theyear).center(colwidth*m+c*(m-1)).rstrip())
+ a('\n'*l)
+ header = self.formatweekheader(w)
+ for (i, row) in enumerate(self.yeardays2calendar(theyear, m)):
+ # months in this row
+ months = xrange(m*i+1, min(m*(i+1)+1, 13))
+ a('\n'*l)
+ names = (self.formatmonthname(theyear, k, colwidth, False)
+ for k in months)
+ a(formatstring(names, colwidth, c).rstrip())
+ a('\n'*l)
+ headers = (header for k in months)
+ a(formatstring(headers, colwidth, c).rstrip())
+ a('\n'*l)
+ # max number of weeks for this row
+ height = max(len(cal) for cal in row)
+ for j in xrange(height):
+ weeks = []
+ for cal in row:
+ if j >= len(cal):
+ weeks.append('')
+ else:
+ weeks.append(self.formatweek(cal[j], w))
+ a(formatstring(weeks, colwidth, c).rstrip())
+ a('\n' * l)
+ return ''.join(v)
+
+ def pryear(self, theyear, w=0, l=0, c=6, m=3):
+ """Print a year's calendar."""
+ print self.formatyear(theyear, w, l, c, m)
+
+
+class HTMLCalendar(Calendar):
+ """
+ This calendar returns complete HTML pages.
+ """
+
+ # CSS classes for the day <td>s
+ cssclasses = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"]
+
+ def formatday(self, day, weekday):
+ """
+ Return a day as a table cell.
+ """
+ if day == 0:
+ return '<td class="noday">&nbsp;</td>' # day outside month
+ else:
+ return '<td class="%s">%d</td>' % (self.cssclasses[weekday], day)
+
+ def formatweek(self, theweek):
+ """
+ Return a complete week as a table row.
+ """
+ s = ''.join(self.formatday(d, wd) for (d, wd) in theweek)
+ return '<tr>%s</tr>' % s
+
+ def formatweekday(self, day):
+ """
+ Return a weekday name as a table header.
+ """
+ return '<th class="%s">%s</th>' % (self.cssclasses[day], day_abbr[day])
+
+ def formatweekheader(self):
+ """
+ Return a header for a week as a table row.
+ """
+ s = ''.join(self.formatweekday(i) for i in self.iterweekdays())
+ return '<tr>%s</tr>' % s
+
+ def formatmonthname(self, theyear, themonth, withyear=True):
+ """
+ Return a month name as a table row.
+ """
+ if withyear:
+ s = '%s %s' % (month_name[themonth], theyear)
+ else:
+ s = '%s' % month_name[themonth]
+ return '<tr><th colspan="7" class="month">%s</th></tr>' % s
+
+ def formatmonth(self, theyear, themonth, withyear=True):
+ """
+ Return a formatted month as a table.
+ """
+ v = []
+ a = v.append
+ a('<table border="0" cellpadding="0" cellspacing="0" class="month">')
+ a('\n')
+ a(self.formatmonthname(theyear, themonth, withyear=withyear))
+ a('\n')
+ a(self.formatweekheader())
+ a('\n')
+ for week in self.monthdays2calendar(theyear, themonth):
+ a(self.formatweek(week))
+ a('\n')
+ a('</table>')
+ a('\n')
+ return ''.join(v)
+
+ def formatyear(self, theyear, width=3):
+ """
+ Return a formatted year as a table of tables.
+ """
+ v = []
+ a = v.append
+ width = max(width, 1)
+ a('<table border="0" cellpadding="0" cellspacing="0" class="year">')
+ a('\n')
+ a('<tr><th colspan="%d" class="year">%s</th></tr>' % (width, theyear))
+ for i in xrange(January, January+12, width):
+ # months in this row
+ months = xrange(i, min(i+width, 13))
+ a('<tr>')
+ for m in months:
+ a('<td>')
+ a(self.formatmonth(theyear, m, withyear=False))
+ a('</td>')
+ a('</tr>')
+ a('</table>')
+ return ''.join(v)
+
+ def formatyearpage(self, theyear, width=3, css='calendar.css', encoding=None):
+ """
+ Return a formatted year as a complete HTML page.
+ """
+ if encoding is None:
+ encoding = sys.getdefaultencoding()
+ v = []
+ a = v.append
+ a('<?xml version="1.0" encoding="%s"?>\n' % encoding)
+ a('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n')
+ a('<html>\n')
+ a('<head>\n')
+ a('<meta http-equiv="Content-Type" content="text/html; charset=%s" />\n' % encoding)
+ if css is not None:
+ a('<link rel="stylesheet" type="text/css" href="%s" />\n' % css)
+ a('<title>Calendar for %d</title>\n' % theyear)
+ a('</head>\n')
+ a('<body>\n')
+ a(self.formatyear(theyear, width))
+ a('</body>\n')
+ a('</html>\n')
+ return ''.join(v).encode(encoding, "xmlcharrefreplace")
+
+
+class TimeEncoding:
+ def __init__(self, locale):
+ self.locale = locale
+
+ def __context__(self):
+ return self
+
+ def __enter__(self):
+ self.oldlocale = locale.setlocale(locale.LC_TIME, self.locale)
+ return locale.getlocale(locale.LC_TIME)[1]
+
+ def __exit__(self, *args):
+ locale.setlocale(locale.LC_TIME, self.oldlocale)
+
+
+class LocaleTextCalendar(TextCalendar):
+ """
+ This class can be passed a locale name in the constructor and will return
+ month and weekday names in the specified locale. If this locale includes
+ an encoding all strings containing month and weekday names will be returned
+ as unicode.
+ """
+
+ def __init__(self, firstweekday=0, locale=None):
+ TextCalendar.__init__(self, firstweekday)
+ if locale is None:
+ # use the module, not the (None) parameter that shadows it
+ import locale as _locale
+ locale = _locale.getdefaultlocale()
+ self.locale = locale
+
+ def formatweekday(self, day, width):
+ with TimeEncoding(self.locale) as encoding:
+ if width >= 9:
+ names = day_name
+ else:
+ names = day_abbr
+ name = names[day]
+ if encoding is not None:
+ name = name.decode(encoding)
+ return name[:width].center(width)
+
+ def formatmonthname(self, theyear, themonth, width, withyear=True):
+ with TimeEncoding(self.locale) as encoding:
+ s = month_name[themonth]
+ if encoding is not None:
+ s = s.decode(encoding)
+ if withyear:
+ s = "%s %r" % (s, theyear)
+ return s.center(width)
+
+
+class LocaleHTMLCalendar(HTMLCalendar):
+ """
+ This class can be passed a locale name in the constructor and will return
+ month and weekday names in the specified locale. If this locale includes
+ an encoding all strings containing month and weekday names will be returned
+ as unicode.
+ """
+ def __init__(self, firstweekday=0, locale=None):
+ HTMLCalendar.__init__(self, firstweekday)
+ if locale is None:
+ # use the module, not the (None) parameter that shadows it
+ import locale as _locale
+ locale = _locale.getdefaultlocale()
+ self.locale = locale
+
+ def formatweekday(self, day):
+ with TimeEncoding(self.locale) as encoding:
+ s = day_abbr[day]
+ if encoding is not None:
+ s = s.decode(encoding)
+ return '<th class="%s">%s</th>' % (self.cssclasses[day], s)
+
+ def formatmonthname(self, theyear, themonth, withyear=True):
+ with TimeEncoding(self.locale) as encoding:
+ s = month_name[themonth]
+ if encoding is not None:
+ s = s.decode(encoding)
+ if withyear:
+ s = '%s %s' % (s, theyear)
+ return '<tr><th colspan="7" class="month">%s</th></tr>' % s
+
+
+# Support for old module level interface
+c = TextCalendar()
+
+firstweekday = c.getfirstweekday
+
+def setfirstweekday(firstweekday):
+ if not MONDAY <= firstweekday <= SUNDAY:
+ raise IllegalWeekdayError(firstweekday)
+ c.firstweekday = firstweekday
+
+monthcalendar = c.monthdayscalendar
+prweek = c.prweek
+week = c.formatweek
+weekheader = c.formatweekheader
+prmonth = c.prmonth
+month = c.formatmonth
+calendar = c.formatyear
+prcal = c.pryear
+
+
+# Spacing of month columns for multi-column year calendar
_colwidth = 7*3 - 1 # Amount printed by prweek()
_spacing = 6 # Number of spaces between columns
-def format3c(a, b, c, colwidth=_colwidth, spacing=_spacing):
- """Prints 3-column formatting for year calendars"""
- print format3cstring(a, b, c, colwidth, spacing)
-
-def format3cstring(a, b, c, colwidth=_colwidth, spacing=_spacing):
- """Returns a string formatted from 3 strings, centered within 3 columns."""
- return (a.center(colwidth) + ' ' * spacing + b.center(colwidth) +
- ' ' * spacing + c.center(colwidth))
-
-def prcal(year, w=0, l=0, c=_spacing):
- """Print a year's calendar."""
- print calendar(year, w, l, c),
-
-def calendar(year, w=0, l=0, c=_spacing):
- """Returns a year's calendar as a multi-line string."""
- w = max(2, w)
- l = max(1, l)
- c = max(2, c)
- colwidth = (w + 1) * 7 - 1
- s = repr(year).center(colwidth * 3 + c * 2).rstrip() + '\n' * l
- header = weekheader(w)
- header = format3cstring(header, header, header, colwidth, c).rstrip()
- for q in range(January, January+12, 3):
- s = (s + '\n' * l +
- format3cstring(month_name[q], month_name[q+1], month_name[q+2],
- colwidth, c).rstrip() +
- '\n' * l + header + '\n' * l)
- data = []
- height = 0
- for amonth in range(q, q + 3):
- cal = monthcalendar(year, amonth)
- if len(cal) > height:
- height = len(cal)
- data.append(cal)
- for i in range(height):
- weeks = []
- for cal in data:
- if i >= len(cal):
- weeks.append('')
- else:
- weeks.append(week(cal[i], w))
- s = s + format3cstring(weeks[0], weeks[1], weeks[2],
- colwidth, c).rstrip() + '\n' * l
- return s[:-l] + '\n'
+
+def format(cols, colwidth=_colwidth, spacing=_spacing):
+ """Prints multi-column formatting for year calendars"""
+ print formatstring(cols, colwidth, spacing)
+
+
+def formatstring(cols, colwidth=_colwidth, spacing=_spacing):
+ """Returns a string formatted from n strings, centered within n columns."""
+ spacing *= ' '
+ return spacing.join(c.center(colwidth) for c in cols)
+
EPOCH = 1970
_EPOCH_ORD = datetime.date(EPOCH, 1, 1).toordinal()
+
def timegm(tuple):
"""Unrelated but handy function to calculate Unix timestamp from GMT."""
year, month, day, hour, minute, second = tuple[:6]
@@ -229,3 +608,97 @@ def timegm(tuple):
minutes = hours*60 + minute
seconds = minutes*60 + second
return seconds
+
+
+def main(args):
+ import optparse
+ parser = optparse.OptionParser(usage="usage: %prog [options] [year [month]]")
+ parser.add_option(
+ "-w", "--width",
+ dest="width", type="int", default=2,
+ help="width of date column (default 2, text only)"
+ )
+ parser.add_option(
+ "-l", "--lines",
+ dest="lines", type="int", default=1,
+ help="number of lines for each week (default 1, text only)"
+ )
+ parser.add_option(
+ "-s", "--spacing",
+ dest="spacing", type="int", default=6,
+ help="spacing between months (default 6, text only)"
+ )
+ parser.add_option(
+ "-m", "--months",
+ dest="months", type="int", default=3,
+ help="months per row (default 3, text only)"
+ )
+ parser.add_option(
+ "-c", "--css",
+ dest="css", default="calendar.css",
+ help="CSS to use for page (html only)"
+ )
+ parser.add_option(
+ "-L", "--locale",
+ dest="locale", default=None,
+ help="locale to be used from month and weekday names"
+ )
+ parser.add_option(
+ "-e", "--encoding",
+ dest="encoding", default=None,
+ help="Encoding to use for output"
+ )
+ parser.add_option(
+ "-t", "--type",
+ dest="type", default="text",
+ choices=("text", "html"),
+ help="output type (text or html)"
+ )
+
+ (options, args) = parser.parse_args(args)
+
+ if options.locale and not options.encoding:
+ parser.error("if --locale is specified --encoding is required")
+ sys.exit(1)
+
+ if options.type == "html":
+ if options.locale:
+ cal = LocaleHTMLCalendar(locale=options.locale)
+ else:
+ cal = HTMLCalendar()
+ encoding = options.encoding
+ if encoding is None:
+ encoding = sys.getdefaultencoding()
+ optdict = dict(encoding=encoding, css=options.css)
+ if len(args) == 1:
+ print cal.formatyearpage(datetime.date.today().year, **optdict)
+ elif len(args) == 2:
+ print cal.formatyearpage(int(args[1]), **optdict)
+ else:
+ parser.error("incorrect number of arguments")
+ sys.exit(1)
+ else:
+ if options.locale:
+ cal = LocaleTextCalendar(locale=options.locale)
+ else:
+ cal = TextCalendar()
+ optdict = dict(w=options.width, l=options.lines)
+ if len(args) != 3:
+ optdict["c"] = options.spacing
+ optdict["m"] = options.months
+ if len(args) == 1:
+ result = cal.formatyear(datetime.date.today().year, **optdict)
+ elif len(args) == 2:
+ result = cal.formatyear(int(args[1]), **optdict)
+ elif len(args) == 3:
+ result = cal.formatmonth(int(args[1]), int(args[2]), **optdict)
+ else:
+ parser.error("incorrect number of arguments")
+ sys.exit(1)
+ if options.encoding:
+ result = result.encode(options.encoding)
+ print result
+
+
+if __name__ == "__main__":
+ main(sys.argv)
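A few illustrative calls against the new class-based calendar API (the year
and month are arbitrary); the old module-level functions keep working through
the `c = TextCalendar()` shims above.

    from calendar import TextCalendar, HTMLCalendar

    tc = TextCalendar(firstweekday=6)       # weeks start on Sunday
    print tc.formatmonth(2006, 4)           # plain text, like cal(1)

    weeks = tc.monthdayscalendar(2006, 4)   # list of 7-day lists, 0 = padding
    print weeks[0]

    hc = HTMLCalendar()
    html = hc.formatmonth(2006, 4)          # one <table> per month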
diff --git a/Lib/codecs.py b/Lib/codecs.py
index 28856c7..1518d75 100644
--- a/Lib/codecs.py
+++ b/Lib/codecs.py
@@ -14,8 +14,7 @@ import __builtin__, sys
try:
from _codecs import *
except ImportError, why:
- raise SystemError,\
- 'Failed to load the builtin codecs: %s' % why
+ raise SystemError('Failed to load the builtin codecs: %s' % why)
__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE",
"BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE",
@@ -156,13 +155,13 @@ class Codec:
class IncrementalEncoder(object):
"""
- A IncrementalEncoder encodes an input in multiple steps. The input can be
+ An IncrementalEncoder encodes an input in multiple steps. The input can be
passed piece by piece to the encode() method. The IncrementalEncoder remembers
the state of the Encoding process between calls to encode().
"""
def __init__(self, errors='strict'):
"""
- Creates a IncrementalEncoder instance.
+ Creates an IncrementalEncoder instance.
The IncrementalEncoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
@@ -182,6 +181,33 @@ class IncrementalEncoder(object):
Resets the encoder to the initial state.
"""
+class BufferedIncrementalEncoder(IncrementalEncoder):
+ """
+ This subclass of IncrementalEncoder can be used as the baseclass for an
+ incremental encoder if the encoder must keep some of the output in a
+ buffer between calls to encode().
+ """
+ def __init__(self, errors='strict'):
+ IncrementalEncoder.__init__(self, errors)
+ self.buffer = "" # unencoded input that is kept between calls to encode()
+
+ def _buffer_encode(self, input, errors, final):
+ # Override this method in subclasses: it must encode input
+ # and return an (output, length consumed) tuple
+ raise NotImplementedError
+
+ def encode(self, input, final=False):
+ # encode input (taking the buffer into account)
+ data = self.buffer + input
+ (result, consumed) = self._buffer_encode(data, self.errors, final)
+ # keep unencoded input until the next call
+ self.buffer = data[consumed:]
+ return result
+
+ def reset(self):
+ IncrementalEncoder.reset(self)
+ self.buffer = ""
+
class IncrementalDecoder(object):
"""
An IncrementalDecoder decodes an input in multiple steps. The input can be
@@ -234,7 +260,7 @@ class BufferedIncrementalDecoder(IncrementalDecoder):
def reset(self):
IncrementalDecoder.reset(self)
- self.bytebuffer = ""
+ self.buffer = ""
#
# The StreamWriter and StreamReader class provide generic working
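A minimal, hypothetical BufferedIncrementalEncoder subclass showing the
_buffer_encode() contract added above: return an (output, consumed) tuple,
and anything unconsumed is buffered for the next call. Here a trailing '~'
is held back until more input (or final=True) arrives:

    import codecs

    class TildeEncoder(codecs.BufferedIncrementalEncoder):
        def _buffer_encode(self, input, errors, final):
            if input.endswith('~') and not final:
                return (input[:-1], len(input) - 1)   # keep '~' buffered
            return (input, len(input))

    enc = TildeEncoder()
    print enc.encode('ab~')               # 'ab'  -- '~' stays in the buffer
    print enc.encode('c', final=True)     # '~c'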
diff --git a/Lib/contextlib.py b/Lib/contextlib.py
index 0a5d608..aa5335d 100644
--- a/Lib/contextlib.py
+++ b/Lib/contextlib.py
@@ -30,9 +30,22 @@ class GeneratorContextManager(object):
else:
try:
self.gen.throw(type, value, traceback)
- return True
- except StopIteration:
- return True
+ raise RuntimeError("generator didn't stop after throw()")
+ except StopIteration, exc:
+ # Suppress the exception *unless* it's the same exception that
+ # was passed to throw(). This prevents a StopIteration
+ # raised inside the "with" statement from being suppressed
+ return exc is not value
+ except:
+ # only re-raise if it's *not* the exception that was
+ # passed to throw(), because __exit__() must not raise
+ # an exception unless __exit__() itself failed. But throw()
+ # has to raise the exception to signal propagation, so this
+ # fixes the impedance mismatch between the throw() protocol
+ # and the __exit__() protocol.
+ #
+ if sys.exc_info()[1] is not value:
+ raise
def contextmanager(func):
@@ -68,6 +81,7 @@ def contextmanager(func):
try:
helper.__name__ = func.__name__
helper.__doc__ = func.__doc__
+ helper.__dict__ = func.__dict__
except:
pass
return helper
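A sketch of the behaviour the __exit__() change above pins down: a
@contextmanager generator must either let a thrown exception propagate or
swallow it and finish; merely yielding again now raises RuntimeError, and a
StopIteration raised inside the with block is no longer swallowed (that is
the `exc is not value` identity check).

    from __future__ import with_statement
    from contextlib import contextmanager

    @contextmanager
    def swallow():
        try:
            yield
        except ValueError:
            pass                    # generator ends -> exception suppressed

    with swallow():
        raise ValueError('handled')   # does not propagate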
diff --git a/Lib/copy_reg.py b/Lib/copy_reg.py
index 169520d..f4661ed 100644
--- a/Lib/copy_reg.py
+++ b/Lib/copy_reg.py
@@ -111,8 +111,19 @@ def _slotnames(cls):
# Slots found -- gather slot names from all base classes
for c in cls.__mro__:
if "__slots__" in c.__dict__:
- names += [name for name in c.__dict__["__slots__"]
- if name not in ("__dict__", "__weakref__")]
+ slots = c.__dict__['__slots__']
+ # if class has a single slot, it can be given as a string
+ if isinstance(slots, basestring):
+ slots = (slots,)
+ for name in slots:
+ # special descriptors
+ if name in ("__dict__", "__weakref__"):
+ continue
+ # mangled names
+ elif name.startswith('__') and not name.endswith('__'):
+ names.append('_%s%s' % (c.__name__, name))
+ else:
+ names.append(name)
# Cache the outcome in the class if at all possible
try:
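An illustration of the two cases _slotnames() now handles: a bare string
__slots__ counts as a single slot, and private names are stored mangled
(the class name here is illustrative):

    import copy_reg

    class C(object):
        __slots__ = '__x'             # single slot given as a string

    print copy_reg._slotnames(C)      # ['_C__x'] -- mangled with the class name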
diff --git a/Lib/ctypes/__init__.py b/Lib/ctypes/__init__.py
index dd0f640..28ac180 100644
--- a/Lib/ctypes/__init__.py
+++ b/Lib/ctypes/__init__.py
@@ -9,11 +9,7 @@ from _ctypes import Union, Structure, Array
from _ctypes import _Pointer
from _ctypes import CFuncPtr as _CFuncPtr
from _ctypes import __version__ as _ctypes_version
-try:
- from _ctypes import RTLD_LOCAL, RTLD_GLOBAL
-except (ImportError, AttributeError):
- RTLD_GLOBAL = RTLD_LOCAL = None
-
+from _ctypes import RTLD_LOCAL, RTLD_GLOBAL
from _ctypes import ArgumentError
from struct import calcsize as _calcsize
@@ -304,10 +300,11 @@ class CDLL(object):
raise AttributeError, name
return self.__getitem__(name)
- def __getitem__(self, name):
- func = self._FuncPtr(name, self)
- func.__name__ = name
- setattr(self, name, func)
+ def __getitem__(self, name_or_ordinal):
+ func = self._FuncPtr((name_or_ordinal, self))
+ if not isinstance(name_or_ordinal, (int, long)):
+ func.__name__ = name_or_ordinal
+ setattr(self, name_or_ordinal, func)
return func
class PyDLL(CDLL):
@@ -384,21 +381,29 @@ if _os.name in ("nt", "ce"):
_pointer_type_cache[None] = c_void_p
-# functions
-
-from _ctypes import _memmove_addr, _memset_addr, _string_at_addr, cast
-
if sizeof(c_uint) == sizeof(c_void_p):
c_size_t = c_uint
elif sizeof(c_ulong) == sizeof(c_void_p):
c_size_t = c_ulong
+# functions
+
+from _ctypes import _memmove_addr, _memset_addr, _string_at_addr, _cast_addr
+
## void *memmove(void *, const void *, size_t);
memmove = CFUNCTYPE(c_void_p, c_void_p, c_void_p, c_size_t)(_memmove_addr)
## void *memset(void *, int, size_t)
memset = CFUNCTYPE(c_void_p, c_void_p, c_int, c_size_t)(_memset_addr)
+def PYFUNCTYPE(restype, *argtypes):
+ class CFunctionType(_CFuncPtr):
+ _argtypes_ = argtypes
+ _restype_ = restype
+ _flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI
+ return CFunctionType
+cast = PYFUNCTYPE(py_object, c_void_p, py_object)(_cast_addr)
+
_string_at = CFUNCTYPE(py_object, c_void_p, c_int)(_string_at_addr)
def string_at(ptr, size=0):
"""string_at(addr[, size]) -> string
diff --git a/Lib/ctypes/_loader.py b/Lib/ctypes/_loader.py
index 7bde6c6..7a48c1c 100644
--- a/Lib/ctypes/_loader.py
+++ b/Lib/ctypes/_loader.py
@@ -1,14 +1,11 @@
-# WORK IN PROGRESS! DO NOT (yet) USE!
import sys, os
import ctypes
-__all__ = ["LibraryLoader", "RTLD_LOCAL", "RTLD_GLOBAL"]
-
if os.name in ("nt", "ce"):
from _ctypes import LoadLibrary as dlopen
- RTLD_LOCAL = RTLD_GLOBAL = None
else:
- from _ctypes import dlopen, RTLD_LOCAL, RTLD_GLOBAL
+ from _ctypes import dlopen
+from _ctypes import RTLD_LOCAL, RTLD_GLOBAL
# _findLib(name) returns an iterable of possible names for a library.
if os.name in ("nt", "ce"):
@@ -56,7 +53,10 @@ elif os.name == "posix":
expr = '/[^\(\)\s]*lib%s\.[^\(\)\s]*' % name
res = re.search(expr, os.popen('/sbin/ldconfig -p 2>/dev/null').read())
if not res:
- return None
+ cmd = 'ldd %s 2>/dev/null' % sys.executable
+ res = re.search(expr, os.popen(cmd).read())
+ if not res:
+ return None
return res.group(0)
def _get_soname(f):
diff --git a/Lib/ctypes/test/test_byteswap.py b/Lib/ctypes/test/test_byteswap.py
index 1b31f90..d0ada40 100644
--- a/Lib/ctypes/test/test_byteswap.py
+++ b/Lib/ctypes/test/test_byteswap.py
@@ -149,7 +149,7 @@ class Test(unittest.TestCase):
self.failUnless(c_char.__ctype_le__ is c_char)
self.failUnless(c_char.__ctype_be__ is c_char)
- def test_struct_fields(self):
+ def test_struct_fields_1(self):
if sys.byteorder == "little":
base = BigEndianStructure
else:
@@ -198,17 +198,20 @@ class Test(unittest.TestCase):
pass
self.assertRaises(TypeError, setattr, S, "_fields_", [("s", T)])
- # crashes on solaris with a core dump.
- def X_test_struct_fields(self):
+ def test_struct_fields_2(self):
+ # standard packing in struct uses no alignment.
+ # So, we have to align using pad bytes.
+ #
+ # Unaligned accesses will crash Python (on those platforms that
+ # don't allow it, like sparc solaris).
if sys.byteorder == "little":
base = BigEndianStructure
- fmt = ">bhid"
+ fmt = ">bxhid"
else:
base = LittleEndianStructure
- fmt = "<bhid"
+ fmt = "<bxhid"
class S(base):
- _pack_ = 1 # struct with '<' or '>' uses standard alignment.
_fields_ = [("b", c_byte),
("h", c_short),
("i", c_int),
@@ -218,5 +221,60 @@ class Test(unittest.TestCase):
s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
self.failUnlessEqual(bin(s1), bin(s2))
+ def test_unaligned_nonnative_struct_fields(self):
+ if sys.byteorder == "little":
+ base = BigEndianStructure
+ fmt = ">b h xi xd"
+ else:
+ base = LittleEndianStructure
+ fmt = "<b h xi xd"
+
+ class S(base):
+ _pack_ = 1
+ _fields_ = [("b", c_byte),
+
+ ("h", c_short),
+
+ ("_1", c_byte),
+ ("i", c_int),
+
+ ("_2", c_byte),
+ ("d", c_double)]
+
+ s1 = S()
+ s1.b = 0x12
+ s1.h = 0x1234
+ s1.i = 0x12345678
+ s1.d = 3.14
+ s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
+ self.failUnlessEqual(bin(s1), bin(s2))
+
+ def test_unaligned_native_struct_fields(self):
+ if sys.byteorder == "little":
+ fmt = "<b h xi xd"
+ else:
+ fmt = ">b h xi xd"
+
+ class S(Structure):
+ _pack_ = 1
+ _fields_ = [("b", c_byte),
+
+ ("h", c_short),
+
+ ("_1", c_byte),
+ ("i", c_int),
+
+ ("_2", c_byte),
+ ("d", c_double)]
+
+ s1 = S()
+ s1.b = 0x12
+ s1.h = 0x1234
+ s1.i = 0x12345678
+ s1.d = 3.14
+ s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
+ self.failUnlessEqual(bin(s1), bin(s2))
+
if __name__ == "__main__":
unittest.main()
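
[Editorial note] The pad-byte bookkeeping in these tests follows from how the struct module works: the standard layouts selected by '<' and '>' never insert alignment padding, while the native layout does, so a packed ctypes structure can only match a standard format string whose 'x' pad bytes are spelled out by hand. The premise is easy to verify on its own:

    import struct

    # Standard ('<'/'>') layouts pack fields with no padding at all...
    assert struct.calcsize(">bhid") == 1 + 2 + 4 + 8    # 15 bytes
    # ...so alignment must be written out as explicit 'x' pad bytes.
    assert struct.calcsize(">bxhid") == 16
    # Native mode (the default) aligns automatically; on common ABIs
    # the same fields come to 16 bytes with no 'x' in the format.
    print struct.calcsize("bhid")
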
diff --git a/Lib/ctypes/test/test_cfuncs.py b/Lib/ctypes/test/test_cfuncs.py
index 7c2b28b..6e0798d 100644
--- a/Lib/ctypes/test/test_cfuncs.py
+++ b/Lib/ctypes/test/test_cfuncs.py
@@ -179,7 +179,7 @@ else:
def __getattr__(self, name):
if name[:2] == '__' and name[-2:] == '__':
raise AttributeError, name
- func = self._FuncPtr("s_" + name, self)
+ func = self._FuncPtr(("s_" + name, self))
setattr(self, name, func)
return func
diff --git a/Lib/ctypes/test/test_keeprefs.py b/Lib/ctypes/test/test_keeprefs.py
index 39e70e3..7318f29 100644
--- a/Lib/ctypes/test/test_keeprefs.py
+++ b/Lib/ctypes/test/test_keeprefs.py
@@ -140,5 +140,10 @@ class PointerToStructure(unittest.TestCase):
r.a[0].x = 42
r.a[0].y = 99
+ # to avoid leaking when tests are run several times
+ # clean up the types left in the cache.
+ from ctypes import _pointer_type_cache
+ del _pointer_type_cache[POINT]
+
if __name__ == "__main__":
unittest.main()
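
[Editorial note] The cleanup works because ctypes memoizes generated pointer types: every POINTER(POINT), and every pointer(obj) for a POINT instance, hands back one cached class, and that cache entry keeps the type alive across test runs. A small sketch of the behaviour being cleaned up, touching the same private cache as the patch:

    from ctypes import Structure, POINTER, c_int, _pointer_type_cache

    class POINT(Structure):
        _fields_ = [("x", c_int), ("y", c_int)]

    # POINTER() is memoized: the identical class object comes back.
    assert POINTER(POINT) is POINTER(POINT)
    assert POINT in _pointer_type_cache
    # Dropping the cache entry (as the test now does) releases the
    # generated pointer type between runs.
    del _pointer_type_cache[POINT]
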
diff --git a/Lib/ctypes/test/test_loading.py b/Lib/ctypes/test/test_loading.py
index 80564b8..4558417 100644
--- a/Lib/ctypes/test/test_loading.py
+++ b/Lib/ctypes/test/test_loading.py
@@ -2,40 +2,72 @@ from ctypes import *
import sys, unittest
import os, StringIO
+libc_name = None
+if os.name == "nt":
+ libc_name = "msvcrt"
+elif os.name == "ce":
+ libc_name = "coredll"
+elif sys.platform == "darwin":
+ libc_name = "libc.dylib"
+elif sys.platform == "cygwin":
+ libc_name = "cygwin1.dll"
+else:
+ for line in os.popen("ldd %s" % sys.executable):
+ if "libc.so" in line:
+ if sys.platform == "openbsd3":
+ libc_name = line.split()[4]
+ else:
+ libc_name = line.split()[2]
+## print "libc_name is", libc_name
+ break
+
class LoaderTest(unittest.TestCase):
unknowndll = "xxrandomnamexx"
- def test_load(self):
- if os.name == "nt":
- name = "msvcrt"
- elif os.name == "ce":
- name = "coredll"
- elif sys.platform == "darwin":
- name = "libc.dylib"
- elif sys.platform.startswith("freebsd"):
- name = "libc.so"
- elif sys.platform == "sunos5":
- name = "libc.so"
- else:
- name = "libc.so.6"
- cdll.load(name)
- self.assertRaises(OSError, cdll.load, self.unknowndll)
-
- def test_load_version(self):
- version = "6"
- name = "c"
- if sys.platform == "linux2":
- cdll.load_version(name, version)
+ if libc_name is not None:
+ def test_load(self):
+ cdll.load(libc_name)
+ cdll.load(os.path.basename(libc_name))
+ self.assertRaises(OSError, cdll.load, self.unknowndll)
+
+ if libc_name is not None and os.path.basename(libc_name) == "libc.so.6":
+ def test_load_version(self):
+ cdll.load_version("c", "6")
# linux uses version, libc 9 should not exist
- self.assertRaises(OSError, cdll.load_version, name, "9")
- self.assertRaises(OSError, cdll.load_version, self.unknowndll, "")
+ self.assertRaises(OSError, cdll.load_version, "c", "9")
+ self.assertRaises(OSError, cdll.load_version, self.unknowndll, "")
- if os.name == "posix" and sys.platform != "sunos5":
def test_find(self):
name = "c"
cdll.find(name)
self.assertRaises(OSError, cdll.find, self.unknowndll)
+ if os.name in ("nt", "ce"):
+ def test_load_library(self):
+ if os.name == "nt":
+ windll.load_library("kernel32").GetModuleHandleW
+ windll.LoadLibrary("kernel32").GetModuleHandleW
+ WinDLL("kernel32").GetModuleHandleW
+ elif os.name == "ce":
+ windll.load_library("coredll").GetModuleHandleW
+ windll.LoadLibrary("coredll").GetModuleHandleW
+ WinDLL("coredll").GetModuleHandleW
+
+ def test_load_ordinal_functions(self):
+ import _ctypes_test
+ dll = WinDLL(_ctypes_test.__file__)
+ # We load the same function both via ordinal and name
+ func_ord = dll[2]
+ func_name = dll.GetString
+ # addressof gets the address where the function pointer is stored
+ a_ord = addressof(func_ord)
+ a_name = addressof(func_name)
+ f_ord_addr = c_void_p.from_address(a_ord).value
+ f_name_addr = c_void_p.from_address(a_name).value
+ self.failUnlessEqual(hex(f_ord_addr), hex(f_name_addr))
+
+ self.failUnlessRaises(AttributeError, dll.__getitem__, 1234)
+
if __name__ == "__main__":
unittest.main()
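
[Editorial note] Note the pattern used throughout the rewritten module: rather than skipping at run time (unittest grew skip decorators only in later Python versions), test methods are defined conditionally, so a test simply does not exist on platforms where its precondition failed at import time. The same trick in miniature, with invented names:

    import unittest

    HAVE_LIBC = True   # stand-in for the "libc_name is not None" probe

    class LoaderLikeTest(unittest.TestCase):
        # The method is only bound when the precondition held at import
        # time; elsewhere unittest never sees the test at all.
        if HAVE_LIBC:
            def test_needs_libc(self):
                self.assertEqual(1 + 1, 2)
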
diff --git a/Lib/ctypes/test/test_pointers.py b/Lib/ctypes/test/test_pointers.py
index 6172abb..3a324a6 100644
--- a/Lib/ctypes/test/test_pointers.py
+++ b/Lib/ctypes/test/test_pointers.py
@@ -166,6 +166,18 @@ class PointersTestCase(unittest.TestCase):
result = func( byref(argc), argv )
assert result == 'world', result
+ def test_bug_1467852(self):
+ # http://sourceforge.net/tracker/?func=detail&atid=532154&aid=1467852&group_id=71702
+ x = c_int(5)
+ dummy = []
+ for i in range(32000):
+ dummy.append(c_int(i))
+ y = c_int(6)
+ p = pointer(x)
+ pp = pointer(p)
+ q = pointer(y)
+ pp[0] = q # <==
+ self.failUnlessEqual(p[0], 6)
if __name__ == '__main__':
unittest.main()
diff --git a/Lib/ctypes/test/test_posix.py b/Lib/ctypes/test/test_posix.py
index 2b4fdff..fe0a40a 100644
--- a/Lib/ctypes/test/test_posix.py
+++ b/Lib/ctypes/test/test_posix.py
@@ -8,8 +8,10 @@ if os.name == "posix" and sys.platform == "linux2":
class TestRTLD_GLOBAL(unittest.TestCase):
def test_GL(self):
- cdll.load('libGL.so', mode=RTLD_GLOBAL)
- cdll.load('libGLU.so')
+ if os.path.exists('/usr/lib/libGL.so'):
+ cdll.load('libGL.so', mode=RTLD_GLOBAL)
+ if os.path.exists('/usr/lib/libGLU.so'):
+ cdll.load('libGLU.so')
##if os.name == "posix" and sys.platform != "darwin":
diff --git a/Lib/ctypes/test/test_prototypes.py b/Lib/ctypes/test/test_prototypes.py
index 2c3d75b..47f5da1 100644
--- a/Lib/ctypes/test/test_prototypes.py
+++ b/Lib/ctypes/test/test_prototypes.py
@@ -24,6 +24,19 @@ import unittest
import _ctypes_test
testdll = cdll.load(_ctypes_test.__file__)
+# Return machine address `a` as a (possibly long) non-negative integer.
+# Starting with Python 2.5, id(anything) is always non-negative, and
+# the ctypes addressof() inherits that via PyLong_FromVoidPtr().
+def positive_address(a):
+ if a >= 0:
+ return a
+ # View the bits in `a` as unsigned instead.
+ import struct
+ num_bits = struct.calcsize("P") * 8 # num bits in native machine address
+ a += 1L << num_bits
+ assert a >= 0
+ return a
+
def c_wbuffer(init):
n = len(init) + 1
return (c_wchar * n)(*init)
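
[Editorial note] positive_address reinterprets a negative value as the same bit pattern read unsigned, sized to the platform's pointer width. The arithmetic, pinned to 32 bits for illustration:

    import struct

    def as_unsigned(a, num_bits=32):
        # Adding 2**num_bits maps, e.g., -1 to 0xFFFFFFFF: the same
        # 32-bit pattern viewed as an unsigned address.
        if a < 0:
            a += 1 << num_bits
        return a

    assert as_unsigned(-1) == 0xFFFFFFFF
    assert as_unsigned(0x1234) == 0x1234
    # The real helper derives num_bits from the native pointer size:
    print struct.calcsize("P") * 8      # 32 or 64
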
@@ -43,7 +56,8 @@ class CharPointersTestCase(unittest.TestCase):
ci = c_int(0)
func.argtypes = POINTER(c_int),
- self.failUnlessEqual(addressof(ci), func(byref(ci)))
+ self.failUnlessEqual(positive_address(addressof(ci)),
+ positive_address(func(byref(ci))))
func.argtypes = c_char_p,
self.assertRaises(ArgumentError, func, byref(ci))
diff --git a/Lib/ctypes/test/test_random_things.py b/Lib/ctypes/test/test_random_things.py
index cd50ca8..78a665b 100644
--- a/Lib/ctypes/test/test_random_things.py
+++ b/Lib/ctypes/test/test_random_things.py
@@ -51,16 +51,14 @@ class CallbackTracbackTestCase(unittest.TestCase):
def test_IntegerDivisionError(self):
cb = CFUNCTYPE(c_int, c_int)(callback_func)
out = self.capture_stderr(cb, 0)
- self.failUnlessEqual(out.splitlines()[-1],
- "ZeroDivisionError: "
- "integer division or modulo by zero")
+ self.failUnlessEqual(out.splitlines()[-1][:19],
+ "ZeroDivisionError: ")
def test_FloatDivisionError(self):
cb = CFUNCTYPE(c_int, c_double)(callback_func)
out = self.capture_stderr(cb, 0.0)
- self.failUnlessEqual(out.splitlines()[-1],
- "ZeroDivisionError: "
- "float division")
+ self.failUnlessEqual(out.splitlines()[-1][:19],
+ "ZeroDivisionError: ")
def test_TypeErrorDivisionError(self):
cb = CFUNCTYPE(c_int, c_char_p)(callback_func)
diff --git a/Lib/ctypes/test/test_sizes.py b/Lib/ctypes/test/test_sizes.py
index 6fb9ca0..208c00e 100644
--- a/Lib/ctypes/test/test_sizes.py
+++ b/Lib/ctypes/test/test_sizes.py
@@ -20,5 +20,8 @@ class SizesTestCase(unittest.TestCase):
self.failUnlessEqual(8, sizeof(c_int64))
self.failUnlessEqual(8, sizeof(c_uint64))
+ def test_size_t(self):
+ self.failUnlessEqual(sizeof(c_void_p), sizeof(c_size_t))
+
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/ctypes/test/test_unaligned_structures.py b/Lib/ctypes/test/test_unaligned_structures.py
new file mode 100644
index 0000000..89343ba
--- /dev/null
+++ b/Lib/ctypes/test/test_unaligned_structures.py
@@ -0,0 +1,45 @@
+import sys, unittest
+from ctypes import *
+
+structures = []
+byteswapped_structures = []
+
+
+if sys.byteorder == "little":
+ SwappedStructure = BigEndianStructure
+else:
+ SwappedStructure = LittleEndianStructure
+
+for typ in [c_short, c_int, c_long, c_longlong,
+ c_float, c_double,
+ c_ushort, c_uint, c_ulong, c_ulonglong]:
+ class X(Structure):
+ _pack_ = 1
+ _fields_ = [("pad", c_byte),
+ ("value", typ)]
+ class Y(SwappedStructure):
+ _pack_ = 1
+ _fields_ = [("pad", c_byte),
+ ("value", typ)]
+ structures.append(X)
+ byteswapped_structures.append(Y)
+
+class TestStructures(unittest.TestCase):
+ def test_native(self):
+ for typ in structures:
+## print typ.value
+ self.failUnlessEqual(typ.value.offset, 1)
+ o = typ()
+ o.value = 4
+ self.failUnlessEqual(o.value, 4)
+
+ def test_swapped(self):
+ for typ in byteswapped_structures:
+## print >> sys.stderr, typ.value
+ self.failUnlessEqual(typ.value.offset, 1)
+ o = typ()
+ o.value = 4
+ self.failUnlessEqual(o.value, 4)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/Lib/distutils/command/build_ext.py b/Lib/distutils/command/build_ext.py
index 6ea5d57..5771252 100644
--- a/Lib/distutils/command/build_ext.py
+++ b/Lib/distutils/command/build_ext.py
@@ -185,7 +185,9 @@ class build_ext (Command):
# for extensions under Cygwin and AtheOS Python's library directory must be
# appended to library_dirs
- if sys.platform[:6] == 'cygwin' or sys.platform[:6] == 'atheos':
+ if sys.platform[:6] == 'cygwin' or sys.platform[:6] == 'atheos' or \
+ (sys.platform.startswith('linux') and
+ sysconfig.get_config_var('Py_ENABLE_SHARED')):
if string.find(sys.executable, sys.exec_prefix) != -1:
# building third party extensions
self.library_dirs.append(os.path.join(sys.prefix, "lib",
@@ -688,6 +690,13 @@ class build_ext (Command):
# extensions, it is a reference to the original list
return ext.libraries + [pythonlib, "m"] + extra
else:
- return ext.libraries
+ from distutils import sysconfig
+ if sysconfig.get_config_var('Py_ENABLE_SHARED'):
+ template = "python%d.%d"
+ pythonlib = (template %
+ (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
+ return ext.libraries + [pythonlib]
+ else:
+ return ext.libraries
# class build_ext
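
[Editorial note] Both new branches build the python library name out of sys.hexversion, whose top byte is the major version and next byte the minor. The bit arithmetic in isolation:

    import sys

    # On a 2.5 interpreter, sys.hexversion >> 24 == 2 and
    # (sys.hexversion >> 16) & 0xff == 5, giving "python2.5".
    pythonlib = "python%d.%d" % (sys.hexversion >> 24,
                                 (sys.hexversion >> 16) & 0xff)
    print pythonlib
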
diff --git a/Lib/distutils/command/install.py b/Lib/distutils/command/install.py
index 7723761..453151d 100644
--- a/Lib/distutils/command/install.py
+++ b/Lib/distutils/command/install.py
@@ -601,6 +601,7 @@ class install (Command):
('install_headers', has_headers),
('install_scripts', has_scripts),
('install_data', has_data),
+ ('install_egg_info', lambda self:True),
]
# class install
diff --git a/Lib/distutils/command/install_egg_info.py b/Lib/distutils/command/install_egg_info.py
new file mode 100644
index 0000000..c31ac29
--- /dev/null
+++ b/Lib/distutils/command/install_egg_info.py
@@ -0,0 +1,75 @@
+"""distutils.command.install_egg_info
+
+Implements the Distutils 'install_egg_info' command, for installing
+a package's PKG-INFO metadata."""
+
+
+from distutils.cmd import Command
+from distutils import log, dir_util
+import os, sys, re
+
+class install_egg_info(Command):
+ """Install an .egg-info file for the package"""
+
+ description = "Install package's PKG-INFO metadata as an .egg-info file"
+ user_options = [
+ ('install-dir=', 'd', "directory to install to"),
+ ]
+
+ def initialize_options(self):
+ self.install_dir = None
+
+ def finalize_options(self):
+ self.set_undefined_options('install_lib',('install_dir','install_dir'))
+ basename = "%s-%s-py%s.egg-info" % (
+ to_filename(safe_name(self.distribution.get_name())),
+ to_filename(safe_version(self.distribution.get_version())),
+ sys.version[:3]
+ )
+ self.target = os.path.join(self.install_dir, basename)
+ self.outputs = [self.target]
+
+ def run(self):
+ target = self.target
+ if os.path.isdir(target) and not os.path.islink(target):
+ dir_util.remove_tree(target, dry_run=self.dry_run)
+ elif os.path.exists(target):
+ self.execute(os.unlink,(self.target,),"Removing "+target)
+ log.info("Writing %s", target)
+ if not self.dry_run:
+ f = open(target, 'w')
+ self.distribution.metadata.write_pkg_file(f)
+ f.close()
+
+ def get_outputs(self):
+ return self.outputs
+
+
+# The following routines are taken from setuptools' pkg_resources module and
+# can be replaced by importing them from pkg_resources once it is included
+# in the stdlib.
+
+def safe_name(name):
+ """Convert an arbitrary string to a standard distribution name
+
+ Any runs of non-alphanumeric/. characters are replaced with a single '-'.
+ """
+ return re.sub('[^A-Za-z0-9.]+', '-', name)
+
+
+def safe_version(version):
+ """Convert an arbitrary string to a standard version string
+
+ Spaces become dots, and all other non-alphanumeric characters become
+ dashes, with runs of multiple dashes condensed to a single dash.
+ """
+ version = version.replace(' ','.')
+ return re.sub('[^A-Za-z0-9.]+', '-', version)
+
+
+def to_filename(name):
+ """Convert a project or version name to its filename-escaped form
+
+ Any '-' characters are currently replaced with '_'.
+ """
+ return name.replace('-','_')
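
[Editorial note] Chained together, the three helpers produce the .egg-info basename assembled in finalize_options. A worked example with a hypothetical distribution named "My Package" at version "1.0 beta":

    import sys, re

    def safe_name(name):
        return re.sub('[^A-Za-z0-9.]+', '-', name)

    def safe_version(version):
        return re.sub('[^A-Za-z0-9.]+', '-', version.replace(' ', '.'))

    def to_filename(name):
        return name.replace('-', '_')

    basename = "%s-%s-py%s.egg-info" % (
        to_filename(safe_name("My Package")),     # -> My_Package
        to_filename(safe_version("1.0 beta")),    # -> 1.0.beta
        sys.version[:3])
    print basename    # e.g. My_Package-1.0.beta-py2.5.egg-info
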
diff --git a/Lib/distutils/command/upload.py b/Lib/distutils/command/upload.py
index 62767a3..6f4ce81 100644
--- a/Lib/distutils/command/upload.py
+++ b/Lib/distutils/command/upload.py
@@ -29,6 +29,7 @@ class upload(Command):
'display full response text from server'),
('sign', 's',
'sign files to upload using gpg'),
+ ('identity=', 'i', 'GPG identity used to sign files'),
]
boolean_options = ['show-response', 'sign']
@@ -38,8 +39,13 @@ class upload(Command):
self.repository = ''
self.show_response = 0
self.sign = False
+ self.identity = None
def finalize_options(self):
+ if self.identity and not self.sign:
+ raise DistutilsOptionError(
+ "Must use --sign for --identity to have meaning"
+ )
if os.environ.has_key('HOME'):
rc = os.path.join(os.environ['HOME'], '.pypirc')
if os.path.exists(rc):
@@ -67,7 +73,10 @@ class upload(Command):
def upload_file(self, command, pyversion, filename):
# Sign if requested
if self.sign:
- spawn(("gpg", "--detach-sign", "-a", filename),
+ gpg_args = ["gpg", "--detach-sign", "-a", filename]
+ if self.identity:
+ gpg_args[2:2] = ["--local-user", self.identity]
+ spawn(gpg_args,
dry_run=self.dry_run)
# Fill in the data - send all the meta-data in case we need to
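
[Editorial note] The gpg_args[2:2] slice assignment splices the identity flags into the middle of the command line, between "--detach-sign" and "-a", without replacing anything. Stand-alone, with a hypothetical identity and filename:

    gpg_args = ["gpg", "--detach-sign", "-a", "dist/pkg-1.0.tar.gz"]
    identity = "alice@example.org"        # hypothetical --identity value
    if identity:
        # Assigning to an empty slice inserts; nothing is overwritten.
        gpg_args[2:2] = ["--local-user", identity]
    print gpg_args
    # ['gpg', '--detach-sign', '--local-user', 'alice@example.org',
    #  '-a', 'dist/pkg-1.0.tar.gz']
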
diff --git a/Lib/distutils/log.py b/Lib/distutils/log.py
index cf3ee13..95d4c1c 100644
--- a/Lib/distutils/log.py
+++ b/Lib/distutils/log.py
@@ -20,7 +20,12 @@ class Log:
def _log(self, level, msg, args):
if level >= self.threshold:
- print msg % args
+ if not args:
+ # msg may contain a '%'. If args is empty,
+ # don't even try to string-format
+ print msg
+ else:
+ print msg % args
sys.stdout.flush()
def log(self, level, msg, *args):
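
[Editorial note] The guard exists because %-formatting a message that itself contains a percent sign blows up when no arguments were passed: "100% done" % () raises ValueError. A sketch of the fixed behaviour:

    def log_line(msg, *args):
        # Only interpolate when arguments were actually supplied, so a
        # literal '%' in msg can no longer raise at logging time.
        if args:
            return msg % args
        return msg

    assert log_line("copying %s", "setup.py") == "copying setup.py"
    assert log_line("100% done") == "100% done"   # raised before the fix
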
diff --git a/Lib/distutils/sysconfig.py b/Lib/distutils/sysconfig.py
index dc603be..49536f0 100644
--- a/Lib/distutils/sysconfig.py
+++ b/Lib/distutils/sysconfig.py
@@ -31,7 +31,7 @@ landmark = os.path.join(argv0_path, "Modules", "Setup")
python_build = os.path.isfile(landmark)
-del argv0_path, landmark
+del landmark
def get_python_version():
@@ -185,7 +185,7 @@ def customize_compiler(compiler):
def get_config_h_filename():
"""Return full pathname of installed pyconfig.h file."""
if python_build:
- inc_dir = os.curdir
+ inc_dir = argv0_path
else:
inc_dir = get_python_inc(plat_specific=1)
if get_python_version() < '2.2':
@@ -213,8 +213,8 @@ def parse_config_h(fp, g=None):
"""
if g is None:
g = {}
- define_rx = re.compile("#define ([A-Z][A-Z0-9_]+) (.*)\n")
- undef_rx = re.compile("/[*] #undef ([A-Z][A-Z0-9_]+) [*]/\n")
+ define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
+ undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
#
while 1:
line = fp.readline()
@@ -351,6 +351,17 @@ def _init_posix():
raise DistutilsPlatformError(my_msg)
+ # load the installed pyconfig.h:
+ try:
+ filename = get_config_h_filename()
+ parse_config_h(file(filename), g)
+ except IOError, msg:
+ my_msg = "invalid Python installation: unable to open %s" % filename
+ if hasattr(msg, "strerror"):
+ my_msg = my_msg + " (%s)" % msg.strerror
+
+ raise DistutilsPlatformError(my_msg)
+
# On MacOSX we need to check the setting of the environment variable
# MACOSX_DEPLOYMENT_TARGET: configure bases some choices on it so
# it needs to be compatible.
@@ -361,7 +372,7 @@ def _init_posix():
if cur_target == '':
cur_target = cfg_target
os.putenv('MACOSX_DEPLOYMENT_TARGET', cfg_target)
- if cfg_target != cur_target:
+ elif map(int, cfg_target.split('.')) > map(int, cur_target.split('.')):
my_msg = ('$MACOSX_DEPLOYMENT_TARGET mismatch: now "%s" but "%s" during configure'
% (cur_target, cfg_target))
raise DistutilsPlatformError(my_msg)
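
[Editorial note] Comparing the deployment targets with map(int, ...) matters because dotted versions do not order correctly as strings; under Python 2, map returns a list, and lists compare element-wise:

    cfg_target, cur_target = '10.3', '10.10'

    # Lexicographically '10.3' > '10.10', which is backwards...
    assert cfg_target > cur_target
    # ...while the numeric, element-wise comparison is correct:
    assert map(int, cfg_target.split('.')) < map(int, cur_target.split('.'))
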
diff --git a/Lib/doctest.py b/Lib/doctest.py
index 6244fae..70c355a 100644
--- a/Lib/doctest.py
+++ b/Lib/doctest.py
@@ -236,6 +236,15 @@ def _normalize_module(module, depth=2):
else:
raise TypeError("Expected a module, string, or None")
+def _load_testfile(filename, package, module_relative):
+ if module_relative:
+ package = _normalize_module(package, 3)
+ filename = _module_relative_path(package, filename)
+ if hasattr(package, '__loader__'):
+ if hasattr(package.__loader__, 'get_data'):
+ return package.__loader__.get_data(filename), filename
+ return open(filename).read(), filename
+
def _indent(s, indent=4):
"""
    Add the given number of space characters to the beginning of every
@@ -1319,13 +1328,13 @@ class DocTestRunner:
__LINECACHE_FILENAME_RE = re.compile(r'<doctest '
r'(?P<name>[\w\.]+)'
r'\[(?P<examplenum>\d+)\]>$')
- def __patched_linecache_getlines(self, filename):
+ def __patched_linecache_getlines(self, filename, module_globals=None):
m = self.__LINECACHE_FILENAME_RE.match(filename)
if m and m.group('name') == self.test.name:
example = self.test.examples[int(m.group('examplenum'))]
return example.source.splitlines(True)
else:
- return self.save_linecache_getlines(filename)
+ return self.save_linecache_getlines(filename, module_globals)
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
@@ -1933,9 +1942,7 @@ def testfile(filename, module_relative=True, name=None, package=None,
"relative paths.")
# Relativize the path
- if module_relative:
- package = _normalize_module(package)
- filename = _module_relative_path(package, filename)
+ text, filename = _load_testfile(filename, package, module_relative)
# If no name was given, then use the file's name.
if name is None:
@@ -1955,8 +1962,7 @@ def testfile(filename, module_relative=True, name=None, package=None,
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
# Read the file, convert it to a test, and run it.
- s = open(filename).read()
- test = parser.get_doctest(s, globs, name, filename, 0)
+ test = parser.get_doctest(text, globs, name, filename, 0)
runner.run(test)
if report:
@@ -2336,15 +2342,13 @@ def DocFileTest(path, module_relative=True, package=None,
"relative paths.")
# Relativize the path.
- if module_relative:
- package = _normalize_module(package)
- path = _module_relative_path(package, path)
+ doc, path = _load_testfile(path, package, module_relative)
+
if "__file__" not in globs:
globs["__file__"] = path
# Find the file and read it.
name = os.path.basename(path)
- doc = open(path).read()
# Convert it to a test, and wrap it in a DocFileCase.
test = parser.get_doctest(doc, globs, name, path, 0)
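
[Editorial note] _load_testfile is what lets doctest files travel inside zipimported packages: when the package's __loader__ provides PEP 302's get_data(), the text is fetched through the loader rather than from the filesystem. The dispatch, reduced to a sketch:

    def load_text(filename, package=None):
        # Prefer a PEP 302 loader (zipimport, for instance) when the
        # package carries one; otherwise read from the filesystem.
        loader = getattr(package, '__loader__', None)
        if loader is not None and hasattr(loader, 'get_data'):
            return loader.get_data(filename)
        return open(filename).read()
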
diff --git a/Lib/dummy_thread.py b/Lib/dummy_thread.py
index fb3abbf..d69d840 100644
--- a/Lib/dummy_thread.py
+++ b/Lib/dummy_thread.py
@@ -113,6 +113,14 @@ class LockType(object):
self.locked_status = True
return True
+ __enter__ = acquire
+
+ def __exit__(self, typ, val, tb):
+ self.release()
+
+ def __context__(self):
+ return self
+
def release(self):
"""Release the dummy lock."""
# XXX Perhaps shouldn't actually bother to test? Could lead
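
[Editorial note] With __enter__ aliased to acquire() (plus the interim __context__ hook from the pre-final PEP 343 design), the dummy lock now works in a with statement just like the real thread lock. Usage on a 2.5-era interpreter:

    from __future__ import with_statement   # implicit from Python 2.6 on

    import dummy_thread

    lock = dummy_thread.allocate_lock()
    with lock:                 # __enter__ acquires, __exit__ releases
        pass
    assert lock.acquire(0)     # non-blocking: succeeds, lock was freed
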
diff --git a/Lib/easy_install.py b/Lib/easy_install.py
new file mode 100644
index 0000000..d87e984
--- /dev/null
+++ b/Lib/easy_install.py
@@ -0,0 +1,5 @@
+"""Run the EasyInstall command"""
+
+if __name__ == '__main__':
+ from setuptools.command.easy_install import main
+ main()
diff --git a/Lib/email/__init__.py b/Lib/email/__init__.py
index e5c0e2e..f01260f 100644
--- a/Lib/email/__init__.py
+++ b/Lib/email/__init__.py
@@ -4,9 +4,10 @@
"""A package for parsing, handling, and generating email messages."""
-__version__ = '3.0.1'
+__version__ = '4.0a2'
__all__ = [
+ # Old names
'base64MIME',
'Charset',
'Encoders',
@@ -27,6 +28,19 @@ __all__ = [
'Utils',
'message_from_string',
'message_from_file',
+    # New names
+ 'base64mime',
+ 'charset',
+ 'encoders',
+ 'errors',
+ 'generator',
+ 'header',
+ 'iterators',
+ 'message',
+ 'mime',
+ 'parser',
+ 'quoprimime',
+ 'utils',
]
@@ -39,7 +53,7 @@ def message_from_string(s, *args, **kws):
Optional _class and strict are passed to the Parser constructor.
"""
- from email.Parser import Parser
+ from email.parser import Parser
return Parser(*args, **kws).parsestr(s)
@@ -48,5 +62,62 @@ def message_from_file(fp, *args, **kws):
Optional _class and strict are passed to the Parser constructor.
"""
- from email.Parser import Parser
+ from email.parser import Parser
return Parser(*args, **kws).parse(fp)
+
+
+
+# Lazy loading to provide name mapping from new-style names (PEP 8 compatible
+# email 4.0 module names), to old-style names (email 3.0 module names).
+import sys
+
+class LazyImporter(object):
+ def __init__(self, module_name):
+ self.__name__ = 'email.' + module_name
+
+ def __getattr__(self, name):
+ __import__(self.__name__)
+ mod = sys.modules[self.__name__]
+ self.__dict__.update(mod.__dict__)
+ return getattr(mod, name)
+
+
+_LOWERNAMES = [
+ # email.<old name> -> email.<new name is lowercased old name>
+ 'Charset',
+ 'Encoders',
+ 'Errors',
+ 'FeedParser',
+ 'Generator',
+ 'Header',
+ 'Iterators',
+ 'Message',
+ 'Parser',
+ 'Utils',
+ 'base64MIME',
+ 'quopriMIME',
+ ]
+
+_MIMENAMES = [
+ # email.MIME<old name> -> email.mime.<new name is lowercased old name>
+ 'Audio',
+ 'Base',
+ 'Image',
+ 'Message',
+ 'Multipart',
+ 'NonMultipart',
+ 'Text',
+ ]
+
+for _name in _LOWERNAMES:
+ importer = LazyImporter(_name.lower())
+ sys.modules['email.' + _name] = importer
+ setattr(sys.modules['email'], _name, importer)
+
+
+import email.mime
+for _name in _MIMENAMES:
+ importer = LazyImporter('mime.' + _name.lower())
+ sys.modules['email.MIME' + _name] = importer
+ setattr(sys.modules['email'], 'MIME' + _name, importer)
+ setattr(sys.modules['email.mime'], _name, importer)
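
[Editorial note] Each LazyImporter squats in sys.modules under an old-style name; the first attribute access imports the real module, copies its namespace into the shim with __dict__.update (so __getattr__ never fires again), and delegates. The same shim in generic form, aliasing the stdlib string module under an invented name:

    import sys

    class LazyModule(object):
        def __init__(self, real_name):
            self.__name__ = real_name

        def __getattr__(self, name):
            # First access: import for real, then cache the namespace
            # so subsequent lookups bypass __getattr__ entirely.
            __import__(self.__name__)
            mod = sys.modules[self.__name__]
            self.__dict__.update(mod.__dict__)
            return getattr(mod, name)

    sys.modules['text_consts'] = LazyModule('string')  # invented alias
    import text_consts
    assert text_consts.digits == '0123456789'
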
diff --git a/Lib/email/_parseaddr.py b/Lib/email/_parseaddr.py
index 7d759ef..5821ddf 100644
--- a/Lib/email/_parseaddr.py
+++ b/Lib/email/_parseaddr.py
@@ -6,6 +6,13 @@
Lifted directly from rfc822.py. This should eventually be rewritten.
"""
+__all__ = [
+ 'mktime_tz',
+ 'parsedate',
+ 'parsedate_tz',
+ 'quote',
+ ]
+
import time
SPACE = ' '
@@ -117,7 +124,8 @@ def parsedate_tz(data):
else:
tzsign = 1
tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
- return yy, mm, dd, thh, tmm, tss, 0, 1, 0, tzoffset
+ # Daylight Saving Time flag is set to -1, since DST is unknown.
+ return yy, mm, dd, thh, tmm, tss, 0, 1, -1, tzoffset
def parsedate(data):
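
[Editorial note] The -1 in the DST slot is the time-module convention for "unknown", telling mktime to consult the local timezone rules instead of silently assuming standard time:

    import time

    # tm_isdst = -1 means "unknown": mktime decides from the local
    # timezone database whether DST applied at that instant.
    print time.mktime((2006, 4, 21, 10, 40, 58, 0, 1, -1))
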
diff --git a/Lib/email/base64MIME.py b/Lib/email/base64mime.py
index 6ed1d53..0129d9d 100644
--- a/Lib/email/base64MIME.py
+++ b/Lib/email/base64mime.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2002-2004 Python Software Foundation
+# Copyright (C) 2002-2006 Python Software Foundation
# Author: Ben Gertzfield
# Contact: email-sig@python.org
@@ -24,9 +24,21 @@ decoding. To deal with the various line wrapping issues, use the email.Header
module.
"""
+__all__ = [
+ 'base64_len',
+ 'body_decode',
+ 'body_encode',
+ 'decode',
+ 'decodestring',
+ 'encode',
+ 'encodestring',
+ 'header_encode',
+ ]
+
import re
+
from binascii import b2a_base64, a2b_base64
-from email.Utils import fix_eols
+from email.utils import fix_eols
CRLF = '\r\n'
NL = '\n'
diff --git a/Lib/email/Charset.py b/Lib/email/charset.py
index fd4043b..8f218b2 100644
--- a/Lib/email/Charset.py
+++ b/Lib/email/charset.py
@@ -2,9 +2,18 @@
# Author: Ben Gertzfield, Barry Warsaw
# Contact: email-sig@python.org
-import email.base64MIME
-import email.quopriMIME
-from email.Encoders import encode_7or8bit
+__all__ = [
+ 'Charset',
+ 'add_alias',
+ 'add_charset',
+ 'add_codec',
+ ]
+
+import email.base64mime
+import email.quoprimime
+
+from email import errors
+from email.encoders import encode_7or8bit
@@ -186,8 +195,17 @@ class Charset:
"""
def __init__(self, input_charset=DEFAULT_CHARSET):
# RFC 2046, $4.1.2 says charsets are not case sensitive. We coerce to
- # unicode because its .lower() is locale insensitive.
- input_charset = unicode(input_charset, 'ascii').lower()
+ # unicode because its .lower() is locale insensitive. If the argument
+ # is already a unicode, we leave it at that, but ensure that the
+ # charset is ASCII, as the standard (RFC XXX) requires.
+ try:
+ if isinstance(input_charset, unicode):
+ input_charset.encode('ascii')
+ else:
+ input_charset = unicode(input_charset, 'ascii')
+ except UnicodeError:
+ raise errors.CharsetError(input_charset)
+ input_charset = input_charset.lower()
# Set the input charset after filtering through the aliases
self.input_charset = ALIASES.get(input_charset, input_charset)
# We can try to guess which encoding and conversion to use by the
@@ -307,12 +325,12 @@ class Charset:
cset = self.get_output_charset()
# The len(s) of a 7bit encoding is len(s)
if self.header_encoding == BASE64:
- return email.base64MIME.base64_len(s) + len(cset) + MISC_LEN
+ return email.base64mime.base64_len(s) + len(cset) + MISC_LEN
elif self.header_encoding == QP:
- return email.quopriMIME.header_quopri_len(s) + len(cset) + MISC_LEN
+ return email.quoprimime.header_quopri_len(s) + len(cset) + MISC_LEN
elif self.header_encoding == SHORTEST:
- lenb64 = email.base64MIME.base64_len(s)
- lenqp = email.quopriMIME.header_quopri_len(s)
+ lenb64 = email.base64mime.base64_len(s)
+ lenqp = email.quoprimime.header_quopri_len(s)
return min(lenb64, lenqp) + len(cset) + MISC_LEN
else:
return len(s)
@@ -335,16 +353,16 @@ class Charset:
s = self.convert(s)
# 7bit/8bit encodings return the string unchanged (modulo conversions)
if self.header_encoding == BASE64:
- return email.base64MIME.header_encode(s, cset)
+ return email.base64mime.header_encode(s, cset)
elif self.header_encoding == QP:
- return email.quopriMIME.header_encode(s, cset, maxlinelen=None)
+ return email.quoprimime.header_encode(s, cset, maxlinelen=None)
elif self.header_encoding == SHORTEST:
- lenb64 = email.base64MIME.base64_len(s)
- lenqp = email.quopriMIME.header_quopri_len(s)
+ lenb64 = email.base64mime.base64_len(s)
+ lenqp = email.quoprimime.header_quopri_len(s)
if lenb64 < lenqp:
- return email.base64MIME.header_encode(s, cset)
+ return email.base64mime.header_encode(s, cset)
else:
- return email.quopriMIME.header_encode(s, cset, maxlinelen=None)
+ return email.quoprimime.header_encode(s, cset, maxlinelen=None)
else:
return s
@@ -363,8 +381,8 @@ class Charset:
s = self.convert(s)
        # 7bit/8bit encodings return the string unchanged (modulo conversions)
if self.body_encoding is BASE64:
- return email.base64MIME.body_encode(s)
+ return email.base64mime.body_encode(s)
elif self.body_encoding is QP:
- return email.quopriMIME.body_encode(s)
+ return email.quoprimime.body_encode(s)
else:
return s
diff --git a/Lib/email/Encoders.py b/Lib/email/encoders.py
index baac2a3..06016cd 100644
--- a/Lib/email/Encoders.py
+++ b/Lib/email/encoders.py
@@ -1,12 +1,22 @@
-# Copyright (C) 2001-2004 Python Software Foundation
+# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Encodings and related functions."""
+__all__ = [
+ 'encode_7or8bit',
+ 'encode_base64',
+ 'encode_noop',
+ 'encode_quopri',
+ ]
+
import base64
+
from quopri import encodestring as _encodestring
+
+
def _qencode(s):
enc = _encodestring(s, quotetabs=True)
# Must encode spaces, which quopri.encodestring() doesn't do
diff --git a/Lib/email/Errors.py b/Lib/email/errors.py
index e13a2c7..d52a624 100644
--- a/Lib/email/Errors.py
+++ b/Lib/email/errors.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2001-2004 Python Software Foundation
+# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
@@ -26,6 +26,10 @@ class MultipartConversionError(MessageError, TypeError):
"""Conversion to a multipart is prohibited."""
+class CharsetError(MessageError):
+ """An illegal charset was given."""
+
+
# These are parsing defects which the parser was able to work around.
class MessageDefect:
diff --git a/Lib/email/FeedParser.py b/Lib/email/feedparser.py
index a2130e2..afb02b3 100644
--- a/Lib/email/FeedParser.py
+++ b/Lib/email/feedparser.py
@@ -19,9 +19,12 @@ the current message. Defects are just instances that live on the message
object's .defects attribute.
"""
+__all__ = ['FeedParser']
+
import re
-from email import Errors
-from email import Message
+
+from email import errors
+from email import message
NLCRE = re.compile('\r\n|\r|\n')
NLCRE_bol = re.compile('(\r\n|\r|\n)')
@@ -130,7 +133,7 @@ class BufferedSubFile(object):
class FeedParser:
"""A feed-style parser of email."""
- def __init__(self, _factory=Message.Message):
+ def __init__(self, _factory=message.Message):
"""_factory is called with no arguments to create a new message obj"""
self._factory = _factory
self._input = BufferedSubFile()
@@ -164,7 +167,7 @@ class FeedParser:
# Look for final set of defects
if root.get_content_maintype() == 'multipart' \
and not root.is_multipart():
- root.defects.append(Errors.MultipartInvariantViolationDefect())
+ root.defects.append(errors.MultipartInvariantViolationDefect())
return root
def _new_message(self):
@@ -277,7 +280,7 @@ class FeedParser:
# defined a boundary. That's a problem which we'll handle by
# reading everything until the EOF and marking the message as
# defective.
- self._cur.defects.append(Errors.NoBoundaryInMultipartDefect())
+ self._cur.defects.append(errors.NoBoundaryInMultipartDefect())
lines = []
for line in self._input:
if line is NeedMoreData:
@@ -381,7 +384,7 @@ class FeedParser:
# that as a defect and store the captured text as the payload.
# Everything from here to the EOF is epilogue.
if capturing_preamble:
- self._cur.defects.append(Errors.StartBoundaryNotFoundDefect())
+ self._cur.defects.append(errors.StartBoundaryNotFoundDefect())
self._cur.set_payload(EMPTYSTRING.join(preamble))
epilogue = []
for line in self._input:
@@ -432,7 +435,7 @@ class FeedParser:
# The first line of the headers was a continuation. This
# is illegal, so let's note the defect, store the illegal
# line, and ignore it for purposes of headers.
- defect = Errors.FirstHeaderLineIsContinuationDefect(line)
+ defect = errors.FirstHeaderLineIsContinuationDefect(line)
self._cur.defects.append(defect)
continue
lastvalue.append(line)
@@ -460,13 +463,13 @@ class FeedParser:
else:
# Weirdly placed unix-from line. Note this as a defect
# and ignore it.
- defect = Errors.MisplacedEnvelopeHeaderDefect(line)
+ defect = errors.MisplacedEnvelopeHeaderDefect(line)
self._cur.defects.append(defect)
continue
# Split the line on the colon separating field name from value.
i = line.find(':')
if i < 0:
- defect = Errors.MalformedHeaderDefect(line)
+ defect = errors.MalformedHeaderDefect(line)
self._cur.defects.append(defect)
continue
lastheader = line[:i]
diff --git a/Lib/email/Generator.py b/Lib/email/generator.py
index 7969916..6e7a515 100644
--- a/Lib/email/Generator.py
+++ b/Lib/email/generator.py
@@ -4,14 +4,16 @@
"""Classes to generate plain text from a message object tree."""
+__all__ = ['Generator', 'DecodedGenerator']
+
import re
import sys
import time
import random
import warnings
-from cStringIO import StringIO
-from email.Header import Header
+from cStringIO import StringIO
+from email.header import Header
UNDERSCORE = '_'
NL = '\n'
@@ -81,12 +83,6 @@ class Generator:
print >> self._fp, ufrom
self._write(msg)
- # For backwards compatibility, but this is slower
- def __call__(self, msg, unixfrom=False):
- warnings.warn('__call__() deprecated; use flatten()',
- DeprecationWarning, 2)
- self.flatten(msg, unixfrom)
-
def clone(self, fp):
"""Clone this generator with the exact same options."""
return self.__class__(fp, self._mangle_from_, self._maxheaderlen)
diff --git a/Lib/email/Header.py b/Lib/email/header.py
index 5e24afe..183c337 100644
--- a/Lib/email/Header.py
+++ b/Lib/email/header.py
@@ -1,16 +1,23 @@
-# Copyright (C) 2002-2004 Python Software Foundation
+# Copyright (C) 2002-2006 Python Software Foundation
# Author: Ben Gertzfield, Barry Warsaw
# Contact: email-sig@python.org
"""Header encoding and decoding functionality."""
+__all__ = [
+ 'Header',
+ 'decode_header',
+ 'make_header',
+ ]
+
import re
import binascii
-import email.quopriMIME
-import email.base64MIME
-from email.Errors import HeaderParseError
-from email.Charset import Charset
+import email.quoprimime
+import email.base64mime
+
+from email.errors import HeaderParseError
+from email.charset import Charset
NL = '\n'
SPACE = ' '
@@ -42,7 +49,7 @@ fcre = re.compile(r'[\041-\176]+:$')
# Helpers
-_max_append = email.quopriMIME._max_append
+_max_append = email.quoprimime._max_append
@@ -82,10 +89,10 @@ def decode_header(header):
encoded = parts[2]
dec = None
if encoding == 'q':
- dec = email.quopriMIME.header_decode(encoded)
+ dec = email.quoprimime.header_decode(encoded)
elif encoding == 'b':
try:
- dec = email.base64MIME.decode(encoded)
+ dec = email.base64mime.decode(encoded)
except binascii.Error:
# Turn this into a higher level exception. BAW: Right
# now we throw the lower level exception away but
diff --git a/Lib/email/Iterators.py b/Lib/email/iterators.py
index 74a93c7..e99f228 100644
--- a/Lib/email/Iterators.py
+++ b/Lib/email/iterators.py
@@ -1,9 +1,16 @@
-# Copyright (C) 2001-2004 Python Software Foundation
+# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Various types of useful iterators and generators."""
+__all__ = [
+ 'body_line_iterator',
+ 'typed_subpart_iterator',
+ 'walk',
+ # Do not include _structure() since it's part of the debugging API.
+ ]
+
import sys
from cStringIO import StringIO
@@ -25,7 +32,6 @@ def walk(self):
# These two functions are imported into the Iterators.py interface module.
-# The Python 2.2 version uses generators for efficiency.
def body_line_iterator(msg, decode=False):
"""Iterate over the parts, returning string payloads line-by-line.
diff --git a/Lib/email/Message.py b/Lib/email/message.py
index bc76416..50d90b4 100644
--- a/Lib/email/Message.py
+++ b/Lib/email/message.py
@@ -4,6 +4,8 @@
"""Basic message object for the email package object model."""
+__all__ = ['Message']
+
import re
import uu
import binascii
@@ -11,9 +13,9 @@ import warnings
from cStringIO import StringIO
# Intrapackage imports
-from email import Utils
-from email import Errors
-from email import Charset
+import email.charset
+from email import utils
+from email import errors
SEMISPACE = '; '
@@ -41,11 +43,11 @@ def _formatparam(param, value=None, quote=True):
if isinstance(value, tuple):
# Encode as per RFC 2231
param += '*'
- value = Utils.encode_rfc2231(value[2], value[0], value[1])
+ value = utils.encode_rfc2231(value[2], value[0], value[1])
# BAW: Please check this. I think that if quote is set it should
# force quoting even if not necessary.
if quote or tspecials.search(value):
- return '%s="%s"' % (param, Utils.quote(value))
+ return '%s="%s"' % (param, utils.quote(value))
else:
return '%s=%s' % (param, value)
else:
@@ -70,14 +72,14 @@ def _parseparam(s):
def _unquotevalue(value):
- # This is different than Utils.collapse_rfc2231_value() because it doesn't
+ # This is different than utils.collapse_rfc2231_value() because it doesn't
# try to convert the value to a unicode. Message.get_param() and
# Message.get_params() are both currently defined to return the tuple in
# the face of RFC 2231 parameters.
if isinstance(value, tuple):
- return value[0], value[1], Utils.unquote(value[2])
+ return value[0], value[1], utils.unquote(value[2])
else:
- return Utils.unquote(value)
+ return utils.unquote(value)
@@ -188,17 +190,17 @@ class Message:
return None
cte = self.get('content-transfer-encoding', '').lower()
if cte == 'quoted-printable':
- return Utils._qdecode(payload)
+ return utils._qdecode(payload)
elif cte == 'base64':
try:
- return Utils._bdecode(payload)
+ return utils._bdecode(payload)
except binascii.Error:
# Incorrect padding
return payload
elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
sfp = StringIO()
try:
- uu.decode(StringIO(payload+'\n'), sfp)
+ uu.decode(StringIO(payload+'\n'), sfp, quiet=True)
payload = sfp.getvalue()
except uu.Error:
# Some decoding problem
@@ -237,8 +239,8 @@ class Message:
self._charset = None
return
if isinstance(charset, str):
- charset = Charset.Charset(charset)
- if not isinstance(charset, Charset.Charset):
+ charset = email.charset.Charset(charset)
+ if not isinstance(charset, email.charset.Charset):
raise TypeError(charset)
# BAW: should we accept strings that can serve as arguments to the
# Charset constructor?
@@ -413,49 +415,6 @@ class Message:
raise KeyError(_name)
#
- # Deprecated methods. These will be removed in email 3.1.
- #
-
- def get_type(self, failobj=None):
- """Returns the message's content type.
-
- The returned string is coerced to lowercase and returned as a single
- string of the form `maintype/subtype'. If there was no Content-Type
- header in the message, failobj is returned (defaults to None).
- """
- warnings.warn('get_type() deprecated; use get_content_type()',
- DeprecationWarning, 2)
- missing = object()
- value = self.get('content-type', missing)
- if value is missing:
- return failobj
- return paramre.split(value)[0].lower().strip()
-
- def get_main_type(self, failobj=None):
- """Return the message's main content type if present."""
- warnings.warn('get_main_type() deprecated; use get_content_maintype()',
- DeprecationWarning, 2)
- missing = object()
- ctype = self.get_type(missing)
- if ctype is missing:
- return failobj
- if ctype.count('/') <> 1:
- return failobj
- return ctype.split('/')[0]
-
- def get_subtype(self, failobj=None):
- """Return the message's content subtype if present."""
- warnings.warn('get_subtype() deprecated; use get_content_subtype()',
- DeprecationWarning, 2)
- missing = object()
- ctype = self.get_type(missing)
- if ctype is missing:
- return failobj
- if ctype.count('/') <> 1:
- return failobj
- return ctype.split('/')[1]
-
- #
# Use these three methods instead of the three above.
#
@@ -537,7 +496,7 @@ class Message:
name = p.strip()
val = ''
params.append((name, val))
- params = Utils.decode_params(params)
+ params = utils.decode_params(params)
return params
def get_params(self, failobj=None, header='content-type', unquote=True):
@@ -714,7 +673,7 @@ class Message:
filename = self.get_param('name', missing, 'content-disposition')
if filename is missing:
return failobj
- return Utils.collapse_rfc2231_value(filename).strip()
+ return utils.collapse_rfc2231_value(filename).strip()
def get_boundary(self, failobj=None):
"""Return the boundary associated with the payload if present.
@@ -727,7 +686,7 @@ class Message:
if boundary is missing:
return failobj
# RFC 2046 says that boundaries may begin but not end in w/s
- return Utils.collapse_rfc2231_value(boundary).rstrip()
+ return utils.collapse_rfc2231_value(boundary).rstrip()
def set_boundary(self, boundary):
"""Set the boundary parameter in Content-Type to 'boundary'.
@@ -744,7 +703,7 @@ class Message:
if params is missing:
# There was no Content-Type header, and we don't know what type
# to set it to, so raise an exception.
- raise Errors.HeaderParseError, 'No Content-Type header found'
+ raise errors.HeaderParseError('No Content-Type header found')
newparams = []
foundp = False
for pk, pv in params:
diff --git a/Lib/email/mime/__init__.py b/Lib/email/mime/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/Lib/email/mime/__init__.py
diff --git a/Lib/email/mime/application.py b/Lib/email/mime/application.py
new file mode 100644
index 0000000..6f8bb8a
--- /dev/null
+++ b/Lib/email/mime/application.py
@@ -0,0 +1,36 @@
+# Copyright (C) 2001-2006 Python Software Foundation
+# Author: Keith Dart
+# Contact: email-sig@python.org
+
+"""Class representing application/* type MIME documents."""
+
+__all__ = ["MIMEApplication"]
+
+from email import encoders
+from email.mime.nonmultipart import MIMENonMultipart
+
+
+class MIMEApplication(MIMENonMultipart):
+ """Class for generating application/* MIME documents."""
+
+ def __init__(self, _data, _subtype='octet-stream',
+ _encoder=encoders.encode_base64, **_params):
+ """Create an application/* type MIME document.
+
+        _data is a string containing the raw application data.
+
+ _subtype is the MIME content type subtype, defaulting to
+ 'octet-stream'.
+
+ _encoder is a function which will perform the actual encoding for
+ transport of the application data, defaulting to base64 encoding.
+
+ Any additional keyword arguments are passed to the base class
+ constructor, which turns them into parameters on the Content-Type
+ header.
+ """
+ if _subtype is None:
+ raise TypeError('Invalid application MIME subtype')
+ MIMENonMultipart.__init__(self, 'application', _subtype, **_params)
+ self.set_payload(_data)
+ _encoder(self)
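
[Editorial note] The new class rounds out the email.mime package for arbitrary binary payloads. A usage sketch; the payload and filename are made up:

    from email.mime.application import MIMEApplication
    from email.mime.multipart import MIMEMultipart

    outer = MIMEMultipart()
    payload = '\x00\x01\x02 arbitrary bytes'     # stand-in binary data
    part = MIMEApplication(payload)              # base64-encoded by default
    part.add_header('Content-Disposition', 'attachment',
                    filename='blob.bin')
    outer.attach(part)
    print part['Content-Type']                   # application/octet-stream
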
diff --git a/Lib/email/MIMEAudio.py b/Lib/email/mime/audio.py
index 266ec4c..c7290c4 100644
--- a/Lib/email/MIMEAudio.py
+++ b/Lib/email/mime/audio.py
@@ -1,15 +1,16 @@
-# Copyright (C) 2001-2004 Python Software Foundation
+# Copyright (C) 2001-2006 Python Software Foundation
# Author: Anthony Baxter
# Contact: email-sig@python.org
"""Class representing audio/* type MIME documents."""
+__all__ = ['MIMEAudio']
+
import sndhdr
-from cStringIO import StringIO
-from email import Errors
-from email import Encoders
-from email.MIMENonMultipart import MIMENonMultipart
+from cStringIO import StringIO
+from email import encoders
+from email.mime.nonmultipart import MIMENonMultipart
@@ -42,7 +43,7 @@ class MIMEAudio(MIMENonMultipart):
"""Class for generating audio/* MIME documents."""
def __init__(self, _audiodata, _subtype=None,
- _encoder=Encoders.encode_base64, **_params):
+ _encoder=encoders.encode_base64, **_params):
"""Create an audio/* type MIME document.
_audiodata is a string containing the raw audio data. If this data
diff --git a/Lib/email/MIMEBase.py b/Lib/email/mime/base.py
index 88691f8..ac91925 100644
--- a/Lib/email/MIMEBase.py
+++ b/Lib/email/mime/base.py
@@ -1,14 +1,16 @@
-# Copyright (C) 2001-2004 Python Software Foundation
+# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Base class for MIME specializations."""
-from email import Message
+__all__ = ['MIMEBase']
+
+from email import message
-class MIMEBase(Message.Message):
+class MIMEBase(message.Message):
"""Base class for MIME specializations."""
def __init__(self, _maintype, _subtype, **_params):
@@ -18,7 +20,7 @@ class MIMEBase(Message.Message):
arguments. Additional parameters for this header are taken from the
keyword arguments.
"""
- Message.Message.__init__(self)
+ message.Message.__init__(self)
ctype = '%s/%s' % (_maintype, _subtype)
self.add_header('Content-Type', ctype, **_params)
self['MIME-Version'] = '1.0'
diff --git a/Lib/email/MIMEImage.py b/Lib/email/mime/image.py
index a658067..5563823 100644
--- a/Lib/email/MIMEImage.py
+++ b/Lib/email/mime/image.py
@@ -1,14 +1,15 @@
-# Copyright (C) 2001-2004 Python Software Foundation
+# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Class representing image/* type MIME documents."""
+__all__ = ['MIMEImage']
+
import imghdr
-from email import Errors
-from email import Encoders
-from email.MIMENonMultipart import MIMENonMultipart
+from email import encoders
+from email.mime.nonmultipart import MIMENonMultipart
@@ -16,7 +17,7 @@ class MIMEImage(MIMENonMultipart):
"""Class for generating image/* type MIME documents."""
def __init__(self, _imagedata, _subtype=None,
- _encoder=Encoders.encode_base64, **_params):
+ _encoder=encoders.encode_base64, **_params):
"""Create an image/* type MIME document.
_imagedata is a string containing the raw image data. If this data
diff --git a/Lib/email/MIMEMessage.py b/Lib/email/mime/message.py
index 3021934..275dbfd 100644
--- a/Lib/email/MIMEMessage.py
+++ b/Lib/email/mime/message.py
@@ -1,11 +1,13 @@
-# Copyright (C) 2001-2004 Python Software Foundation
+# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Class representing message/* MIME documents."""
-from email import Message
-from email.MIMENonMultipart import MIMENonMultipart
+__all__ = ['MIMEMessage']
+
+from email import message
+from email.mime.nonmultipart import MIMENonMultipart
@@ -23,10 +25,10 @@ class MIMEMessage(MIMENonMultipart):
the term "rfc822" is technically outdated by RFC 2822).
"""
MIMENonMultipart.__init__(self, 'message', _subtype)
- if not isinstance(_msg, Message.Message):
+ if not isinstance(_msg, message.Message):
raise TypeError('Argument is not an instance of Message')
# It's convenient to use this base class method. We need to do it
# this way or we'll get an exception
- Message.Message.attach(self, _msg)
+ message.Message.attach(self, _msg)
# And be sure our default type is set correctly
self.set_default_type('message/rfc822')
diff --git a/Lib/email/MIMEMultipart.py b/Lib/email/mime/multipart.py
index 9072a64..5c8c9db 100644
--- a/Lib/email/MIMEMultipart.py
+++ b/Lib/email/mime/multipart.py
@@ -1,14 +1,16 @@
-# Copyright (C) 2002-2004 Python Software Foundation
+# Copyright (C) 2002-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Base class for MIME multipart/* type messages."""
-from email import MIMEBase
+__all__ = ['MIMEMultipart']
+
+from email.mime.base import MIMEBase
-class MIMEMultipart(MIMEBase.MIMEBase):
+class MIMEMultipart(MIMEBase):
"""Base class for MIME multipart/* type messages."""
def __init__(self, _subtype='mixed', boundary=None, _subparts=None,
@@ -31,7 +33,7 @@ class MIMEMultipart(MIMEBase.MIMEBase):
Additional parameters for the Content-Type header are taken from the
keyword arguments (or passed into the _params argument).
"""
- MIMEBase.MIMEBase.__init__(self, 'multipart', _subtype, **_params)
+ MIMEBase.__init__(self, 'multipart', _subtype, **_params)
if _subparts:
for p in _subparts:
self.attach(p)
diff --git a/Lib/email/MIMENonMultipart.py b/Lib/email/mime/nonmultipart.py
index 4195d2a..dd280b5 100644
--- a/Lib/email/MIMENonMultipart.py
+++ b/Lib/email/mime/nonmultipart.py
@@ -1,15 +1,17 @@
-# Copyright (C) 2002-2004 Python Software Foundation
+# Copyright (C) 2002-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Base class for MIME type messages that are not multipart."""
-from email import Errors
-from email import MIMEBase
+__all__ = ['MIMENonMultipart']
+
+from email import errors
+from email.mime.base import MIMEBase
-class MIMENonMultipart(MIMEBase.MIMEBase):
+class MIMENonMultipart(MIMEBase):
"""Base class for MIME multipart/* type messages."""
__pychecker__ = 'unusednames=payload'
@@ -18,7 +20,7 @@ class MIMENonMultipart(MIMEBase.MIMEBase):
# The public API prohibits attaching multiple subparts to MIMEBase
# derived subtypes since none of them are, by definition, of content
# type multipart/*
- raise Errors.MultipartConversionError(
+ raise errors.MultipartConversionError(
'Cannot attach additional subparts to non-multipart/*')
del __pychecker__
diff --git a/Lib/email/MIMEText.py b/Lib/email/mime/text.py
index 5ef1876..5747db5 100644
--- a/Lib/email/MIMEText.py
+++ b/Lib/email/mime/text.py
@@ -1,11 +1,13 @@
-# Copyright (C) 2001-2004 Python Software Foundation
+# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Class representing text/* type MIME documents."""
-from email.MIMENonMultipart import MIMENonMultipart
-from email.Encoders import encode_7or8bit
+__all__ = ['MIMEText']
+
+from email.encoders import encode_7or8bit
+from email.mime.nonmultipart import MIMENonMultipart
diff --git a/Lib/email/Parser.py b/Lib/email/parser.py
index 0c05224..2fcaf25 100644
--- a/Lib/email/Parser.py
+++ b/Lib/email/parser.py
@@ -1,13 +1,16 @@
-# Copyright (C) 2001-2004 Python Software Foundation
+# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw, Thomas Wouters, Anthony Baxter
# Contact: email-sig@python.org
"""A parser of RFC 2822 and MIME email messages."""
+__all__ = ['Parser', 'HeaderParser']
+
import warnings
from cStringIO import StringIO
-from email.FeedParser import FeedParser
-from email.Message import Message
+
+from email.feedparser import FeedParser
+from email.message import Message
diff --git a/Lib/email/quopriMIME.py b/Lib/email/quoprimime.py
index a9b5d49..a5658dd 100644
--- a/Lib/email/quopriMIME.py
+++ b/Lib/email/quoprimime.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2001-2004 Python Software Foundation
+# Copyright (C) 2001-2006 Python Software Foundation
# Author: Ben Gertzfield
# Contact: email-sig@python.org
@@ -26,9 +26,27 @@ does dumb encoding and decoding. To deal with the various line
wrapping issues, use the email.Header module.
"""
+__all__ = [
+ 'body_decode',
+ 'body_encode',
+ 'body_quopri_check',
+ 'body_quopri_len',
+ 'decode',
+ 'decodestring',
+ 'encode',
+ 'encodestring',
+ 'header_decode',
+ 'header_encode',
+ 'header_quopri_check',
+ 'header_quopri_len',
+ 'quote',
+ 'unquote',
+ ]
+
import re
+
from string import hexdigits
-from email.Utils import fix_eols
+from email.utils import fix_eols
CRLF = '\r\n'
NL = '\n'
diff --git a/Lib/email/test/test_email.py b/Lib/email/test/test_email.py
index 5a42c227..d977693 100644
--- a/Lib/email/test/test_email.py
+++ b/Lib/email/test/test_email.py
@@ -39,9 +39,6 @@ NL = '\n'
EMPTYSTRING = ''
SPACE = ' '
-# We don't care about DeprecationWarnings
-warnings.filterwarnings('ignore', '', DeprecationWarning, __name__)
-
def openfile(filename, mode='r'):
@@ -87,7 +84,7 @@ class TestMessageAPI(TestEmailBase):
charset = Charset('iso-8859-1')
msg.set_charset(charset)
eq(msg['mime-version'], '1.0')
- eq(msg.get_type(), 'text/plain')
+ eq(msg.get_content_type(), 'text/plain')
eq(msg['content-type'], 'text/plain; charset="iso-8859-1"')
eq(msg.get_param('charset'), 'iso-8859-1')
eq(msg['content-transfer-encoding'], 'quoted-printable')
@@ -211,6 +208,19 @@ class TestMessageAPI(TestEmailBase):
msg.set_payload('foo')
eq(msg.get_payload(decode=True), 'foo')
+ def test_decode_bogus_uu_payload_quietly(self):
+ msg = Message()
+ msg.set_payload('begin 664 foo.txt\n%<W1F=0000H \n \nend\n')
+ msg['Content-Transfer-Encoding'] = 'x-uuencode'
+ old_stderr = sys.stderr
+ try:
+ sys.stderr = sfp = StringIO()
+ # We don't care about the payload
+ msg.get_payload(decode=True)
+ finally:
+ sys.stderr = old_stderr
+ self.assertEqual(sfp.getvalue(), '')
+
def test_decoded_generator(self):
eq = self.assertEqual
msg = self._msgobj('msg_07.txt')
@@ -893,7 +903,7 @@ class TestMIMEAudio(unittest.TestCase):
self._au = MIMEAudio(self._audiodata)
def test_guess_minor_type(self):
- self.assertEqual(self._au.get_type(), 'audio/basic')
+ self.assertEqual(self._au.get_content_type(), 'audio/basic')
def test_encoding(self):
payload = self._au.get_payload()
@@ -901,7 +911,7 @@ class TestMIMEAudio(unittest.TestCase):
def test_checkSetMinor(self):
au = MIMEAudio(self._audiodata, 'fish')
- self.assertEqual(au.get_type(), 'audio/fish')
+ self.assertEqual(au.get_content_type(), 'audio/fish')
def test_add_header(self):
eq = self.assertEqual
@@ -936,7 +946,7 @@ class TestMIMEImage(unittest.TestCase):
self._im = MIMEImage(self._imgdata)
def test_guess_minor_type(self):
- self.assertEqual(self._im.get_type(), 'image/gif')
+ self.assertEqual(self._im.get_content_type(), 'image/gif')
def test_encoding(self):
payload = self._im.get_payload()
@@ -944,7 +954,7 @@ class TestMIMEImage(unittest.TestCase):
def test_checkSetMinor(self):
im = MIMEImage(self._imgdata, 'fish')
- self.assertEqual(im.get_type(), 'image/fish')
+ self.assertEqual(im.get_content_type(), 'image/fish')
def test_add_header(self):
eq = self.assertEqual
@@ -976,7 +986,7 @@ class TestMIMEText(unittest.TestCase):
def test_types(self):
eq = self.assertEqual
unless = self.failUnless
- eq(self._msg.get_type(), 'text/plain')
+ eq(self._msg.get_content_type(), 'text/plain')
eq(self._msg.get_param('charset'), 'us-ascii')
missing = []
unless(self._msg.get_param('foobar', missing) is missing)
@@ -1045,7 +1055,7 @@ This is the dingus fish.
# tests
m = self._msg
unless(m.is_multipart())
- eq(m.get_type(), 'multipart/mixed')
+ eq(m.get_content_type(), 'multipart/mixed')
eq(len(m.get_payload()), 2)
raises(IndexError, m.get_payload, 2)
m0 = m.get_payload(0)
@@ -1379,7 +1389,7 @@ class TestNonConformant(TestEmailBase):
def test_parse_missing_minor_type(self):
eq = self.assertEqual
msg = self._msgobj('msg_14.txt')
- eq(msg.get_type(), 'text')
+ eq(msg.get_content_type(), 'text/plain')
eq(msg.get_content_maintype(), 'text')
eq(msg.get_content_subtype(), 'plain')
@@ -1531,7 +1541,7 @@ class TestMIMEMessage(TestEmailBase):
m = Message()
m['Subject'] = subject
r = MIMEMessage(m)
- eq(r.get_type(), 'message/rfc822')
+ eq(r.get_content_type(), 'message/rfc822')
payload = r.get_payload()
unless(isinstance(payload, list))
eq(len(payload), 1)
@@ -1572,7 +1582,7 @@ Here is the body of the message.
eq = self.assertEqual
unless = self.failUnless
msg = self._msgobj('msg_11.txt')
- eq(msg.get_type(), 'message/rfc822')
+ eq(msg.get_content_type(), 'message/rfc822')
payload = msg.get_payload()
unless(isinstance(payload, list))
eq(len(payload), 1)
@@ -1586,12 +1596,12 @@ Here is the body of the message.
unless = self.failUnless
# msg 16 is a Delivery Status Notification, see RFC 1894
msg = self._msgobj('msg_16.txt')
- eq(msg.get_type(), 'multipart/report')
+ eq(msg.get_content_type(), 'multipart/report')
unless(msg.is_multipart())
eq(len(msg.get_payload()), 3)
# Subpart 1 is a text/plain, human readable section
subpart = msg.get_payload(0)
- eq(subpart.get_type(), 'text/plain')
+ eq(subpart.get_content_type(), 'text/plain')
eq(subpart.get_payload(), """\
This report relates to a message you sent with the following header fields:
@@ -1611,7 +1621,7 @@ Your message cannot be delivered to the following recipients:
# consists of two blocks of headers, represented by two nested Message
# objects.
subpart = msg.get_payload(1)
- eq(subpart.get_type(), 'message/delivery-status')
+ eq(subpart.get_content_type(), 'message/delivery-status')
eq(len(subpart.get_payload()), 2)
# message/delivery-status should treat each block as a bunch of
# headers, i.e. a bunch of Message objects.
@@ -1629,13 +1639,13 @@ Your message cannot be delivered to the following recipients:
eq(dsn2.get_param('rfc822', header='final-recipient'), '')
# Subpart 3 is the original message
subpart = msg.get_payload(2)
- eq(subpart.get_type(), 'message/rfc822')
+ eq(subpart.get_content_type(), 'message/rfc822')
payload = subpart.get_payload()
unless(isinstance(payload, list))
eq(len(payload), 1)
subsubpart = payload[0]
unless(isinstance(subsubpart, Message))
- eq(subsubpart.get_type(), 'text/plain')
+ eq(subsubpart.get_content_type(), 'text/plain')
eq(subsubpart['message-id'],
'<002001c144a6$8752e060$56104586@oxy.edu>')
@@ -1706,16 +1716,16 @@ Two
fp.close()
container1 = msg.get_payload(0)
eq(container1.get_default_type(), 'message/rfc822')
- eq(container1.get_type(), None)
+ eq(container1.get_content_type(), 'message/rfc822')
container2 = msg.get_payload(1)
eq(container2.get_default_type(), 'message/rfc822')
- eq(container2.get_type(), None)
+ eq(container2.get_content_type(), 'message/rfc822')
container1a = container1.get_payload(0)
eq(container1a.get_default_type(), 'text/plain')
- eq(container1a.get_type(), 'text/plain')
+ eq(container1a.get_content_type(), 'text/plain')
container2a = container2.get_payload(0)
eq(container2a.get_default_type(), 'text/plain')
- eq(container2a.get_type(), 'text/plain')
+ eq(container2a.get_content_type(), 'text/plain')
def test_default_type_with_explicit_container_type(self):
eq = self.assertEqual
@@ -1726,16 +1736,16 @@ Two
fp.close()
container1 = msg.get_payload(0)
eq(container1.get_default_type(), 'message/rfc822')
- eq(container1.get_type(), 'message/rfc822')
+ eq(container1.get_content_type(), 'message/rfc822')
container2 = msg.get_payload(1)
eq(container2.get_default_type(), 'message/rfc822')
- eq(container2.get_type(), 'message/rfc822')
+ eq(container2.get_content_type(), 'message/rfc822')
container1a = container1.get_payload(0)
eq(container1a.get_default_type(), 'text/plain')
- eq(container1a.get_type(), 'text/plain')
+ eq(container1a.get_content_type(), 'text/plain')
container2a = container2.get_payload(0)
eq(container2a.get_default_type(), 'text/plain')
- eq(container2a.get_type(), 'text/plain')
+ eq(container2a.get_content_type(), 'text/plain')
def test_default_type_non_parsed(self):
eq = self.assertEqual
@@ -1750,9 +1760,9 @@ Two
subpart2 = MIMEMessage(subpart2a)
container.attach(subpart1)
container.attach(subpart2)
- eq(subpart1.get_type(), 'message/rfc822')
+ eq(subpart1.get_content_type(), 'message/rfc822')
eq(subpart1.get_default_type(), 'message/rfc822')
- eq(subpart2.get_type(), 'message/rfc822')
+ eq(subpart2.get_content_type(), 'message/rfc822')
eq(subpart2.get_default_type(), 'message/rfc822')
neq(container.as_string(0), '''\
Content-Type: multipart/digest; boundary="BOUNDARY"
@@ -1784,9 +1794,9 @@ message 2
del subpart1['mime-version']
del subpart2['content-type']
del subpart2['mime-version']
- eq(subpart1.get_type(), None)
+ eq(subpart1.get_content_type(), 'message/rfc822')
eq(subpart1.get_default_type(), 'message/rfc822')
- eq(subpart2.get_type(), None)
+ eq(subpart2.get_content_type(), 'message/rfc822')
eq(subpart2.get_default_type(), 'message/rfc822')
neq(container.as_string(0), '''\
Content-Type: multipart/digest; boundary="BOUNDARY"
@@ -1847,7 +1857,7 @@ class TestIdempotent(TestEmailBase):
def test_parse_text_message(self):
eq = self.assertEquals
msg, text = self._msgobj('msg_01.txt')
- eq(msg.get_type(), 'text/plain')
+ eq(msg.get_content_type(), 'text/plain')
eq(msg.get_content_maintype(), 'text')
eq(msg.get_content_subtype(), 'plain')
eq(msg.get_params()[1], ('charset', 'us-ascii'))
@@ -1859,7 +1869,7 @@ class TestIdempotent(TestEmailBase):
def test_parse_untyped_message(self):
eq = self.assertEquals
msg, text = self._msgobj('msg_03.txt')
- eq(msg.get_type(), None)
+ eq(msg.get_content_type(), 'text/plain')
eq(msg.get_params(), None)
eq(msg.get_param('charset'), None)
self._idempotent(msg, text)
@@ -1933,7 +1943,7 @@ class TestIdempotent(TestEmailBase):
unless = self.failUnless
# Get a message object and reset the seek pointer for other tests
msg, text = self._msgobj('msg_05.txt')
- eq(msg.get_type(), 'multipart/report')
+ eq(msg.get_content_type(), 'multipart/report')
# Test the Content-Type: parameters
params = {}
for pk, pv in msg.get_params():
@@ -1945,13 +1955,13 @@ class TestIdempotent(TestEmailBase):
eq(len(msg.get_payload()), 3)
# Make sure the subparts are what we expect
msg1 = msg.get_payload(0)
- eq(msg1.get_type(), 'text/plain')
+ eq(msg1.get_content_type(), 'text/plain')
eq(msg1.get_payload(), 'Yadda yadda yadda\n')
msg2 = msg.get_payload(1)
- eq(msg2.get_type(), None)
+ eq(msg2.get_content_type(), 'text/plain')
eq(msg2.get_payload(), 'Yadda yadda yadda\n')
msg3 = msg.get_payload(2)
- eq(msg3.get_type(), 'message/rfc822')
+ eq(msg3.get_content_type(), 'message/rfc822')
self.failUnless(isinstance(msg3, Message))
payload = msg3.get_payload()
unless(isinstance(payload, list))
@@ -1965,7 +1975,7 @@ class TestIdempotent(TestEmailBase):
unless = self.failUnless
msg, text = self._msgobj('msg_06.txt')
# Check some of the outer headers
- eq(msg.get_type(), 'message/rfc822')
+ eq(msg.get_content_type(), 'message/rfc822')
# Make sure the payload is a list of exactly one sub-Message, and that
# that submessage has a type of text/plain
payload = msg.get_payload()
@@ -1973,7 +1983,7 @@ class TestIdempotent(TestEmailBase):
eq(len(payload), 1)
msg1 = payload[0]
self.failUnless(isinstance(msg1, Message))
- eq(msg1.get_type(), 'text/plain')
+ eq(msg1.get_content_type(), 'text/plain')
self.failUnless(isinstance(msg1.get_payload(), str))
eq(msg1.get_payload(), '\n')
@@ -2058,13 +2068,19 @@ class TestMiscellaneous(TestEmailBase):
module = __import__('email')
all = module.__all__
all.sort()
- self.assertEqual(all, ['Charset', 'Encoders', 'Errors', 'Generator',
- 'Header', 'Iterators', 'MIMEAudio', 'MIMEBase',
- 'MIMEImage', 'MIMEMessage', 'MIMEMultipart',
- 'MIMENonMultipart', 'MIMEText', 'Message',
- 'Parser', 'Utils', 'base64MIME',
- 'message_from_file', 'message_from_string',
- 'quopriMIME'])
+ self.assertEqual(all, [
+ # Old names
+ 'Charset', 'Encoders', 'Errors', 'Generator',
+ 'Header', 'Iterators', 'MIMEAudio', 'MIMEBase',
+ 'MIMEImage', 'MIMEMessage', 'MIMEMultipart',
+ 'MIMENonMultipart', 'MIMEText', 'Message',
+ 'Parser', 'Utils', 'base64MIME',
+ # New names
+ 'base64mime', 'charset', 'encoders', 'errors', 'generator',
+ 'header', 'iterators', 'message', 'message_from_file',
+ 'message_from_string', 'mime', 'parser',
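+ # (ASCII sort order puts old-name 'quopriMIME' here, before 'quoprimime')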
+ 'quopriMIME', 'quoprimime', 'utils',
+ ])
def test_formatdate(self):
now = time.time()
@@ -2097,12 +2113,12 @@ class TestMiscellaneous(TestEmailBase):
def test_parsedate_no_dayofweek(self):
eq = self.assertEqual
eq(Utils.parsedate_tz('25 Feb 2003 13:47:26 -0800'),
- (2003, 2, 25, 13, 47, 26, 0, 1, 0, -28800))
+ (2003, 2, 25, 13, 47, 26, 0, 1, -1, -28800))
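+ # (index 8 flips from 0 to -1: the DST flag is now reported as unknown)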
def test_parsedate_compact_no_dayofweek(self):
eq = self.assertEqual
eq(Utils.parsedate_tz('5 Feb 2003 13:47:26 -0800'),
- (2003, 2, 5, 13, 47, 26, 0, 1, 0, -28800))
+ (2003, 2, 5, 13, 47, 26, 0, 1, -1, -28800))
def test_parsedate_acceptable_to_time_functions(self):
eq = self.assertEqual
@@ -2356,7 +2372,7 @@ class TestParsers(TestEmailBase):
fp.close()
eq(msg['from'], 'ppp-request@zzz.org')
eq(msg['to'], 'ppp@zzz.org')
- eq(msg.get_type(), 'multipart/mixed')
+ eq(msg.get_content_type(), 'multipart/mixed')
self.failIf(msg.is_multipart())
self.failUnless(isinstance(msg.get_payload(), str))
@@ -2405,10 +2421,10 @@ Here's the message body
fp.close()
eq(len(msg.get_payload()), 2)
part1 = msg.get_payload(0)
- eq(part1.get_type(), 'text/plain')
+ eq(part1.get_content_type(), 'text/plain')
eq(part1.get_payload(), 'Simple email with attachment.\r\n\r\n')
part2 = msg.get_payload(1)
- eq(part2.get_type(), 'application/riscos')
+ eq(part2.get_content_type(), 'application/riscos')
def test_multipart_digest_with_extra_mime_headers(self):
eq = self.assertEqual
@@ -2427,21 +2443,21 @@ Here's the message body
eq(msg.is_multipart(), 1)
eq(len(msg.get_payload()), 2)
part1 = msg.get_payload(0)
- eq(part1.get_type(), 'message/rfc822')
+ eq(part1.get_content_type(), 'message/rfc822')
eq(part1.is_multipart(), 1)
eq(len(part1.get_payload()), 1)
part1a = part1.get_payload(0)
eq(part1a.is_multipart(), 0)
- eq(part1a.get_type(), 'text/plain')
+ eq(part1a.get_content_type(), 'text/plain')
neq(part1a.get_payload(), 'message 1\n')
# next message/rfc822
part2 = msg.get_payload(1)
- eq(part2.get_type(), 'message/rfc822')
+ eq(part2.get_content_type(), 'message/rfc822')
eq(part2.is_multipart(), 1)
eq(len(part2.get_payload()), 1)
part2a = part2.get_payload(0)
eq(part2a.is_multipart(), 0)
- eq(part2a.get_type(), 'text/plain')
+ eq(part2a.get_content_type(), 'text/plain')
neq(part2a.get_payload(), 'message 2\n')
def test_three_lines(self):
@@ -2723,6 +2739,11 @@ class TestCharset(unittest.TestCase):
c = Charset('fake')
eq('hello w\xf6rld', c.body_encode('hello w\xf6rld'))
+ def test_unicode_charset_name(self):
+ charset = Charset(u'us-ascii')
+ self.assertEqual(str(charset), 'us-ascii')
+ self.assertRaises(Errors.CharsetError, Charset, 'asc\xffii')
+
# Test multilingual MIME headers.
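The bulk of the hunks above replace get_type() with get_content_type(): the old accessor returned None when no Content-Type header was present, while the new one falls back to the RFC 2045 defaults (text/plain, or whatever set_default_type() installed). A minimal sketch of the difference, assuming the renamed email.message module this merge introduces:

    from email.message import Message

    msg = Message()                          # no Content-Type header at all
    print msg.get_content_type()             # 'text/plain' (implicit default)
    msg.set_default_type('message/rfc822')
    print msg.get_content_type()             # 'message/rfc822'
    msg['Content-Type'] = 'Multipart/Mixed; boundary="X"'
    print msg.get_content_type()             # 'multipart/mixed' (lowercased)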
diff --git a/Lib/email/test/test_email_codecs.py b/Lib/email/test/test_email_codecs.py
index 159989c..38b7d95 100644
--- a/Lib/email/test/test_email_codecs.py
+++ b/Lib/email/test/test_email_codecs.py
@@ -10,6 +10,13 @@ from email.Charset import Charset
from email.Header import Header, decode_header
from email.Message import Message
+# We're compatible with Python 2.3, but it doesn't have the built-in Asian
+# codecs, so we have to skip all these tests.
+try:
+ unicode('foo', 'euc-jp')
+except LookupError:
+ raise TestSkipped
+
class TestEmailAsianCodecs(TestEmailBase):
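The module-level try/except above is the pre-unittest skip idiom: raising TestSkipped at import time makes regrtest report the whole file as skipped. A sketch of the same probe factored into a reusable helper (an illustration, not part of the patch), relying on Python 2's unicode() raising LookupError for an unknown codec:

    def codec_available(name):
        try:
            unicode('', name)    # forces a codec lookup by name
        except LookupError:      # optional codec not installed
            return False
        return True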
diff --git a/Lib/email/test/test_email_codecs_renamed.py b/Lib/email/test/test_email_codecs_renamed.py
new file mode 100644
index 0000000..56baccd
--- /dev/null
+++ b/Lib/email/test/test_email_codecs_renamed.py
@@ -0,0 +1,77 @@
+# Copyright (C) 2002-2006 Python Software Foundation
+# Contact: email-sig@python.org
+# email package unit tests for (optional) Asian codecs
+
+import unittest
+from test.test_support import TestSkipped, run_unittest
+
+from email.test.test_email import TestEmailBase
+from email.charset import Charset
+from email.header import Header, decode_header
+from email.message import Message
+
+# We're compatible with Python 2.3, but it doesn't have the built-in Asian
+# codecs, so we have to skip all these tests.
+try:
+ unicode('foo', 'euc-jp')
+except LookupError:
+ raise TestSkipped
+
+
+
+class TestEmailAsianCodecs(TestEmailBase):
+ def test_japanese_codecs(self):
+ eq = self.ndiffAssertEqual
+ j = Charset("euc-jp")
+ g = Charset("iso-8859-1")
+ h = Header("Hello World!")
+ jhello = '\xa5\xcf\xa5\xed\xa1\xbc\xa5\xef\xa1\xbc\xa5\xeb\xa5\xc9\xa1\xaa'
+ ghello = 'Gr\xfc\xdf Gott!'
+ h.append(jhello, j)
+ h.append(ghello, g)
+ # BAW: This used to -- and maybe should -- fold the two iso-8859-1
+ # chunks into a single encoded word. However it doesn't violate the
+ # standard to have them as two encoded chunks and maybe it's
+ # reasonable <wink> for each .append() call to result in a separate
+ # encoded word.
+ eq(h.encode(), """\
+Hello World! =?iso-2022-jp?b?GyRCJU8lbSE8JW8hPCVrJUkhKhsoQg==?=
+ =?iso-8859-1?q?Gr=FC=DF?= =?iso-8859-1?q?_Gott!?=""")
+ eq(decode_header(h.encode()),
+ [('Hello World!', None),
+ ('\x1b$B%O%m!<%o!<%k%I!*\x1b(B', 'iso-2022-jp'),
+ ('Gr\xfc\xdf Gott!', 'iso-8859-1')])
+ long = 'test-ja \xa4\xd8\xc5\xea\xb9\xc6\xa4\xb5\xa4\xec\xa4\xbf\xa5\xe1\xa1\xbc\xa5\xeb\xa4\xcf\xbb\xca\xb2\xf1\xbc\xd4\xa4\xce\xbe\xb5\xc7\xa7\xa4\xf2\xc2\xd4\xa4\xc3\xa4\xc6\xa4\xa4\xa4\xde\xa4\xb9'
+ h = Header(long, j, header_name="Subject")
+ # test a very long header
+ enc = h.encode()
+ # TK: splitting point may differ by codec design and/or Header encoding
+ eq(enc , """\
+=?iso-2022-jp?b?dGVzdC1qYSAbJEIkWEVqOUYkNSRsJD8lYSE8JWskTztKGyhC?=
+ =?iso-2022-jp?b?GyRCMnE8VCROPjVHJyRyQlQkQyRGJCQkXiQ5GyhC?=""")
+ # TK: full decode comparison
+ eq(h.__unicode__().encode('euc-jp'), long)
+
+ def test_payload_encoding(self):
+ jhello = '\xa5\xcf\xa5\xed\xa1\xbc\xa5\xef\xa1\xbc\xa5\xeb\xa5\xc9\xa1\xaa'
+ jcode = 'euc-jp'
+ msg = Message()
+ msg.set_payload(jhello, jcode)
+ ustr = unicode(msg.get_payload(), msg.get_content_charset())
+ self.assertEqual(jhello, ustr.encode(jcode))
+
+
+
+def suite():
+ suite = unittest.TestSuite()
+ suite.addTest(unittest.makeSuite(TestEmailAsianCodecs))
+ return suite
+
+
+def test_main():
+ run_unittest(TestEmailAsianCodecs)
+
+
+
+if __name__ == '__main__':
+ unittest.main(defaultTest='suite')
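This file is a lowercase-imports twin of test_email_codecs.py, so both spellings stay covered during the transition. Assuming the compatibility shims this merge installs (the old mixed-case modules lazily re-export the new lowercase ones), the two import styles should hand back the very same objects:

    from email.Charset import Charset as OldCharset    # old mixed-case name
    from email.charset import Charset as NewCharset    # new lowercase name
    assert OldCharset is NewCharset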
diff --git a/Lib/email/test/test_email_renamed.py b/Lib/email/test/test_email_renamed.py
new file mode 100644
index 0000000..4ac2ee9
--- /dev/null
+++ b/Lib/email/test/test_email_renamed.py
@@ -0,0 +1,3078 @@
+# Copyright (C) 2001-2006 Python Software Foundation
+# Contact: email-sig@python.org
+# email package unit tests
+
+import os
+import sys
+import time
+import base64
+import difflib
+import unittest
+import warnings
+from cStringIO import StringIO
+
+import email
+
+from email.charset import Charset
+from email.header import Header, decode_header, make_header
+from email.parser import Parser, HeaderParser
+from email.generator import Generator, DecodedGenerator
+from email.message import Message
+from email.mime.application import MIMEApplication
+from email.mime.audio import MIMEAudio
+from email.mime.text import MIMEText
+from email.mime.image import MIMEImage
+from email.mime.base import MIMEBase
+from email.mime.message import MIMEMessage
+from email.mime.multipart import MIMEMultipart
+from email import utils
+from email import errors
+from email import encoders
+from email import iterators
+from email import base64mime
+from email import quoprimime
+
+from test.test_support import findfile, run_unittest
+from email.test import __file__ as landmark
+
+
+NL = '\n'
+EMPTYSTRING = ''
+SPACE = ' '
+
+
+
+def openfile(filename, mode='r'):
+ path = os.path.join(os.path.dirname(landmark), 'data', filename)
+ return open(path, mode)
+
+
+
+# Base test class
+class TestEmailBase(unittest.TestCase):
+ def ndiffAssertEqual(self, first, second):
+ """Like failUnlessEqual except use ndiff for readable output."""
+ if first <> second:
+ sfirst = str(first)
+ ssecond = str(second)
+ diff = difflib.ndiff(sfirst.splitlines(), ssecond.splitlines())
+ fp = StringIO()
+ print >> fp, NL, NL.join(diff)
+ raise self.failureException, fp.getvalue()
+
+ def _msgobj(self, filename):
+ fp = openfile(findfile(filename))
+ try:
+ msg = email.message_from_file(fp)
+ finally:
+ fp.close()
+ return msg
+
+
+
+# Test various aspects of the Message class's API
+class TestMessageAPI(TestEmailBase):
+ def test_get_all(self):
+ eq = self.assertEqual
+ msg = self._msgobj('msg_20.txt')
+ eq(msg.get_all('cc'), ['ccc@zzz.org', 'ddd@zzz.org', 'eee@zzz.org'])
+ eq(msg.get_all('xx', 'n/a'), 'n/a')
+
+ def test_getset_charset(self):
+ eq = self.assertEqual
+ msg = Message()
+ eq(msg.get_charset(), None)
+ charset = Charset('iso-8859-1')
+ msg.set_charset(charset)
+ eq(msg['mime-version'], '1.0')
+ eq(msg.get_content_type(), 'text/plain')
+ eq(msg['content-type'], 'text/plain; charset="iso-8859-1"')
+ eq(msg.get_param('charset'), 'iso-8859-1')
+ eq(msg['content-transfer-encoding'], 'quoted-printable')
+ eq(msg.get_charset().input_charset, 'iso-8859-1')
+ # Remove the charset
+ msg.set_charset(None)
+ eq(msg.get_charset(), None)
+ eq(msg['content-type'], 'text/plain')
+ # Try adding a charset when there's already MIME headers present
+ msg = Message()
+ msg['MIME-Version'] = '2.0'
+ msg['Content-Type'] = 'text/x-weird'
+ msg['Content-Transfer-Encoding'] = 'quinted-puntable'
+ msg.set_charset(charset)
+ eq(msg['mime-version'], '2.0')
+ eq(msg['content-type'], 'text/x-weird; charset="iso-8859-1"')
+ eq(msg['content-transfer-encoding'], 'quinted-puntable')
+
+ def test_set_charset_from_string(self):
+ eq = self.assertEqual
+ msg = Message()
+ msg.set_charset('us-ascii')
+ eq(msg.get_charset().input_charset, 'us-ascii')
+ eq(msg['content-type'], 'text/plain; charset="us-ascii"')
+
+ def test_set_payload_with_charset(self):
+ msg = Message()
+ charset = Charset('iso-8859-1')
+ msg.set_payload('This is a string payload', charset)
+ self.assertEqual(msg.get_charset().input_charset, 'iso-8859-1')
+
+ def test_get_charsets(self):
+ eq = self.assertEqual
+
+ msg = self._msgobj('msg_08.txt')
+ charsets = msg.get_charsets()
+ eq(charsets, [None, 'us-ascii', 'iso-8859-1', 'iso-8859-2', 'koi8-r'])
+
+ msg = self._msgobj('msg_09.txt')
+ charsets = msg.get_charsets('dingbat')
+ eq(charsets, ['dingbat', 'us-ascii', 'iso-8859-1', 'dingbat',
+ 'koi8-r'])
+
+ msg = self._msgobj('msg_12.txt')
+ charsets = msg.get_charsets()
+ eq(charsets, [None, 'us-ascii', 'iso-8859-1', None, 'iso-8859-2',
+ 'iso-8859-3', 'us-ascii', 'koi8-r'])
+
+ def test_get_filename(self):
+ eq = self.assertEqual
+
+ msg = self._msgobj('msg_04.txt')
+ filenames = [p.get_filename() for p in msg.get_payload()]
+ eq(filenames, ['msg.txt', 'msg.txt'])
+
+ msg = self._msgobj('msg_07.txt')
+ subpart = msg.get_payload(1)
+ eq(subpart.get_filename(), 'dingusfish.gif')
+
+ def test_get_filename_with_name_parameter(self):
+ eq = self.assertEqual
+
+ msg = self._msgobj('msg_44.txt')
+ filenames = [p.get_filename() for p in msg.get_payload()]
+ eq(filenames, ['msg.txt', 'msg.txt'])
+
+ def test_get_boundary(self):
+ eq = self.assertEqual
+ msg = self._msgobj('msg_07.txt')
+ # No quotes!
+ eq(msg.get_boundary(), 'BOUNDARY')
+
+ def test_set_boundary(self):
+ eq = self.assertEqual
+ # This one has no existing boundary parameter, but the Content-Type:
+ # header appears fifth.
+ msg = self._msgobj('msg_01.txt')
+ msg.set_boundary('BOUNDARY')
+ header, value = msg.items()[4]
+ eq(header.lower(), 'content-type')
+ eq(value, 'text/plain; charset="us-ascii"; boundary="BOUNDARY"')
+ # This one has a Content-Type: header, with a boundary, stuck in the
+ # middle of its headers. Make sure the order is preserved; it should
+ # be fifth.
+ msg = self._msgobj('msg_04.txt')
+ msg.set_boundary('BOUNDARY')
+ header, value = msg.items()[4]
+ eq(header.lower(), 'content-type')
+ eq(value, 'multipart/mixed; boundary="BOUNDARY"')
+ # And this one has no Content-Type: header at all.
+ msg = self._msgobj('msg_03.txt')
+ self.assertRaises(errors.HeaderParseError,
+ msg.set_boundary, 'BOUNDARY')
+
+ def test_get_decoded_payload(self):
+ eq = self.assertEqual
+ msg = self._msgobj('msg_10.txt')
+ # The outer message is a multipart
+ eq(msg.get_payload(decode=True), None)
+ # Subpart 1 is 7bit encoded
+ eq(msg.get_payload(0).get_payload(decode=True),
+ 'This is a 7bit encoded message.\n')
+ # Subpart 2 is quopri
+ eq(msg.get_payload(1).get_payload(decode=True),
+ '\xa1This is a Quoted Printable encoded message!\n')
+ # Subpart 3 is base64
+ eq(msg.get_payload(2).get_payload(decode=True),
+ 'This is a Base64 encoded message.')
+ # Subpart 4 has no Content-Transfer-Encoding: header.
+ eq(msg.get_payload(3).get_payload(decode=True),
+ 'This has no Content-Transfer-Encoding: header.\n')
+
+ def test_get_decoded_uu_payload(self):
+ eq = self.assertEqual
+ msg = Message()
+ msg.set_payload('begin 666 -\n+:&5L;&\\@=V]R;&0 \n \nend\n')
+ for cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
+ msg['content-transfer-encoding'] = cte
+ eq(msg.get_payload(decode=True), 'hello world')
+ # Now try some bogus data
+ msg.set_payload('foo')
+ eq(msg.get_payload(decode=True), 'foo')
+
+ def test_decoded_generator(self):
+ eq = self.assertEqual
+ msg = self._msgobj('msg_07.txt')
+ fp = openfile('msg_17.txt')
+ try:
+ text = fp.read()
+ finally:
+ fp.close()
+ s = StringIO()
+ g = DecodedGenerator(s)
+ g.flatten(msg)
+ eq(s.getvalue(), text)
+
+ def test__contains__(self):
+ msg = Message()
+ msg['From'] = 'Me'
+ msg['to'] = 'You'
+ # Check for case insensitivity
+ self.failUnless('from' in msg)
+ self.failUnless('From' in msg)
+ self.failUnless('FROM' in msg)
+ self.failUnless('to' in msg)
+ self.failUnless('To' in msg)
+ self.failUnless('TO' in msg)
+
+ def test_as_string(self):
+ eq = self.assertEqual
+ msg = self._msgobj('msg_01.txt')
+ fp = openfile('msg_01.txt')
+ try:
+ text = fp.read()
+ finally:
+ fp.close()
+ eq(text, msg.as_string())
+ fullrepr = str(msg)
+ lines = fullrepr.split('\n')
+ self.failUnless(lines[0].startswith('From '))
+ eq(text, NL.join(lines[1:]))
+
+ def test_bad_param(self):
+ msg = email.message_from_string("Content-Type: blarg; baz; boo\n")
+ self.assertEqual(msg.get_param('baz'), '')
+
+ def test_missing_filename(self):
+ msg = email.message_from_string("From: foo\n")
+ self.assertEqual(msg.get_filename(), None)
+
+ def test_bogus_filename(self):
+ msg = email.message_from_string(
+ "Content-Disposition: blarg; filename\n")
+ self.assertEqual(msg.get_filename(), '')
+
+ def test_missing_boundary(self):
+ msg = email.message_from_string("From: foo\n")
+ self.assertEqual(msg.get_boundary(), None)
+
+ def test_get_params(self):
+ eq = self.assertEqual
+ msg = email.message_from_string(
+ 'X-Header: foo=one; bar=two; baz=three\n')
+ eq(msg.get_params(header='x-header'),
+ [('foo', 'one'), ('bar', 'two'), ('baz', 'three')])
+ msg = email.message_from_string(
+ 'X-Header: foo; bar=one; baz=two\n')
+ eq(msg.get_params(header='x-header'),
+ [('foo', ''), ('bar', 'one'), ('baz', 'two')])
+ eq(msg.get_params(), None)
+ msg = email.message_from_string(
+ 'X-Header: foo; bar="one"; baz=two\n')
+ eq(msg.get_params(header='x-header'),
+ [('foo', ''), ('bar', 'one'), ('baz', 'two')])
+
+ def test_get_param_liberal(self):
+ msg = Message()
+ msg['Content-Type'] = 'Content-Type: Multipart/mixed; boundary = "CPIMSSMTPC06p5f3tG"'
+ self.assertEqual(msg.get_param('boundary'), 'CPIMSSMTPC06p5f3tG')
+
+ def test_get_param(self):
+ eq = self.assertEqual
+ msg = email.message_from_string(
+ "X-Header: foo=one; bar=two; baz=three\n")
+ eq(msg.get_param('bar', header='x-header'), 'two')
+ eq(msg.get_param('quuz', header='x-header'), None)
+ eq(msg.get_param('quuz'), None)
+ msg = email.message_from_string(
+ 'X-Header: foo; bar="one"; baz=two\n')
+ eq(msg.get_param('foo', header='x-header'), '')
+ eq(msg.get_param('bar', header='x-header'), 'one')
+ eq(msg.get_param('baz', header='x-header'), 'two')
+ # XXX: We are not RFC-2045 compliant! We cannot parse:
+ # msg["Content-Type"] = 'text/plain; weird="hey; dolly? [you] @ <\\"home\\">?"'
+ # msg.get_param("weird")
+ # yet.
+
+ def test_get_param_funky_continuation_lines(self):
+ msg = self._msgobj('msg_22.txt')
+ self.assertEqual(msg.get_payload(1).get_param('name'), 'wibble.JPG')
+
+ def test_get_param_with_semis_in_quotes(self):
+ msg = email.message_from_string(
+ 'Content-Type: image/pjpeg; name="Jim&amp;&amp;Jill"\n')
+ self.assertEqual(msg.get_param('name'), 'Jim&amp;&amp;Jill')
+ self.assertEqual(msg.get_param('name', unquote=False),
+ '"Jim&amp;&amp;Jill"')
+
+ def test_has_key(self):
+ msg = email.message_from_string('Header: exists')
+ self.failUnless(msg.has_key('header'))
+ self.failUnless(msg.has_key('Header'))
+ self.failUnless(msg.has_key('HEADER'))
+ self.failIf(msg.has_key('headeri'))
+
+ def test_set_param(self):
+ eq = self.assertEqual
+ msg = Message()
+ msg.set_param('charset', 'iso-2022-jp')
+ eq(msg.get_param('charset'), 'iso-2022-jp')
+ msg.set_param('importance', 'high value')
+ eq(msg.get_param('importance'), 'high value')
+ eq(msg.get_param('importance', unquote=False), '"high value"')
+ eq(msg.get_params(), [('text/plain', ''),
+ ('charset', 'iso-2022-jp'),
+ ('importance', 'high value')])
+ eq(msg.get_params(unquote=False), [('text/plain', ''),
+ ('charset', '"iso-2022-jp"'),
+ ('importance', '"high value"')])
+ msg.set_param('charset', 'iso-9999-xx', header='X-Jimmy')
+ eq(msg.get_param('charset', header='X-Jimmy'), 'iso-9999-xx')
+
+ def test_del_param(self):
+ eq = self.assertEqual
+ msg = self._msgobj('msg_05.txt')
+ eq(msg.get_params(),
+ [('multipart/report', ''), ('report-type', 'delivery-status'),
+ ('boundary', 'D1690A7AC1.996856090/mail.example.com')])
+ old_val = msg.get_param("report-type")
+ msg.del_param("report-type")
+ eq(msg.get_params(),
+ [('multipart/report', ''),
+ ('boundary', 'D1690A7AC1.996856090/mail.example.com')])
+ msg.set_param("report-type", old_val)
+ eq(msg.get_params(),
+ [('multipart/report', ''),
+ ('boundary', 'D1690A7AC1.996856090/mail.example.com'),
+ ('report-type', old_val)])
+
+ def test_del_param_on_other_header(self):
+ msg = Message()
+ msg.add_header('Content-Disposition', 'attachment', filename='bud.gif')
+ msg.del_param('filename', 'content-disposition')
+ self.assertEqual(msg['content-disposition'], 'attachment')
+
+ def test_set_type(self):
+ eq = self.assertEqual
+ msg = Message()
+ self.assertRaises(ValueError, msg.set_type, 'text')
+ msg.set_type('text/plain')
+ eq(msg['content-type'], 'text/plain')
+ msg.set_param('charset', 'us-ascii')
+ eq(msg['content-type'], 'text/plain; charset="us-ascii"')
+ msg.set_type('text/html')
+ eq(msg['content-type'], 'text/html; charset="us-ascii"')
+
+ def test_set_type_on_other_header(self):
+ msg = Message()
+ msg['X-Content-Type'] = 'text/plain'
+ msg.set_type('application/octet-stream', 'X-Content-Type')
+ self.assertEqual(msg['x-content-type'], 'application/octet-stream')
+
+ def test_get_content_type_missing(self):
+ msg = Message()
+ self.assertEqual(msg.get_content_type(), 'text/plain')
+
+ def test_get_content_type_missing_with_default_type(self):
+ msg = Message()
+ msg.set_default_type('message/rfc822')
+ self.assertEqual(msg.get_content_type(), 'message/rfc822')
+
+ def test_get_content_type_from_message_implicit(self):
+ msg = self._msgobj('msg_30.txt')
+ self.assertEqual(msg.get_payload(0).get_content_type(),
+ 'message/rfc822')
+
+ def test_get_content_type_from_message_explicit(self):
+ msg = self._msgobj('msg_28.txt')
+ self.assertEqual(msg.get_payload(0).get_content_type(),
+ 'message/rfc822')
+
+ def test_get_content_type_from_message_text_plain_implicit(self):
+ msg = self._msgobj('msg_03.txt')
+ self.assertEqual(msg.get_content_type(), 'text/plain')
+
+ def test_get_content_type_from_message_text_plain_explicit(self):
+ msg = self._msgobj('msg_01.txt')
+ self.assertEqual(msg.get_content_type(), 'text/plain')
+
+ def test_get_content_maintype_missing(self):
+ msg = Message()
+ self.assertEqual(msg.get_content_maintype(), 'text')
+
+ def test_get_content_maintype_missing_with_default_type(self):
+ msg = Message()
+ msg.set_default_type('message/rfc822')
+ self.assertEqual(msg.get_content_maintype(), 'message')
+
+ def test_get_content_maintype_from_message_implicit(self):
+ msg = self._msgobj('msg_30.txt')
+ self.assertEqual(msg.get_payload(0).get_content_maintype(), 'message')
+
+ def test_get_content_maintype_from_message_explicit(self):
+ msg = self._msgobj('msg_28.txt')
+ self.assertEqual(msg.get_payload(0).get_content_maintype(), 'message')
+
+ def test_get_content_maintype_from_message_text_plain_implicit(self):
+ msg = self._msgobj('msg_03.txt')
+ self.assertEqual(msg.get_content_maintype(), 'text')
+
+ def test_get_content_maintype_from_message_text_plain_explicit(self):
+ msg = self._msgobj('msg_01.txt')
+ self.assertEqual(msg.get_content_maintype(), 'text')
+
+ def test_get_content_subtype_missing(self):
+ msg = Message()
+ self.assertEqual(msg.get_content_subtype(), 'plain')
+
+ def test_get_content_subtype_missing_with_default_type(self):
+ msg = Message()
+ msg.set_default_type('message/rfc822')
+ self.assertEqual(msg.get_content_subtype(), 'rfc822')
+
+ def test_get_content_subtype_from_message_implicit(self):
+ msg = self._msgobj('msg_30.txt')
+ self.assertEqual(msg.get_payload(0).get_content_subtype(), 'rfc822')
+
+ def test_get_content_subtype_from_message_explicit(self):
+ msg = self._msgobj('msg_28.txt')
+ self.assertEqual(msg.get_payload(0).get_content_subtype(), 'rfc822')
+
+ def test_get_content_subtype_from_message_text_plain_implicit(self):
+ msg = self._msgobj('msg_03.txt')
+ self.assertEqual(msg.get_content_subtype(), 'plain')
+
+ def test_get_content_subtype_from_message_text_plain_explicit(self):
+ msg = self._msgobj('msg_01.txt')
+ self.assertEqual(msg.get_content_subtype(), 'plain')
+
+ def test_get_content_maintype_error(self):
+ msg = Message()
+ msg['Content-Type'] = 'no-slash-in-this-string'
+ self.assertEqual(msg.get_content_maintype(), 'text')
+
+ def test_get_content_subtype_error(self):
+ msg = Message()
+ msg['Content-Type'] = 'no-slash-in-this-string'
+ self.assertEqual(msg.get_content_subtype(), 'plain')
+
+ def test_replace_header(self):
+ eq = self.assertEqual
+ msg = Message()
+ msg.add_header('First', 'One')
+ msg.add_header('Second', 'Two')
+ msg.add_header('Third', 'Three')
+ eq(msg.keys(), ['First', 'Second', 'Third'])
+ eq(msg.values(), ['One', 'Two', 'Three'])
+ msg.replace_header('Second', 'Twenty')
+ eq(msg.keys(), ['First', 'Second', 'Third'])
+ eq(msg.values(), ['One', 'Twenty', 'Three'])
+ msg.add_header('First', 'Eleven')
+ msg.replace_header('First', 'One Hundred')
+ eq(msg.keys(), ['First', 'Second', 'Third', 'First'])
+ eq(msg.values(), ['One Hundred', 'Twenty', 'Three', 'Eleven'])
+ self.assertRaises(KeyError, msg.replace_header, 'Fourth', 'Missing')
+
+ def test_broken_base64_payload(self):
+ x = 'AwDp0P7//y6LwKEAcPa/6Q=9'
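+ # 25 characters isn't a multiple of 4, so binascii rejects this string;
+ # get_payload(decode=True) then falls back to the undecoded payload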
+ msg = Message()
+ msg['content-type'] = 'audio/x-midi'
+ msg['content-transfer-encoding'] = 'base64'
+ msg.set_payload(x)
+ self.assertEqual(msg.get_payload(decode=True), x)
+
+
+
+# Test the email.encoders module
+class TestEncoders(unittest.TestCase):
+ def test_encode_empty_payload(self):
+ eq = self.assertEqual
+ msg = Message()
+ msg.set_charset('us-ascii')
+ eq(msg['content-transfer-encoding'], '7bit')
+
+ def test_default_cte(self):
+ eq = self.assertEqual
+ # With no explicit _charset it's us-ascii, and all are 7-bit
+ msg = MIMEText('hello world')
+ eq(msg['content-transfer-encoding'], '7bit')
+ # Similar, but with 8-bit data
+ msg = MIMEText('hello \xf8 world')
+ eq(msg['content-transfer-encoding'], '8bit')
+ # And now with a different charset
+ msg = MIMEText('hello \xf8 world', _charset='iso-8859-1')
+ eq(msg['content-transfer-encoding'], 'quoted-printable')
+
+
+
+# Test long header wrapping
+class TestLongHeaders(TestEmailBase):
+ def test_split_long_continuation(self):
+ eq = self.ndiffAssertEqual
+ msg = email.message_from_string("""\
+Subject: bug demonstration
+\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
+\tmore text
+
+test
+""")
+ sfp = StringIO()
+ g = Generator(sfp)
+ g.flatten(msg)
+ eq(sfp.getvalue(), """\
+Subject: bug demonstration
+\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
+\tmore text
+
+test
+""")
+
+ def test_another_long_almost_unsplittable_header(self):
+ eq = self.ndiffAssertEqual
+ hstr = """\
+bug demonstration
+\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
+\tmore text"""
+ h = Header(hstr, continuation_ws='\t')
+ eq(h.encode(), """\
+bug demonstration
+\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
+\tmore text""")
+ h = Header(hstr)
+ eq(h.encode(), """\
+bug demonstration
+ 12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
+ more text""")
+
+ def test_long_nonstring(self):
+ eq = self.ndiffAssertEqual
+ g = Charset("iso-8859-1")
+ cz = Charset("iso-8859-2")
+ utf8 = Charset("utf-8")
+ g_head = "Die Mieter treten hier ein werden mit einem Foerderband komfortabel den Korridor entlang, an s\xfcdl\xfcndischen Wandgem\xe4lden vorbei, gegen die rotierenden Klingen bef\xf6rdert. "
+ cz_head = "Finan\xe8ni metropole se hroutily pod tlakem jejich d\xf9vtipu.. "
+ utf8_head = u"\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das Nunstuck git und Slotermeyer? Ja! Beiherhund das Oder die Flipperwaldt gersput.\u300d\u3068\u8a00\u3063\u3066\u3044\u307e\u3059\u3002".encode("utf-8")
+ h = Header(g_head, g, header_name='Subject')
+ h.append(cz_head, cz)
+ h.append(utf8_head, utf8)
+ msg = Message()
+ msg['Subject'] = h
+ sfp = StringIO()
+ g = Generator(sfp)
+ g.flatten(msg)
+ eq(sfp.getvalue(), """\
+Subject: =?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerd?=
+ =?iso-8859-1?q?erband_komfortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndi?=
+ =?iso-8859-1?q?schen_Wandgem=E4lden_vorbei=2C_gegen_die_rotierenden_Kling?=
+ =?iso-8859-1?q?en_bef=F6rdert=2E_?= =?iso-8859-2?q?Finan=E8ni_met?=
+ =?iso-8859-2?q?ropole_se_hroutily_pod_tlakem_jejich_d=F9vtipu=2E=2E_?=
+ =?utf-8?b?5q2j56K644Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE?=
+ =?utf-8?b?44G+44Gb44KT44CC5LiA6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB?=
+ =?utf-8?b?44GC44Go44Gv44Gn44Gf44KJ44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CM?=
+ =?utf-8?q?Wenn_ist_das_Nunstuck_git_und_Slotermeyer=3F_Ja!_Beiherhund_das?=
+ =?utf-8?b?IE9kZXIgZGllIEZsaXBwZXJ3YWxkdCBnZXJzcHV0LuOAjeOBqOiogOOBow==?=
+ =?utf-8?b?44Gm44GE44G+44GZ44CC?=
+
+""")
+ eq(h.encode(), """\
+=?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerd?=
+ =?iso-8859-1?q?erband_komfortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndi?=
+ =?iso-8859-1?q?schen_Wandgem=E4lden_vorbei=2C_gegen_die_rotierenden_Kling?=
+ =?iso-8859-1?q?en_bef=F6rdert=2E_?= =?iso-8859-2?q?Finan=E8ni_met?=
+ =?iso-8859-2?q?ropole_se_hroutily_pod_tlakem_jejich_d=F9vtipu=2E=2E_?=
+ =?utf-8?b?5q2j56K644Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE?=
+ =?utf-8?b?44G+44Gb44KT44CC5LiA6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB?=
+ =?utf-8?b?44GC44Go44Gv44Gn44Gf44KJ44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CM?=
+ =?utf-8?q?Wenn_ist_das_Nunstuck_git_und_Slotermeyer=3F_Ja!_Beiherhund_das?=
+ =?utf-8?b?IE9kZXIgZGllIEZsaXBwZXJ3YWxkdCBnZXJzcHV0LuOAjeOBqOiogOOBow==?=
+ =?utf-8?b?44Gm44GE44G+44GZ44CC?=""")
+
+ def test_long_header_encode(self):
+ eq = self.ndiffAssertEqual
+ h = Header('wasnipoop; giraffes="very-long-necked-animals"; '
+ 'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"',
+ header_name='X-Foobar-Spoink-Defrobnit')
+ eq(h.encode(), '''\
+wasnipoop; giraffes="very-long-necked-animals";
+ spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"''')
+
+ def test_long_header_encode_with_tab_continuation(self):
+ eq = self.ndiffAssertEqual
+ h = Header('wasnipoop; giraffes="very-long-necked-animals"; '
+ 'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"',
+ header_name='X-Foobar-Spoink-Defrobnit',
+ continuation_ws='\t')
+ eq(h.encode(), '''\
+wasnipoop; giraffes="very-long-necked-animals";
+\tspooge="yummy"; hippos="gargantuan"; marshmallows="gooey"''')
+
+ def test_header_splitter(self):
+ eq = self.ndiffAssertEqual
+ msg = MIMEText('')
+ # It'd be great if we could use add_header() here, but that doesn't
+ # guarantee the order of the parameters.
+ msg['X-Foobar-Spoink-Defrobnit'] = (
+ 'wasnipoop; giraffes="very-long-necked-animals"; '
+ 'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"')
+ sfp = StringIO()
+ g = Generator(sfp)
+ g.flatten(msg)
+ eq(sfp.getvalue(), '''\
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+X-Foobar-Spoink-Defrobnit: wasnipoop; giraffes="very-long-necked-animals";
+\tspooge="yummy"; hippos="gargantuan"; marshmallows="gooey"
+
+''')
+
+ def test_no_semis_header_splitter(self):
+ eq = self.ndiffAssertEqual
+ msg = Message()
+ msg['From'] = 'test@dom.ain'
+ msg['References'] = SPACE.join(['<%d@dom.ain>' % i for i in range(10)])
+ msg.set_payload('Test')
+ sfp = StringIO()
+ g = Generator(sfp)
+ g.flatten(msg)
+ eq(sfp.getvalue(), """\
+From: test@dom.ain
+References: <0@dom.ain> <1@dom.ain> <2@dom.ain> <3@dom.ain> <4@dom.ain>
+\t<5@dom.ain> <6@dom.ain> <7@dom.ain> <8@dom.ain> <9@dom.ain>
+
+Test""")
+
+ def test_no_split_long_header(self):
+ eq = self.ndiffAssertEqual
+ hstr = 'References: ' + 'x' * 80
+ h = Header(hstr, continuation_ws='\t')
+ eq(h.encode(), """\
+References: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx""")
+
+ def test_splitting_multiple_long_lines(self):
+ eq = self.ndiffAssertEqual
+ hstr = """\
+from babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for <mailman-admin@babylon.socal-raves.org>; Sat, 2 Feb 2002 17:00:06 -0800 (PST)
+\tfrom babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for <mailman-admin@babylon.socal-raves.org>; Sat, 2 Feb 2002 17:00:06 -0800 (PST)
+\tfrom babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for <mailman-admin@babylon.socal-raves.org>; Sat, 2 Feb 2002 17:00:06 -0800 (PST)
+"""
+ h = Header(hstr, continuation_ws='\t')
+ eq(h.encode(), """\
+from babylon.socal-raves.org (localhost [127.0.0.1]);
+\tby babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
+\tfor <mailman-admin@babylon.socal-raves.org>;
+\tSat, 2 Feb 2002 17:00:06 -0800 (PST)
+\tfrom babylon.socal-raves.org (localhost [127.0.0.1]);
+\tby babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
+\tfor <mailman-admin@babylon.socal-raves.org>;
+\tSat, 2 Feb 2002 17:00:06 -0800 (PST)
+\tfrom babylon.socal-raves.org (localhost [127.0.0.1]);
+\tby babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
+\tfor <mailman-admin@babylon.socal-raves.org>;
+\tSat, 2 Feb 2002 17:00:06 -0800 (PST)""")
+
+ def test_splitting_first_line_only_is_long(self):
+ eq = self.ndiffAssertEqual
+ hstr = """\
+from modemcable093.139-201-24.que.mc.videotron.ca ([24.201.139.93] helo=cthulhu.gerg.ca)
+\tby kronos.mems-exchange.org with esmtp (Exim 4.05)
+\tid 17k4h5-00034i-00
+\tfor test@mems-exchange.org; Wed, 28 Aug 2002 11:25:20 -0400"""
+ h = Header(hstr, maxlinelen=78, header_name='Received',
+ continuation_ws='\t')
+ eq(h.encode(), """\
+from modemcable093.139-201-24.que.mc.videotron.ca ([24.201.139.93]
+\thelo=cthulhu.gerg.ca)
+\tby kronos.mems-exchange.org with esmtp (Exim 4.05)
+\tid 17k4h5-00034i-00
+\tfor test@mems-exchange.org; Wed, 28 Aug 2002 11:25:20 -0400""")
+
+ def test_long_8bit_header(self):
+ eq = self.ndiffAssertEqual
+ msg = Message()
+ h = Header('Britische Regierung gibt', 'iso-8859-1',
+ header_name='Subject')
+ h.append('gr\xfcnes Licht f\xfcr Offshore-Windkraftprojekte')
+ msg['Subject'] = h
+ eq(msg.as_string(), """\
+Subject: =?iso-8859-1?q?Britische_Regierung_gibt?= =?iso-8859-1?q?gr=FCnes?=
+ =?iso-8859-1?q?_Licht_f=FCr_Offshore-Windkraftprojekte?=
+
+""")
+
+ def test_long_8bit_header_no_charset(self):
+ eq = self.ndiffAssertEqual
+ msg = Message()
+ msg['Reply-To'] = 'Britische Regierung gibt gr\xfcnes Licht f\xfcr Offshore-Windkraftprojekte <a-very-long-address@example.com>'
+ eq(msg.as_string(), """\
+Reply-To: Britische Regierung gibt gr\xfcnes Licht f\xfcr Offshore-Windkraftprojekte <a-very-long-address@example.com>
+
+""")
+
+ def test_long_to_header(self):
+ eq = self.ndiffAssertEqual
+ to = '"Someone Test #A" <someone@eecs.umich.edu>,<someone@eecs.umich.edu>,"Someone Test #B" <someone@umich.edu>, "Someone Test #C" <someone@eecs.umich.edu>, "Someone Test #D" <someone@eecs.umich.edu>'
+ msg = Message()
+ msg['To'] = to
+ eq(msg.as_string(0), '''\
+To: "Someone Test #A" <someone@eecs.umich.edu>, <someone@eecs.umich.edu>,
+\t"Someone Test #B" <someone@umich.edu>,
+\t"Someone Test #C" <someone@eecs.umich.edu>,
+\t"Someone Test #D" <someone@eecs.umich.edu>
+
+''')
+
+ def test_long_line_after_append(self):
+ eq = self.ndiffAssertEqual
+ s = 'This is an example of string which has almost the limit of header length.'
+ h = Header(s)
+ h.append('Add another line.')
+ eq(h.encode(), """\
+This is an example of string which has almost the limit of header length.
+ Add another line.""")
+
+ def test_shorter_line_with_append(self):
+ eq = self.ndiffAssertEqual
+ s = 'This is a shorter line.'
+ h = Header(s)
+ h.append('Add another sentence. (Surprise?)')
+ eq(h.encode(),
+ 'This is a shorter line. Add another sentence. (Surprise?)')
+
+ def test_long_field_name(self):
+ eq = self.ndiffAssertEqual
+ fn = 'X-Very-Very-Very-Long-Header-Name'
+ gs = "Die Mieter treten hier ein werden mit einem Foerderband komfortabel den Korridor entlang, an s\xfcdl\xfcndischen Wandgem\xe4lden vorbei, gegen die rotierenden Klingen bef\xf6rdert. "
+ h = Header(gs, 'iso-8859-1', header_name=fn)
+ # BAW: this seems broken because the first line is too long
+ eq(h.encode(), """\
+=?iso-8859-1?q?Die_Mieter_treten_hier_?=
+ =?iso-8859-1?q?ein_werden_mit_einem_Foerderband_komfortabel_den_Korridor_?=
+ =?iso-8859-1?q?entlang=2C_an_s=FCdl=FCndischen_Wandgem=E4lden_vorbei=2C_g?=
+ =?iso-8859-1?q?egen_die_rotierenden_Klingen_bef=F6rdert=2E_?=""")
+
+ def test_long_received_header(self):
+ h = 'from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) by hrothgar.la.mastaler.com (tmda-ofmipd) with ESMTP; Wed, 05 Mar 2003 18:10:18 -0700'
+ msg = Message()
+ msg['Received-1'] = Header(h, continuation_ws='\t')
+ msg['Received-2'] = h
+ self.assertEqual(msg.as_string(), """\
+Received-1: from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) by
+\throthgar.la.mastaler.com (tmda-ofmipd) with ESMTP;
+\tWed, 05 Mar 2003 18:10:18 -0700
+Received-2: from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) by
+\throthgar.la.mastaler.com (tmda-ofmipd) with ESMTP;
+\tWed, 05 Mar 2003 18:10:18 -0700
+
+""")
+
+ def test_string_headerinst_eq(self):
+ h = '<15975.17901.207240.414604@sgigritzmann1.mathematik.tu-muenchen.de> (David Bremner\'s message of "Thu, 6 Mar 2003 13:58:21 +0100")'
+ msg = Message()
+ msg['Received-1'] = Header(h, header_name='Received-1',
+ continuation_ws='\t')
+ msg['Received-2'] = h
+ self.assertEqual(msg.as_string(), """\
+Received-1: <15975.17901.207240.414604@sgigritzmann1.mathematik.tu-muenchen.de>
+\t(David Bremner's message of "Thu, 6 Mar 2003 13:58:21 +0100")
+Received-2: <15975.17901.207240.414604@sgigritzmann1.mathematik.tu-muenchen.de>
+\t(David Bremner's message of "Thu, 6 Mar 2003 13:58:21 +0100")
+
+""")
+
+ def test_long_unbreakable_lines_with_continuation(self):
+ eq = self.ndiffAssertEqual
+ msg = Message()
+ t = """\
+ iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
+ locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp"""
+ msg['Face-1'] = t
+ msg['Face-2'] = Header(t, header_name='Face-2')
+ eq(msg.as_string(), """\
+Face-1: iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
+\tlocQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp
+Face-2: iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
+ locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp
+
+""")
+
+ def test_another_long_multiline_header(self):
+ eq = self.ndiffAssertEqual
+ m = '''\
+Received: from siimage.com ([172.25.1.3]) by zima.siliconimage.com with Microsoft SMTPSVC(5.0.2195.4905);
+\tWed, 16 Oct 2002 07:41:11 -0700'''
+ msg = email.message_from_string(m)
+ eq(msg.as_string(), '''\
+Received: from siimage.com ([172.25.1.3]) by zima.siliconimage.com with
+\tMicrosoft SMTPSVC(5.0.2195.4905); Wed, 16 Oct 2002 07:41:11 -0700
+
+''')
+
+ def test_long_lines_with_different_header(self):
+ eq = self.ndiffAssertEqual
+ h = """\
+List-Unsubscribe: <https://lists.sourceforge.net/lists/listinfo/spamassassin-talk>,
+ <mailto:spamassassin-talk-request@lists.sourceforge.net?subject=unsubscribe>"""
+ msg = Message()
+ msg['List'] = h
+ msg['List'] = Header(h, header_name='List')
+ eq(msg.as_string(), """\
+List: List-Unsubscribe: <https://lists.sourceforge.net/lists/listinfo/spamassassin-talk>,
+\t<mailto:spamassassin-talk-request@lists.sourceforge.net?subject=unsubscribe>
+List: List-Unsubscribe: <https://lists.sourceforge.net/lists/listinfo/spamassassin-talk>,
+ <mailto:spamassassin-talk-request@lists.sourceforge.net?subject=unsubscribe>
+
+""")
+
+
+
+# Test mangling of "From " lines in the body of a message
+class TestFromMangling(unittest.TestCase):
+ def setUp(self):
+ self.msg = Message()
+ self.msg['From'] = 'aaa@bbb.org'
+ self.msg.set_payload("""\
+From the desk of A.A.A.:
+Blah blah blah
+""")
+
+ def test_mangled_from(self):
+ s = StringIO()
+ g = Generator(s, mangle_from_=True)
+ g.flatten(self.msg)
+ self.assertEqual(s.getvalue(), """\
+From: aaa@bbb.org
+
+>From the desk of A.A.A.:
+Blah blah blah
+""")
+
+ def test_dont_mangle_from(self):
+ s = StringIO()
+ g = Generator(s, mangle_from_=False)
+ g.flatten(self.msg)
+ self.assertEqual(s.getvalue(), """\
+From: aaa@bbb.org
+
+From the desk of A.A.A.:
+Blah blah blah
+""")
+
+
+
+# Test the basic MIMEAudio class
+class TestMIMEAudio(unittest.TestCase):
+ def setUp(self):
+ # Make sure we pick up the audiotest.au that lives in email/test/data.
+ # The Python source tree also has an audiotest.au in Lib/test, but
+ # binary distros that omit the test package won't ship it.
+ # The trailing empty string on the .join() is significant
+ # since findfile() will do a dirname().
+ datadir = os.path.join(os.path.dirname(landmark), 'data', '')
+ fp = open(findfile('audiotest.au', datadir), 'rb')
+ try:
+ self._audiodata = fp.read()
+ finally:
+ fp.close()
+ self._au = MIMEAudio(self._audiodata)
+
+ def test_guess_minor_type(self):
+ self.assertEqual(self._au.get_content_type(), 'audio/basic')
+
+ def test_encoding(self):
+ payload = self._au.get_payload()
+ self.assertEqual(base64.decodestring(payload), self._audiodata)
+
+ def test_checkSetMinor(self):
+ au = MIMEAudio(self._audiodata, 'fish')
+ self.assertEqual(au.get_content_type(), 'audio/fish')
+
+ def test_add_header(self):
+ eq = self.assertEqual
+ unless = self.failUnless
+ self._au.add_header('Content-Disposition', 'attachment',
+ filename='audiotest.au')
+ eq(self._au['content-disposition'],
+ 'attachment; filename="audiotest.au"')
+ eq(self._au.get_params(header='content-disposition'),
+ [('attachment', ''), ('filename', 'audiotest.au')])
+ eq(self._au.get_param('filename', header='content-disposition'),
+ 'audiotest.au')
+ missing = []
+ eq(self._au.get_param('attachment', header='content-disposition'), '')
+ unless(self._au.get_param('foo', failobj=missing,
+ header='content-disposition') is missing)
+ # Try some missing stuff
+ unless(self._au.get_param('foobar', missing) is missing)
+ unless(self._au.get_param('attachment', missing,
+ header='foobar') is missing)
+
+
+
+# Test the basic MIMEImage class
+class TestMIMEImage(unittest.TestCase):
+ def setUp(self):
+ fp = openfile('PyBanner048.gif')
+ try:
+ self._imgdata = fp.read()
+ finally:
+ fp.close()
+ self._im = MIMEImage(self._imgdata)
+
+ def test_guess_minor_type(self):
+ self.assertEqual(self._im.get_content_type(), 'image/gif')
+
+ def test_encoding(self):
+ payload = self._im.get_payload()
+ self.assertEqual(base64.decodestring(payload), self._imgdata)
+
+ def test_checkSetMinor(self):
+ im = MIMEImage(self._imgdata, 'fish')
+ self.assertEqual(im.get_content_type(), 'image/fish')
+
+ def test_add_header(self):
+ eq = self.assertEqual
+ unless = self.failUnless
+ self._im.add_header('Content-Disposition', 'attachment',
+ filename='dingusfish.gif')
+ eq(self._im['content-disposition'],
+ 'attachment; filename="dingusfish.gif"')
+ eq(self._im.get_params(header='content-disposition'),
+ [('attachment', ''), ('filename', 'dingusfish.gif')])
+ eq(self._im.get_param('filename', header='content-disposition'),
+ 'dingusfish.gif')
+ missing = []
+ eq(self._im.get_param('attachment', header='content-disposition'), '')
+ unless(self._im.get_param('foo', failobj=missing,
+ header='content-disposition') is missing)
+ # Try some missing stuff
+ unless(self._im.get_param('foobar', missing) is missing)
+ unless(self._im.get_param('attachment', missing,
+ header='foobar') is missing)
+
+
+
+# Test the basic MIMEApplication class
+class TestMIMEApplication(unittest.TestCase):
+ def test_headers(self):
+ eq = self.assertEqual
+ msg = MIMEApplication('\xfa\xfb\xfc\xfd\xfe\xff')
+ eq(msg.get_content_type(), 'application/octet-stream')
+ eq(msg['content-transfer-encoding'], 'base64')
+
+ def test_body(self):
+ eq = self.assertEqual
+ bytes = '\xfa\xfb\xfc\xfd\xfe\xff'
+ msg = MIMEApplication(bytes)
+ eq(msg.get_payload(), '+vv8/f7/')
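+ # '+vv8/f7/' is the base64 encoding of FA FB FC FD FE FF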
+ eq(msg.get_payload(decode=True), bytes)
+
+
+
+# Test the basic MIMEText class
+class TestMIMEText(unittest.TestCase):
+ def setUp(self):
+ self._msg = MIMEText('hello there')
+
+ def test_types(self):
+ eq = self.assertEqual
+ unless = self.failUnless
+ eq(self._msg.get_content_type(), 'text/plain')
+ eq(self._msg.get_param('charset'), 'us-ascii')
+ missing = []
+ unless(self._msg.get_param('foobar', missing) is missing)
+ unless(self._msg.get_param('charset', missing, header='foobar')
+ is missing)
+
+ def test_payload(self):
+ self.assertEqual(self._msg.get_payload(), 'hello there')
+ self.failUnless(not self._msg.is_multipart())
+
+ def test_charset(self):
+ eq = self.assertEqual
+ msg = MIMEText('hello there', _charset='us-ascii')
+ eq(msg.get_charset().input_charset, 'us-ascii')
+ eq(msg['content-type'], 'text/plain; charset="us-ascii"')
+
+
+
+# Test complicated multipart/* messages
+class TestMultipart(TestEmailBase):
+ def setUp(self):
+ fp = openfile('PyBanner048.gif')
+ try:
+ data = fp.read()
+ finally:
+ fp.close()
+
+ container = MIMEBase('multipart', 'mixed', boundary='BOUNDARY')
+ image = MIMEImage(data, name='dingusfish.gif')
+ image.add_header('content-disposition', 'attachment',
+ filename='dingusfish.gif')
+ intro = MIMEText('''\
+Hi there,
+
+This is the dingus fish.
+''')
+ container.attach(intro)
+ container.attach(image)
+ container['From'] = 'Barry <barry@digicool.com>'
+ container['To'] = 'Dingus Lovers <cravindogs@cravindogs.com>'
+ container['Subject'] = 'Here is your dingus fish'
+
+ now = 987809702.54848599
+ timetuple = time.localtime(now)
+ if timetuple[-1] == 0:
+ tzsecs = time.timezone
+ else:
+ tzsecs = time.altzone
+ if tzsecs > 0:
+ sign = '-'
+ else:
+ sign = '+'
+ tzoffset = ' %s%04d' % (sign, tzsecs / 36)
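+ # dividing seconds by 36 yields the RFC 2822 HHMM offset digits:
+ # 3600 s/hour -> 100 units/hour, so 28800 seconds becomes 0800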
+ container['Date'] = time.strftime(
+ '%a, %d %b %Y %H:%M:%S',
+ time.localtime(now)) + tzoffset
+ self._msg = container
+ self._im = image
+ self._txt = intro
+
+ def test_hierarchy(self):
+ # convenience
+ eq = self.assertEqual
+ unless = self.failUnless
+ raises = self.assertRaises
+ # tests
+ m = self._msg
+ unless(m.is_multipart())
+ eq(m.get_content_type(), 'multipart/mixed')
+ eq(len(m.get_payload()), 2)
+ raises(IndexError, m.get_payload, 2)
+ m0 = m.get_payload(0)
+ m1 = m.get_payload(1)
+ unless(m0 is self._txt)
+ unless(m1 is self._im)
+ eq(m.get_payload(), [m0, m1])
+ unless(not m0.is_multipart())
+ unless(not m1.is_multipart())
+
+ def test_empty_multipart_idempotent(self):
+ text = """\
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+MIME-Version: 1.0
+Subject: A subject
+To: aperson@dom.ain
+From: bperson@dom.ain
+
+
+--BOUNDARY
+
+
+--BOUNDARY--
+"""
+ msg = Parser().parsestr(text)
+ self.ndiffAssertEqual(text, msg.as_string())
+
+ def test_no_parts_in_a_multipart_with_none_epilogue(self):
+ outer = MIMEBase('multipart', 'mixed')
+ outer['Subject'] = 'A subject'
+ outer['To'] = 'aperson@dom.ain'
+ outer['From'] = 'bperson@dom.ain'
+ outer.set_boundary('BOUNDARY')
+ self.ndiffAssertEqual(outer.as_string(), '''\
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+MIME-Version: 1.0
+Subject: A subject
+To: aperson@dom.ain
+From: bperson@dom.ain
+
+--BOUNDARY
+
+--BOUNDARY--''')
+
+ def test_no_parts_in_a_multipart_with_empty_epilogue(self):
+ outer = MIMEBase('multipart', 'mixed')
+ outer['Subject'] = 'A subject'
+ outer['To'] = 'aperson@dom.ain'
+ outer['From'] = 'bperson@dom.ain'
+ outer.preamble = ''
+ outer.epilogue = ''
+ outer.set_boundary('BOUNDARY')
+ self.ndiffAssertEqual(outer.as_string(), '''\
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+MIME-Version: 1.0
+Subject: A subject
+To: aperson@dom.ain
+From: bperson@dom.ain
+
+
+--BOUNDARY
+
+--BOUNDARY--
+''')
+
+ def test_one_part_in_a_multipart(self):
+ eq = self.ndiffAssertEqual
+ outer = MIMEBase('multipart', 'mixed')
+ outer['Subject'] = 'A subject'
+ outer['To'] = 'aperson@dom.ain'
+ outer['From'] = 'bperson@dom.ain'
+ outer.set_boundary('BOUNDARY')
+ msg = MIMEText('hello world')
+ outer.attach(msg)
+ eq(outer.as_string(), '''\
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+MIME-Version: 1.0
+Subject: A subject
+To: aperson@dom.ain
+From: bperson@dom.ain
+
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+
+hello world
+--BOUNDARY--''')
+
+ def test_seq_parts_in_a_multipart_with_empty_preamble(self):
+ eq = self.ndiffAssertEqual
+ outer = MIMEBase('multipart', 'mixed')
+ outer['Subject'] = 'A subject'
+ outer['To'] = 'aperson@dom.ain'
+ outer['From'] = 'bperson@dom.ain'
+ outer.preamble = ''
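+ # an empty-but-present preamble forces the blank line before the first
+ # boundary seen in the expected output; preamble = None suppresses it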
+ msg = MIMEText('hello world')
+ outer.attach(msg)
+ outer.set_boundary('BOUNDARY')
+ eq(outer.as_string(), '''\
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+MIME-Version: 1.0
+Subject: A subject
+To: aperson@dom.ain
+From: bperson@dom.ain
+
+
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+
+hello world
+--BOUNDARY--''')
+
+
+ def test_seq_parts_in_a_multipart_with_none_preamble(self):
+ eq = self.ndiffAssertEqual
+ outer = MIMEBase('multipart', 'mixed')
+ outer['Subject'] = 'A subject'
+ outer['To'] = 'aperson@dom.ain'
+ outer['From'] = 'bperson@dom.ain'
+ outer.preamble = None
+ msg = MIMEText('hello world')
+ outer.attach(msg)
+ outer.set_boundary('BOUNDARY')
+ eq(outer.as_string(), '''\
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+MIME-Version: 1.0
+Subject: A subject
+To: aperson@dom.ain
+From: bperson@dom.ain
+
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+
+hello world
+--BOUNDARY--''')
+
+
+ def test_seq_parts_in_a_multipart_with_none_epilogue(self):
+ eq = self.ndiffAssertEqual
+ outer = MIMEBase('multipart', 'mixed')
+ outer['Subject'] = 'A subject'
+ outer['To'] = 'aperson@dom.ain'
+ outer['From'] = 'bperson@dom.ain'
+ outer.epilogue = None
+ msg = MIMEText('hello world')
+ outer.attach(msg)
+ outer.set_boundary('BOUNDARY')
+ eq(outer.as_string(), '''\
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+MIME-Version: 1.0
+Subject: A subject
+To: aperson@dom.ain
+From: bperson@dom.ain
+
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+
+hello world
+--BOUNDARY--''')
+
+
+ def test_seq_parts_in_a_multipart_with_empty_epilogue(self):
+ eq = self.ndiffAssertEqual
+ outer = MIMEBase('multipart', 'mixed')
+ outer['Subject'] = 'A subject'
+ outer['To'] = 'aperson@dom.ain'
+ outer['From'] = 'bperson@dom.ain'
+ outer.epilogue = ''
+ msg = MIMEText('hello world')
+ outer.attach(msg)
+ outer.set_boundary('BOUNDARY')
+ eq(outer.as_string(), '''\
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+MIME-Version: 1.0
+Subject: A subject
+To: aperson@dom.ain
+From: bperson@dom.ain
+
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+
+hello world
+--BOUNDARY--
+''')
+
+
+ def test_seq_parts_in_a_multipart_with_nl_epilogue(self):
+ eq = self.ndiffAssertEqual
+ outer = MIMEBase('multipart', 'mixed')
+ outer['Subject'] = 'A subject'
+ outer['To'] = 'aperson@dom.ain'
+ outer['From'] = 'bperson@dom.ain'
+ outer.epilogue = '\n'
+ msg = MIMEText('hello world')
+ outer.attach(msg)
+ outer.set_boundary('BOUNDARY')
+ eq(outer.as_string(), '''\
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+MIME-Version: 1.0
+Subject: A subject
+To: aperson@dom.ain
+From: bperson@dom.ain
+
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+
+hello world
+--BOUNDARY--
+
+''')
+
+ def test_message_external_body(self):
+ eq = self.assertEqual
+ msg = self._msgobj('msg_36.txt')
+ eq(len(msg.get_payload()), 2)
+ msg1 = msg.get_payload(1)
+ eq(msg1.get_content_type(), 'multipart/alternative')
+ eq(len(msg1.get_payload()), 2)
+ for subpart in msg1.get_payload():
+ eq(subpart.get_content_type(), 'message/external-body')
+ eq(len(subpart.get_payload()), 1)
+ subsubpart = subpart.get_payload(0)
+ eq(subsubpart.get_content_type(), 'text/plain')
+
+ def test_double_boundary(self):
+ # msg_37.txt is a multipart that contains two dash-boundaries in a
+ # row. Our interpretation of RFC 2046 calls for ignoring the second
+ # and subsequent boundaries.
+ msg = self._msgobj('msg_37.txt')
+ self.assertEqual(len(msg.get_payload()), 3)
+
+ def test_nested_inner_contains_outer_boundary(self):
+ eq = self.ndiffAssertEqual
+ # msg_38.txt has an inner part that contains outer boundaries. My
+ # interpretation of RFC 2046 (based on sections 5.1 and 5.1.2) says
+ # these are illegal and should be interpreted as unterminated inner
+ # parts.
+ msg = self._msgobj('msg_38.txt')
+ sfp = StringIO()
+ iterators._structure(msg, sfp)
+ eq(sfp.getvalue(), """\
+multipart/mixed
+ multipart/mixed
+ multipart/alternative
+ text/plain
+ text/plain
+ text/plain
+ text/plain
+""")
+
+ def test_nested_with_same_boundary(self):
+ eq = self.ndiffAssertEqual
+ # msg_39.txt is similarly evil in that it's got inner parts that use
+ # the same boundary as outer parts. Again, I believe the way this is
+ # parsed is closest to the spirit of RFC 2046.
+ msg = self._msgobj('msg_39.txt')
+ sfp = StringIO()
+ iterators._structure(msg, sfp)
+ eq(sfp.getvalue(), """\
+multipart/mixed
+ multipart/mixed
+ multipart/alternative
+ application/octet-stream
+ application/octet-stream
+ text/plain
+""")
+
+ def test_boundary_in_non_multipart(self):
+ msg = self._msgobj('msg_40.txt')
+ self.assertEqual(msg.as_string(), '''\
+MIME-Version: 1.0
+Content-Type: text/html; boundary="--961284236552522269"
+
+----961284236552522269
+Content-Type: text/html;
+Content-Transfer-Encoding: 7Bit
+
+<html></html>
+
+----961284236552522269--
+''')
+
+ def test_boundary_with_leading_space(self):
+ eq = self.assertEqual
+ msg = email.message_from_string('''\
+MIME-Version: 1.0
+Content-Type: multipart/mixed; boundary=" XXXX"
+
+-- XXXX
+Content-Type: text/plain
+
+
+-- XXXX
+Content-Type: text/plain
+
+-- XXXX--
+''')
+ self.failUnless(msg.is_multipart())
+ eq(msg.get_boundary(), ' XXXX')
+ eq(len(msg.get_payload()), 2)
+
+ def test_boundary_without_trailing_newline(self):
+ m = Parser().parsestr("""\
+Content-Type: multipart/mixed; boundary="===============0012394164=="
+MIME-Version: 1.0
+
+--===============0012394164==
+Content-Type: image/file1.jpg
+MIME-Version: 1.0
+Content-Transfer-Encoding: base64
+
+YXNkZg==
+--===============0012394164==--""")
+ self.assertEquals(m.get_payload(0).get_payload(), 'YXNkZg==')
+
+
+
+# Test some badly formatted messages
+class TestNonConformant(TestEmailBase):
+ def test_parse_missing_minor_type(self):
+ eq = self.assertEqual
+ msg = self._msgobj('msg_14.txt')
+ eq(msg.get_content_type(), 'text/plain')
+ eq(msg.get_content_maintype(), 'text')
+ eq(msg.get_content_subtype(), 'plain')
+
+ def test_same_boundary_inner_outer(self):
+ unless = self.failUnless
+ msg = self._msgobj('msg_15.txt')
+ # XXX We can probably eventually do better
+ inner = msg.get_payload(0)
+ unless(hasattr(inner, 'defects'))
+ self.assertEqual(len(inner.defects), 1)
+ unless(isinstance(inner.defects[0],
+ errors.StartBoundaryNotFoundDefect))
+
+ def test_multipart_no_boundary(self):
+ unless = self.failUnless
+ msg = self._msgobj('msg_25.txt')
+ unless(isinstance(msg.get_payload(), str))
+ self.assertEqual(len(msg.defects), 2)
+ unless(isinstance(msg.defects[0], errors.NoBoundaryInMultipartDefect))
+ unless(isinstance(msg.defects[1],
+ errors.MultipartInvariantViolationDefect))
+
+ def test_invalid_content_type(self):
+ eq = self.assertEqual
+ neq = self.ndiffAssertEqual
+ msg = Message()
+        # RFC 2045, section 5.2 says an invalid Content-Type yields text/plain
+ msg['Content-Type'] = 'text'
+ eq(msg.get_content_maintype(), 'text')
+ eq(msg.get_content_subtype(), 'plain')
+ eq(msg.get_content_type(), 'text/plain')
+ # Clear the old value and try something /really/ invalid
+ del msg['content-type']
+ msg['Content-Type'] = 'foo'
+ eq(msg.get_content_maintype(), 'text')
+ eq(msg.get_content_subtype(), 'plain')
+ eq(msg.get_content_type(), 'text/plain')
+ # Still, make sure that the message is idempotently generated
+ s = StringIO()
+ g = Generator(s)
+ g.flatten(msg)
+ neq(s.getvalue(), 'Content-Type: foo\n\n')
+
+ def test_no_start_boundary(self):
+ eq = self.ndiffAssertEqual
+ msg = self._msgobj('msg_31.txt')
+ eq(msg.get_payload(), """\
+--BOUNDARY
+Content-Type: text/plain
+
+message 1
+
+--BOUNDARY
+Content-Type: text/plain
+
+message 2
+
+--BOUNDARY--
+""")
+
+ def test_no_separating_blank_line(self):
+ eq = self.ndiffAssertEqual
+ msg = self._msgobj('msg_35.txt')
+ eq(msg.as_string(), """\
+From: aperson@dom.ain
+To: bperson@dom.ain
+Subject: here's something interesting
+
+counter to RFC 2822, there's no separating newline here
+""")
+
+ def test_lying_multipart(self):
+ unless = self.failUnless
+ msg = self._msgobj('msg_41.txt')
+ unless(hasattr(msg, 'defects'))
+ self.assertEqual(len(msg.defects), 2)
+ unless(isinstance(msg.defects[0], errors.NoBoundaryInMultipartDefect))
+ unless(isinstance(msg.defects[1],
+ errors.MultipartInvariantViolationDefect))
+
+ def test_missing_start_boundary(self):
+ outer = self._msgobj('msg_42.txt')
+ # The message structure is:
+ #
+ # multipart/mixed
+ # text/plain
+ # message/rfc822
+ # multipart/mixed [*]
+ #
+ # [*] This message is missing its start boundary
+ bad = outer.get_payload(1).get_payload(0)
+ self.assertEqual(len(bad.defects), 1)
+ self.failUnless(isinstance(bad.defects[0],
+ errors.StartBoundaryNotFoundDefect))
+
+
+
+# Test RFC 2047 header encoding and decoding
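+# For reference, an RFC 2047 encoded-word has the shape
+#   =?charset?encoding?encoded-text?=
+# where encoding is "q" (quoted-printable-like) or "b" (base64), as in the
+# =?mac-iceland?q?...?= samples below.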
+class TestRFC2047(unittest.TestCase):
+ def test_rfc2047_multiline(self):
+ eq = self.assertEqual
+ s = """Re: =?mac-iceland?q?r=8Aksm=9Arg=8Cs?= baz
+ foo bar =?mac-iceland?q?r=8Aksm=9Arg=8Cs?="""
+ dh = decode_header(s)
+ eq(dh, [
+ ('Re:', None),
+ ('r\x8aksm\x9arg\x8cs', 'mac-iceland'),
+ ('baz foo bar', None),
+ ('r\x8aksm\x9arg\x8cs', 'mac-iceland')])
+ eq(str(make_header(dh)),
+ """Re: =?mac-iceland?q?r=8Aksm=9Arg=8Cs?= baz foo bar
+ =?mac-iceland?q?r=8Aksm=9Arg=8Cs?=""")
+
+ def test_whitespace_eater_unicode(self):
+ eq = self.assertEqual
+ s = '=?ISO-8859-1?Q?Andr=E9?= Pirard <pirard@dom.ain>'
+ dh = decode_header(s)
+ eq(dh, [('Andr\xe9', 'iso-8859-1'), ('Pirard <pirard@dom.ain>', None)])
+ hu = unicode(make_header(dh)).encode('latin-1')
+ eq(hu, 'Andr\xe9 Pirard <pirard@dom.ain>')
+
+ def test_whitespace_eater_unicode_2(self):
+ eq = self.assertEqual
+ s = 'The =?iso-8859-1?b?cXVpY2sgYnJvd24gZm94?= jumped over the =?iso-8859-1?b?bGF6eSBkb2c=?='
+ dh = decode_header(s)
+ eq(dh, [('The', None), ('quick brown fox', 'iso-8859-1'),
+ ('jumped over the', None), ('lazy dog', 'iso-8859-1')])
+ hu = make_header(dh).__unicode__()
+ eq(hu, u'The quick brown fox jumped over the lazy dog')
+
+
+
+# Test the MIMEMessage class
+class TestMIMEMessage(TestEmailBase):
+ def setUp(self):
+ fp = openfile('msg_11.txt')
+ try:
+ self._text = fp.read()
+ finally:
+ fp.close()
+
+ def test_type_error(self):
+ self.assertRaises(TypeError, MIMEMessage, 'a plain string')
+
+ def test_valid_argument(self):
+ eq = self.assertEqual
+ unless = self.failUnless
+ subject = 'A sub-message'
+ m = Message()
+ m['Subject'] = subject
+ r = MIMEMessage(m)
+ eq(r.get_content_type(), 'message/rfc822')
+ payload = r.get_payload()
+ unless(isinstance(payload, list))
+ eq(len(payload), 1)
+ subpart = payload[0]
+ unless(subpart is m)
+ eq(subpart['subject'], subject)
+
+ def test_bad_multipart(self):
+ eq = self.assertEqual
+ msg1 = Message()
+ msg1['Subject'] = 'subpart 1'
+ msg2 = Message()
+ msg2['Subject'] = 'subpart 2'
+ r = MIMEMessage(msg1)
+ self.assertRaises(errors.MultipartConversionError, r.attach, msg2)
+
+ def test_generate(self):
+ # First craft the message to be encapsulated
+ m = Message()
+ m['Subject'] = 'An enclosed message'
+ m.set_payload('Here is the body of the message.\n')
+ r = MIMEMessage(m)
+ r['Subject'] = 'The enclosing message'
+ s = StringIO()
+ g = Generator(s)
+ g.flatten(r)
+ self.assertEqual(s.getvalue(), """\
+Content-Type: message/rfc822
+MIME-Version: 1.0
+Subject: The enclosing message
+
+Subject: An enclosed message
+
+Here is the body of the message.
+""")
+
+ def test_parse_message_rfc822(self):
+ eq = self.assertEqual
+ unless = self.failUnless
+ msg = self._msgobj('msg_11.txt')
+ eq(msg.get_content_type(), 'message/rfc822')
+ payload = msg.get_payload()
+ unless(isinstance(payload, list))
+ eq(len(payload), 1)
+ submsg = payload[0]
+ self.failUnless(isinstance(submsg, Message))
+ eq(submsg['subject'], 'An enclosed message')
+ eq(submsg.get_payload(), 'Here is the body of the message.\n')
+
+ def test_dsn(self):
+ eq = self.assertEqual
+ unless = self.failUnless
+        # msg_16.txt is a Delivery Status Notification, see RFC 1894
+ msg = self._msgobj('msg_16.txt')
+ eq(msg.get_content_type(), 'multipart/report')
+ unless(msg.is_multipart())
+ eq(len(msg.get_payload()), 3)
+        # Subpart 1 is a text/plain, human-readable section
+ subpart = msg.get_payload(0)
+ eq(subpart.get_content_type(), 'text/plain')
+ eq(subpart.get_payload(), """\
+This report relates to a message you sent with the following header fields:
+
+ Message-id: <002001c144a6$8752e060$56104586@oxy.edu>
+ Date: Sun, 23 Sep 2001 20:10:55 -0700
+ From: "Ian T. Henry" <henryi@oxy.edu>
+ To: SoCal Raves <scr@socal-raves.org>
+ Subject: [scr] yeah for Ians!!
+
+Your message cannot be delivered to the following recipients:
+
+ Recipient address: jangel1@cougar.noc.ucla.edu
+ Reason: recipient reached disk quota
+
+""")
+        # Subpart 2 contains the machine-parsable DSN information. It
+ # consists of two blocks of headers, represented by two nested Message
+ # objects.
+ subpart = msg.get_payload(1)
+ eq(subpart.get_content_type(), 'message/delivery-status')
+ eq(len(subpart.get_payload()), 2)
+ # message/delivery-status should treat each block as a bunch of
+ # headers, i.e. a bunch of Message objects.
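+        # For reference (hypothetical field values), a delivery-status body
+        # is just header blocks separated by blank lines, e.g.:
+        #
+        #   Reporting-MTA: dns; mail.example.com
+        #
+        #   Action: failed
+        #   Final-Recipient: rfc822; someone@example.com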
+ dsn1 = subpart.get_payload(0)
+ unless(isinstance(dsn1, Message))
+ eq(dsn1['original-envelope-id'], '0GK500B4HD0888@cougar.noc.ucla.edu')
+ eq(dsn1.get_param('dns', header='reporting-mta'), '')
+ # Try a missing one <wink>
+ eq(dsn1.get_param('nsd', header='reporting-mta'), None)
+ dsn2 = subpart.get_payload(1)
+ unless(isinstance(dsn2, Message))
+ eq(dsn2['action'], 'failed')
+ eq(dsn2.get_params(header='original-recipient'),
+ [('rfc822', ''), ('jangel1@cougar.noc.ucla.edu', '')])
+ eq(dsn2.get_param('rfc822', header='final-recipient'), '')
+ # Subpart 3 is the original message
+ subpart = msg.get_payload(2)
+ eq(subpart.get_content_type(), 'message/rfc822')
+ payload = subpart.get_payload()
+ unless(isinstance(payload, list))
+ eq(len(payload), 1)
+ subsubpart = payload[0]
+ unless(isinstance(subsubpart, Message))
+ eq(subsubpart.get_content_type(), 'text/plain')
+ eq(subsubpart['message-id'],
+ '<002001c144a6$8752e060$56104586@oxy.edu>')
+
+ def test_epilogue(self):
+ eq = self.ndiffAssertEqual
+ fp = openfile('msg_21.txt')
+ try:
+ text = fp.read()
+ finally:
+ fp.close()
+ msg = Message()
+ msg['From'] = 'aperson@dom.ain'
+ msg['To'] = 'bperson@dom.ain'
+ msg['Subject'] = 'Test'
+ msg.preamble = 'MIME message'
+ msg.epilogue = 'End of MIME message\n'
+ msg1 = MIMEText('One')
+ msg2 = MIMEText('Two')
+ msg.add_header('Content-Type', 'multipart/mixed', boundary='BOUNDARY')
+ msg.attach(msg1)
+ msg.attach(msg2)
+ sfp = StringIO()
+ g = Generator(sfp)
+ g.flatten(msg)
+ eq(sfp.getvalue(), text)
+
+ def test_no_nl_preamble(self):
+ eq = self.ndiffAssertEqual
+ msg = Message()
+ msg['From'] = 'aperson@dom.ain'
+ msg['To'] = 'bperson@dom.ain'
+ msg['Subject'] = 'Test'
+ msg.preamble = 'MIME message'
+ msg.epilogue = ''
+ msg1 = MIMEText('One')
+ msg2 = MIMEText('Two')
+ msg.add_header('Content-Type', 'multipart/mixed', boundary='BOUNDARY')
+ msg.attach(msg1)
+ msg.attach(msg2)
+ eq(msg.as_string(), """\
+From: aperson@dom.ain
+To: bperson@dom.ain
+Subject: Test
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+MIME message
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+
+One
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+
+Two
+--BOUNDARY--
+""")
+
+ def test_default_type(self):
+ eq = self.assertEqual
+ fp = openfile('msg_30.txt')
+ try:
+ msg = email.message_from_file(fp)
+ finally:
+ fp.close()
+ container1 = msg.get_payload(0)
+ eq(container1.get_default_type(), 'message/rfc822')
+ eq(container1.get_content_type(), 'message/rfc822')
+ container2 = msg.get_payload(1)
+ eq(container2.get_default_type(), 'message/rfc822')
+ eq(container2.get_content_type(), 'message/rfc822')
+ container1a = container1.get_payload(0)
+ eq(container1a.get_default_type(), 'text/plain')
+ eq(container1a.get_content_type(), 'text/plain')
+ container2a = container2.get_payload(0)
+ eq(container2a.get_default_type(), 'text/plain')
+ eq(container2a.get_content_type(), 'text/plain')
+
+ def test_default_type_with_explicit_container_type(self):
+ eq = self.assertEqual
+ fp = openfile('msg_28.txt')
+ try:
+ msg = email.message_from_file(fp)
+ finally:
+ fp.close()
+ container1 = msg.get_payload(0)
+ eq(container1.get_default_type(), 'message/rfc822')
+ eq(container1.get_content_type(), 'message/rfc822')
+ container2 = msg.get_payload(1)
+ eq(container2.get_default_type(), 'message/rfc822')
+ eq(container2.get_content_type(), 'message/rfc822')
+ container1a = container1.get_payload(0)
+ eq(container1a.get_default_type(), 'text/plain')
+ eq(container1a.get_content_type(), 'text/plain')
+ container2a = container2.get_payload(0)
+ eq(container2a.get_default_type(), 'text/plain')
+ eq(container2a.get_content_type(), 'text/plain')
+
+ def test_default_type_non_parsed(self):
+ eq = self.assertEqual
+ neq = self.ndiffAssertEqual
+ # Set up container
+ container = MIMEMultipart('digest', 'BOUNDARY')
+ container.epilogue = ''
+ # Set up subparts
+ subpart1a = MIMEText('message 1\n')
+ subpart2a = MIMEText('message 2\n')
+ subpart1 = MIMEMessage(subpart1a)
+ subpart2 = MIMEMessage(subpart2a)
+ container.attach(subpart1)
+ container.attach(subpart2)
+ eq(subpart1.get_content_type(), 'message/rfc822')
+ eq(subpart1.get_default_type(), 'message/rfc822')
+ eq(subpart2.get_content_type(), 'message/rfc822')
+ eq(subpart2.get_default_type(), 'message/rfc822')
+ neq(container.as_string(0), '''\
+Content-Type: multipart/digest; boundary="BOUNDARY"
+MIME-Version: 1.0
+
+--BOUNDARY
+Content-Type: message/rfc822
+MIME-Version: 1.0
+
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+
+message 1
+
+--BOUNDARY
+Content-Type: message/rfc822
+MIME-Version: 1.0
+
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+
+message 2
+
+--BOUNDARY--
+''')
+ del subpart1['content-type']
+ del subpart1['mime-version']
+ del subpart2['content-type']
+ del subpart2['mime-version']
+ eq(subpart1.get_content_type(), 'message/rfc822')
+ eq(subpart1.get_default_type(), 'message/rfc822')
+ eq(subpart2.get_content_type(), 'message/rfc822')
+ eq(subpart2.get_default_type(), 'message/rfc822')
+ neq(container.as_string(0), '''\
+Content-Type: multipart/digest; boundary="BOUNDARY"
+MIME-Version: 1.0
+
+--BOUNDARY
+
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+
+message 1
+
+--BOUNDARY
+
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+
+message 2
+
+--BOUNDARY--
+''')
+
+ def test_mime_attachments_in_constructor(self):
+ eq = self.assertEqual
+ text1 = MIMEText('')
+ text2 = MIMEText('')
+ msg = MIMEMultipart(_subparts=(text1, text2))
+ eq(len(msg.get_payload()), 2)
+ eq(msg.get_payload(0), text1)
+ eq(msg.get_payload(1), text2)
+
+
+
+# A general test of parser->model->generator idempotency. In other words,
+# read a message in, parse it into a message object tree, then without
+# touching the tree, regenerate the plain text. The original text and the
+# transformed text should be identical. Note that we ignore the Unix-From
+# since that may contain a changed date.
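+#
+# A minimal sketch of the round trip being exercised (this mirrors the
+# _msgobj()/_idempotent() helpers below):
+#
+#   msg = email.message_from_string(text)
+#   s = StringIO()
+#   Generator(s, maxheaderlen=0).flatten(msg)
+#   assert s.getvalue() == text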
+class TestIdempotent(TestEmailBase):
+ def _msgobj(self, filename):
+ fp = openfile(filename)
+ try:
+ data = fp.read()
+ finally:
+ fp.close()
+ msg = email.message_from_string(data)
+ return msg, data
+
+ def _idempotent(self, msg, text):
+ eq = self.ndiffAssertEqual
+ s = StringIO()
+ g = Generator(s, maxheaderlen=0)
+ g.flatten(msg)
+ eq(text, s.getvalue())
+
+ def test_parse_text_message(self):
+ eq = self.assertEquals
+ msg, text = self._msgobj('msg_01.txt')
+ eq(msg.get_content_type(), 'text/plain')
+ eq(msg.get_content_maintype(), 'text')
+ eq(msg.get_content_subtype(), 'plain')
+ eq(msg.get_params()[1], ('charset', 'us-ascii'))
+ eq(msg.get_param('charset'), 'us-ascii')
+ eq(msg.preamble, None)
+ eq(msg.epilogue, None)
+ self._idempotent(msg, text)
+
+ def test_parse_untyped_message(self):
+ eq = self.assertEquals
+ msg, text = self._msgobj('msg_03.txt')
+ eq(msg.get_content_type(), 'text/plain')
+ eq(msg.get_params(), None)
+ eq(msg.get_param('charset'), None)
+ self._idempotent(msg, text)
+
+ def test_simple_multipart(self):
+ msg, text = self._msgobj('msg_04.txt')
+ self._idempotent(msg, text)
+
+ def test_MIME_digest(self):
+ msg, text = self._msgobj('msg_02.txt')
+ self._idempotent(msg, text)
+
+ def test_long_header(self):
+ msg, text = self._msgobj('msg_27.txt')
+ self._idempotent(msg, text)
+
+ def test_MIME_digest_with_part_headers(self):
+ msg, text = self._msgobj('msg_28.txt')
+ self._idempotent(msg, text)
+
+ def test_mixed_with_image(self):
+ msg, text = self._msgobj('msg_06.txt')
+ self._idempotent(msg, text)
+
+ def test_multipart_report(self):
+ msg, text = self._msgobj('msg_05.txt')
+ self._idempotent(msg, text)
+
+ def test_dsn(self):
+ msg, text = self._msgobj('msg_16.txt')
+ self._idempotent(msg, text)
+
+ def test_preamble_epilogue(self):
+ msg, text = self._msgobj('msg_21.txt')
+ self._idempotent(msg, text)
+
+ def test_multipart_one_part(self):
+ msg, text = self._msgobj('msg_23.txt')
+ self._idempotent(msg, text)
+
+ def test_multipart_no_parts(self):
+ msg, text = self._msgobj('msg_24.txt')
+ self._idempotent(msg, text)
+
+ def test_no_start_boundary(self):
+ msg, text = self._msgobj('msg_31.txt')
+ self._idempotent(msg, text)
+
+ def test_rfc2231_charset(self):
+ msg, text = self._msgobj('msg_32.txt')
+ self._idempotent(msg, text)
+
+ def test_more_rfc2231_parameters(self):
+ msg, text = self._msgobj('msg_33.txt')
+ self._idempotent(msg, text)
+
+ def test_text_plain_in_a_multipart_digest(self):
+ msg, text = self._msgobj('msg_34.txt')
+ self._idempotent(msg, text)
+
+ def test_nested_multipart_mixeds(self):
+ msg, text = self._msgobj('msg_12a.txt')
+ self._idempotent(msg, text)
+
+ def test_message_external_body_idempotent(self):
+ msg, text = self._msgobj('msg_36.txt')
+ self._idempotent(msg, text)
+
+ def test_content_type(self):
+ eq = self.assertEquals
+ unless = self.failUnless
+        # Get a message object
+ msg, text = self._msgobj('msg_05.txt')
+ eq(msg.get_content_type(), 'multipart/report')
+ # Test the Content-Type: parameters
+ params = {}
+ for pk, pv in msg.get_params():
+ params[pk] = pv
+ eq(params['report-type'], 'delivery-status')
+ eq(params['boundary'], 'D1690A7AC1.996856090/mail.example.com')
+ eq(msg.preamble, 'This is a MIME-encapsulated message.\n')
+ eq(msg.epilogue, '\n')
+ eq(len(msg.get_payload()), 3)
+ # Make sure the subparts are what we expect
+ msg1 = msg.get_payload(0)
+ eq(msg1.get_content_type(), 'text/plain')
+ eq(msg1.get_payload(), 'Yadda yadda yadda\n')
+ msg2 = msg.get_payload(1)
+ eq(msg2.get_content_type(), 'text/plain')
+ eq(msg2.get_payload(), 'Yadda yadda yadda\n')
+ msg3 = msg.get_payload(2)
+ eq(msg3.get_content_type(), 'message/rfc822')
+ self.failUnless(isinstance(msg3, Message))
+ payload = msg3.get_payload()
+ unless(isinstance(payload, list))
+ eq(len(payload), 1)
+ msg4 = payload[0]
+ unless(isinstance(msg4, Message))
+ eq(msg4.get_payload(), 'Yadda yadda yadda\n')
+
+ def test_parser(self):
+ eq = self.assertEquals
+ unless = self.failUnless
+ msg, text = self._msgobj('msg_06.txt')
+ # Check some of the outer headers
+ eq(msg.get_content_type(), 'message/rfc822')
+ # Make sure the payload is a list of exactly one sub-Message, and that
+ # that submessage has a type of text/plain
+ payload = msg.get_payload()
+ unless(isinstance(payload, list))
+ eq(len(payload), 1)
+ msg1 = payload[0]
+ self.failUnless(isinstance(msg1, Message))
+ eq(msg1.get_content_type(), 'text/plain')
+ self.failUnless(isinstance(msg1.get_payload(), str))
+ eq(msg1.get_payload(), '\n')
+
+
+
+# Test various other bits of the package's functionality
+class TestMiscellaneous(TestEmailBase):
+ def test_message_from_string(self):
+ fp = openfile('msg_01.txt')
+ try:
+ text = fp.read()
+ finally:
+ fp.close()
+ msg = email.message_from_string(text)
+ s = StringIO()
+ # Don't wrap/continue long headers since we're trying to test
+ # idempotency.
+ g = Generator(s, maxheaderlen=0)
+ g.flatten(msg)
+ self.assertEqual(text, s.getvalue())
+
+ def test_message_from_file(self):
+ fp = openfile('msg_01.txt')
+ try:
+ text = fp.read()
+ fp.seek(0)
+ msg = email.message_from_file(fp)
+ s = StringIO()
+ # Don't wrap/continue long headers since we're trying to test
+ # idempotency.
+ g = Generator(s, maxheaderlen=0)
+ g.flatten(msg)
+ self.assertEqual(text, s.getvalue())
+ finally:
+ fp.close()
+
+ def test_message_from_string_with_class(self):
+ unless = self.failUnless
+ fp = openfile('msg_01.txt')
+ try:
+ text = fp.read()
+ finally:
+ fp.close()
+ # Create a subclass
+ class MyMessage(Message):
+ pass
+
+ msg = email.message_from_string(text, MyMessage)
+ unless(isinstance(msg, MyMessage))
+ # Try something more complicated
+ fp = openfile('msg_02.txt')
+ try:
+ text = fp.read()
+ finally:
+ fp.close()
+ msg = email.message_from_string(text, MyMessage)
+ for subpart in msg.walk():
+ unless(isinstance(subpart, MyMessage))
+
+ def test_message_from_file_with_class(self):
+ unless = self.failUnless
+ # Create a subclass
+ class MyMessage(Message):
+ pass
+
+ fp = openfile('msg_01.txt')
+ try:
+ msg = email.message_from_file(fp, MyMessage)
+ finally:
+ fp.close()
+ unless(isinstance(msg, MyMessage))
+ # Try something more complicated
+ fp = openfile('msg_02.txt')
+ try:
+ msg = email.message_from_file(fp, MyMessage)
+ finally:
+ fp.close()
+ for subpart in msg.walk():
+ unless(isinstance(subpart, MyMessage))
+
+ def test__all__(self):
+ module = __import__('email')
+ # Can't use sorted() here due to Python 2.3 compatibility
+ all = module.__all__[:]
+ all.sort()
+ self.assertEqual(all, [
+ # Old names
+ 'Charset', 'Encoders', 'Errors', 'Generator',
+ 'Header', 'Iterators', 'MIMEAudio', 'MIMEBase',
+ 'MIMEImage', 'MIMEMessage', 'MIMEMultipart',
+ 'MIMENonMultipart', 'MIMEText', 'Message',
+ 'Parser', 'Utils', 'base64MIME',
+            # New names
+ 'base64mime', 'charset', 'encoders', 'errors', 'generator',
+ 'header', 'iterators', 'message', 'message_from_file',
+ 'message_from_string', 'mime', 'parser',
+ 'quopriMIME', 'quoprimime', 'utils',
+ ])
+
+ def test_formatdate(self):
+ now = time.time()
+ self.assertEqual(utils.parsedate(utils.formatdate(now))[:6],
+ time.gmtime(now)[:6])
+
+ def test_formatdate_localtime(self):
+ now = time.time()
+ self.assertEqual(
+ utils.parsedate(utils.formatdate(now, localtime=True))[:6],
+ time.localtime(now)[:6])
+
+ def test_formatdate_usegmt(self):
+ now = time.time()
+ self.assertEqual(
+ utils.formatdate(now, localtime=False),
+ time.strftime('%a, %d %b %Y %H:%M:%S -0000', time.gmtime(now)))
+ self.assertEqual(
+ utils.formatdate(now, localtime=False, usegmt=True),
+ time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime(now)))
+
+ def test_parsedate_none(self):
+ self.assertEqual(utils.parsedate(''), None)
+
+ def test_parsedate_compact(self):
+        # The FWS (folding whitespace) after the comma is optional
+ self.assertEqual(utils.parsedate('Wed,3 Apr 2002 14:58:26 +0800'),
+ utils.parsedate('Wed, 3 Apr 2002 14:58:26 +0800'))
+
+ def test_parsedate_no_dayofweek(self):
+ eq = self.assertEqual
+ eq(utils.parsedate_tz('25 Feb 2003 13:47:26 -0800'),
+ (2003, 2, 25, 13, 47, 26, 0, 1, -1, -28800))
+
+ def test_parsedate_compact_no_dayofweek(self):
+ eq = self.assertEqual
+ eq(utils.parsedate_tz('5 Feb 2003 13:47:26 -0800'),
+ (2003, 2, 5, 13, 47, 26, 0, 1, -1, -28800))
+
+ def test_parsedate_acceptable_to_time_functions(self):
+ eq = self.assertEqual
+ timetup = utils.parsedate('5 Feb 2003 13:47:26 -0800')
+ t = int(time.mktime(timetup))
+ eq(time.localtime(t)[:6], timetup[:6])
+ eq(int(time.strftime('%Y', timetup)), 2003)
+ timetup = utils.parsedate_tz('5 Feb 2003 13:47:26 -0800')
+ t = int(time.mktime(timetup[:9]))
+ eq(time.localtime(t)[:6], timetup[:6])
+ eq(int(time.strftime('%Y', timetup[:9])), 2003)
+
+ def test_parseaddr_empty(self):
+ self.assertEqual(utils.parseaddr('<>'), ('', ''))
+ self.assertEqual(utils.formataddr(utils.parseaddr('<>')), '')
+
+ def test_noquote_dump(self):
+ self.assertEqual(
+ utils.formataddr(('A Silly Person', 'person@dom.ain')),
+ 'A Silly Person <person@dom.ain>')
+
+ def test_escape_dump(self):
+ self.assertEqual(
+ utils.formataddr(('A (Very) Silly Person', 'person@dom.ain')),
+ r'"A \(Very\) Silly Person" <person@dom.ain>')
+ a = r'A \(Special\) Person'
+ b = 'person@dom.ain'
+ self.assertEqual(utils.parseaddr(utils.formataddr((a, b))), (a, b))
+
+ def test_escape_backslashes(self):
+ self.assertEqual(
+ utils.formataddr(('Arthur \Backslash\ Foobar', 'person@dom.ain')),
+ r'"Arthur \\Backslash\\ Foobar" <person@dom.ain>')
+ a = r'Arthur \Backslash\ Foobar'
+ b = 'person@dom.ain'
+ self.assertEqual(utils.parseaddr(utils.formataddr((a, b))), (a, b))
+
+ def test_name_with_dot(self):
+ x = 'John X. Doe <jxd@example.com>'
+ y = '"John X. Doe" <jxd@example.com>'
+ a, b = ('John X. Doe', 'jxd@example.com')
+ self.assertEqual(utils.parseaddr(x), (a, b))
+ self.assertEqual(utils.parseaddr(y), (a, b))
+ # formataddr() quotes the name if there's a dot in it
+ self.assertEqual(utils.formataddr((a, b)), y)
+
+ def test_quote_dump(self):
+ self.assertEqual(
+ utils.formataddr(('A Silly; Person', 'person@dom.ain')),
+ r'"A Silly; Person" <person@dom.ain>')
+
+ def test_fix_eols(self):
+ eq = self.assertEqual
+ eq(utils.fix_eols('hello'), 'hello')
+ eq(utils.fix_eols('hello\n'), 'hello\r\n')
+ eq(utils.fix_eols('hello\r'), 'hello\r\n')
+ eq(utils.fix_eols('hello\r\n'), 'hello\r\n')
+ eq(utils.fix_eols('hello\n\r'), 'hello\r\n\r\n')
+
+ def test_charset_richcomparisons(self):
+ eq = self.assertEqual
+ ne = self.failIfEqual
+ cset1 = Charset()
+ cset2 = Charset()
+ eq(cset1, 'us-ascii')
+ eq(cset1, 'US-ASCII')
+ eq(cset1, 'Us-AsCiI')
+ eq('us-ascii', cset1)
+ eq('US-ASCII', cset1)
+ eq('Us-AsCiI', cset1)
+ ne(cset1, 'usascii')
+ ne(cset1, 'USASCII')
+ ne(cset1, 'UsAsCiI')
+ ne('usascii', cset1)
+ ne('USASCII', cset1)
+ ne('UsAsCiI', cset1)
+ eq(cset1, cset2)
+ eq(cset2, cset1)
+
+ def test_getaddresses(self):
+ eq = self.assertEqual
+ eq(utils.getaddresses(['aperson@dom.ain (Al Person)',
+ 'Bud Person <bperson@dom.ain>']),
+ [('Al Person', 'aperson@dom.ain'),
+ ('Bud Person', 'bperson@dom.ain')])
+
+ def test_getaddresses_nasty(self):
+ eq = self.assertEqual
+ eq(utils.getaddresses(['foo: ;']), [('', '')])
+ eq(utils.getaddresses(
+ ['[]*-- =~$']),
+ [('', ''), ('', ''), ('', '*--')])
+ eq(utils.getaddresses(
+ ['foo: ;', '"Jason R. Mastaler" <jason@dom.ain>']),
+ [('', ''), ('Jason R. Mastaler', 'jason@dom.ain')])
+
+ def test_utils_quote_unquote(self):
+ eq = self.assertEqual
+ msg = Message()
+ msg.add_header('content-disposition', 'attachment',
+ filename='foo\\wacky"name')
+ eq(msg.get_filename(), 'foo\\wacky"name')
+
+ def test_get_body_encoding_with_bogus_charset(self):
+ charset = Charset('not a charset')
+ self.assertEqual(charset.get_body_encoding(), 'base64')
+
+ def test_get_body_encoding_with_uppercase_charset(self):
+ eq = self.assertEqual
+ msg = Message()
+ msg['Content-Type'] = 'text/plain; charset=UTF-8'
+ eq(msg['content-type'], 'text/plain; charset=UTF-8')
+ charsets = msg.get_charsets()
+ eq(len(charsets), 1)
+ eq(charsets[0], 'utf-8')
+ charset = Charset(charsets[0])
+ eq(charset.get_body_encoding(), 'base64')
+ msg.set_payload('hello world', charset=charset)
+ eq(msg.get_payload(), 'aGVsbG8gd29ybGQ=\n')
+ eq(msg.get_payload(decode=True), 'hello world')
+ eq(msg['content-transfer-encoding'], 'base64')
+ # Try another one
+ msg = Message()
+ msg['Content-Type'] = 'text/plain; charset="US-ASCII"'
+ charsets = msg.get_charsets()
+ eq(len(charsets), 1)
+ eq(charsets[0], 'us-ascii')
+ charset = Charset(charsets[0])
+ eq(charset.get_body_encoding(), encoders.encode_7or8bit)
+ msg.set_payload('hello world', charset=charset)
+ eq(msg.get_payload(), 'hello world')
+ eq(msg['content-transfer-encoding'], '7bit')
+
+ def test_charsets_case_insensitive(self):
+ lc = Charset('us-ascii')
+ uc = Charset('US-ASCII')
+ self.assertEqual(lc.get_body_encoding(), uc.get_body_encoding())
+
+ def test_partial_falls_inside_message_delivery_status(self):
+ eq = self.ndiffAssertEqual
+        # The Parser interface provides chunks of data to FeedParser in
+        # 8192-byte gulps. SF bug #1076485 found one of those chunks inside
+        # a message/delivery-status header block, which triggered an
+        # unreadline() of NeedMoreData.
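+        # Roughly, the feed loop being exercised (a simplification of the
+        # parser module's read loop, shown here only for orientation):
+        #
+        #   while True:
+        #       data = fp.read(8192)
+        #       if not data:
+        #           break
+        #       feedparser.feed(data)
+        #
+        # so a chunk boundary can fall anywhere, including inside the
+        # message/delivery-status header block.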
+ msg = self._msgobj('msg_43.txt')
+ sfp = StringIO()
+ iterators._structure(msg, sfp)
+ eq(sfp.getvalue(), """\
+multipart/report
+ text/plain
+ message/delivery-status
+ text/plain
+ text/plain
+ text/plain
+ text/plain
+ text/plain
+ text/plain
+ text/plain
+ text/plain
+ text/plain
+ text/plain
+ text/plain
+ text/plain
+ text/plain
+ text/plain
+ text/plain
+ text/plain
+ text/plain
+ text/plain
+ text/plain
+ text/plain
+ text/plain
+ text/plain
+ text/plain
+ text/plain
+ text/plain
+ text/plain
+ text/rfc822-headers
+""")
+
+
+
+# Test the iterator/generator functions
+class TestIterators(TestEmailBase):
+ def test_body_line_iterator(self):
+ eq = self.assertEqual
+ neq = self.ndiffAssertEqual
+ # First a simple non-multipart message
+ msg = self._msgobj('msg_01.txt')
+ it = iterators.body_line_iterator(msg)
+ lines = list(it)
+ eq(len(lines), 6)
+ neq(EMPTYSTRING.join(lines), msg.get_payload())
+ # Now a more complicated multipart
+ msg = self._msgobj('msg_02.txt')
+ it = iterators.body_line_iterator(msg)
+ lines = list(it)
+ eq(len(lines), 43)
+ fp = openfile('msg_19.txt')
+ try:
+ neq(EMPTYSTRING.join(lines), fp.read())
+ finally:
+ fp.close()
+
+ def test_typed_subpart_iterator(self):
+ eq = self.assertEqual
+ msg = self._msgobj('msg_04.txt')
+ it = iterators.typed_subpart_iterator(msg, 'text')
+ lines = []
+ subparts = 0
+ for subpart in it:
+ subparts += 1
+ lines.append(subpart.get_payload())
+ eq(subparts, 2)
+ eq(EMPTYSTRING.join(lines), """\
+a simple kind of mirror
+to reflect upon our own
+a simple kind of mirror
+to reflect upon our own
+""")
+
+ def test_typed_subpart_iterator_default_type(self):
+ eq = self.assertEqual
+ msg = self._msgobj('msg_03.txt')
+ it = iterators.typed_subpart_iterator(msg, 'text', 'plain')
+ lines = []
+ subparts = 0
+ for subpart in it:
+ subparts += 1
+ lines.append(subpart.get_payload())
+ eq(subparts, 1)
+ eq(EMPTYSTRING.join(lines), """\
+
+Hi,
+
+Do you like this message?
+
+-Me
+""")
+
+
+
+class TestParsers(TestEmailBase):
+ def test_header_parser(self):
+ eq = self.assertEqual
+ # Parse only the headers of a complex multipart MIME document
+ fp = openfile('msg_02.txt')
+ try:
+ msg = HeaderParser().parse(fp)
+ finally:
+ fp.close()
+ eq(msg['from'], 'ppp-request@zzz.org')
+ eq(msg['to'], 'ppp@zzz.org')
+ eq(msg.get_content_type(), 'multipart/mixed')
+ self.failIf(msg.is_multipart())
+ self.failUnless(isinstance(msg.get_payload(), str))
+
+ def test_whitespace_continuation(self):
+ eq = self.assertEqual
+ # This message contains a line after the Subject: header that has only
+ # whitespace, but it is not empty!
+ msg = email.message_from_string("""\
+From: aperson@dom.ain
+To: bperson@dom.ain
+Subject: the next line has a space on it
+\x20
+Date: Mon, 8 Apr 2002 15:09:19 -0400
+Message-ID: spam
+
+Here's the message body
+""")
+ eq(msg['subject'], 'the next line has a space on it\n ')
+ eq(msg['message-id'], 'spam')
+ eq(msg.get_payload(), "Here's the message body\n")
+
+ def test_whitespace_continuation_last_header(self):
+ eq = self.assertEqual
+ # Like the previous test, but the subject line is the last
+ # header.
+ msg = email.message_from_string("""\
+From: aperson@dom.ain
+To: bperson@dom.ain
+Date: Mon, 8 Apr 2002 15:09:19 -0400
+Message-ID: spam
+Subject: the next line has a space on it
+\x20
+
+Here's the message body
+""")
+ eq(msg['subject'], 'the next line has a space on it\n ')
+ eq(msg['message-id'], 'spam')
+ eq(msg.get_payload(), "Here's the message body\n")
+
+ def test_crlf_separation(self):
+ eq = self.assertEqual
+ fp = openfile('msg_26.txt', mode='rb')
+ try:
+ msg = Parser().parse(fp)
+ finally:
+ fp.close()
+ eq(len(msg.get_payload()), 2)
+ part1 = msg.get_payload(0)
+ eq(part1.get_content_type(), 'text/plain')
+ eq(part1.get_payload(), 'Simple email with attachment.\r\n\r\n')
+ part2 = msg.get_payload(1)
+ eq(part2.get_content_type(), 'application/riscos')
+
+ def test_multipart_digest_with_extra_mime_headers(self):
+ eq = self.assertEqual
+ neq = self.ndiffAssertEqual
+ fp = openfile('msg_28.txt')
+ try:
+ msg = email.message_from_file(fp)
+ finally:
+ fp.close()
+ # Structure is:
+ # multipart/digest
+ # message/rfc822
+ # text/plain
+ # message/rfc822
+ # text/plain
+ eq(msg.is_multipart(), 1)
+ eq(len(msg.get_payload()), 2)
+ part1 = msg.get_payload(0)
+ eq(part1.get_content_type(), 'message/rfc822')
+ eq(part1.is_multipart(), 1)
+ eq(len(part1.get_payload()), 1)
+ part1a = part1.get_payload(0)
+ eq(part1a.is_multipart(), 0)
+ eq(part1a.get_content_type(), 'text/plain')
+ neq(part1a.get_payload(), 'message 1\n')
+ # next message/rfc822
+ part2 = msg.get_payload(1)
+ eq(part2.get_content_type(), 'message/rfc822')
+ eq(part2.is_multipart(), 1)
+ eq(len(part2.get_payload()), 1)
+ part2a = part2.get_payload(0)
+ eq(part2a.is_multipart(), 0)
+ eq(part2a.get_content_type(), 'text/plain')
+ neq(part2a.get_payload(), 'message 2\n')
+
+ def test_three_lines(self):
+ # A bug report by Andrew McNamara
+ lines = ['From: Andrew Person <aperson@dom.ain',
+ 'Subject: Test',
+ 'Date: Tue, 20 Aug 2002 16:43:45 +1000']
+ msg = email.message_from_string(NL.join(lines))
+ self.assertEqual(msg['date'], 'Tue, 20 Aug 2002 16:43:45 +1000')
+
+ def test_strip_line_feed_and_carriage_return_in_headers(self):
+ eq = self.assertEqual
+ # For [ 1002475 ] email message parser doesn't handle \r\n correctly
+ value1 = 'text'
+ value2 = 'more text'
+ m = 'Header: %s\r\nNext-Header: %s\r\n\r\nBody\r\n\r\n' % (
+ value1, value2)
+ msg = email.message_from_string(m)
+ eq(msg.get('Header'), value1)
+ eq(msg.get('Next-Header'), value2)
+
+ def test_rfc2822_header_syntax(self):
+ eq = self.assertEqual
+ m = '>From: foo\nFrom: bar\n!"#QUX;~: zoo\n\nbody'
+ msg = email.message_from_string(m)
+ eq(len(msg.keys()), 3)
+ keys = msg.keys()
+ keys.sort()
+ eq(keys, ['!"#QUX;~', '>From', 'From'])
+ eq(msg.get_payload(), 'body')
+
+ def test_rfc2822_space_not_allowed_in_header(self):
+ eq = self.assertEqual
+ m = '>From foo@example.com 11:25:53\nFrom: bar\n!"#QUX;~: zoo\n\nbody'
+ msg = email.message_from_string(m)
+ eq(len(msg.keys()), 0)
+
+ def test_rfc2822_one_character_header(self):
+ eq = self.assertEqual
+ m = 'A: first header\nB: second header\nCC: third header\n\nbody'
+ msg = email.message_from_string(m)
+ headers = msg.keys()
+ headers.sort()
+ eq(headers, ['A', 'B', 'CC'])
+ eq(msg.get_payload(), 'body')
+
+
+
+class TestBase64(unittest.TestCase):
+ def test_len(self):
+ eq = self.assertEqual
+ eq(base64mime.base64_len('hello'),
+ len(base64mime.encode('hello', eol='')))
+ for size in range(15):
+ if size == 0 : bsize = 0
+ elif size <= 3 : bsize = 4
+ elif size <= 6 : bsize = 8
+ elif size <= 9 : bsize = 12
+ elif size <= 12: bsize = 16
+ else : bsize = 20
+ eq(base64mime.base64_len('x'*size), bsize)
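+        # The table above is just 4 * ceil(size / 3); in Python,
+        # (size + 2) // 3 * 4.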
+
+ def test_decode(self):
+ eq = self.assertEqual
+ eq(base64mime.decode(''), '')
+ eq(base64mime.decode('aGVsbG8='), 'hello')
+ eq(base64mime.decode('aGVsbG8=', 'X'), 'hello')
+ eq(base64mime.decode('aGVsbG8NCndvcmxk\n', 'X'), 'helloXworld')
+
+ def test_encode(self):
+ eq = self.assertEqual
+ eq(base64mime.encode(''), '')
+ eq(base64mime.encode('hello'), 'aGVsbG8=\n')
+ # Test the binary flag
+ eq(base64mime.encode('hello\n'), 'aGVsbG8K\n')
+ eq(base64mime.encode('hello\n', 0), 'aGVsbG8NCg==\n')
+ # Test the maxlinelen arg
+ eq(base64mime.encode('xxxx ' * 20, maxlinelen=40), """\
+eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
+eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
+eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
+eHh4eCB4eHh4IA==
+""")
+ # Test the eol argument
+ eq(base64mime.encode('xxxx ' * 20, maxlinelen=40, eol='\r\n'), """\
+eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
+eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
+eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
+eHh4eCB4eHh4IA==\r
+""")
+
+ def test_header_encode(self):
+ eq = self.assertEqual
+ he = base64mime.header_encode
+ eq(he('hello'), '=?iso-8859-1?b?aGVsbG8=?=')
+ eq(he('hello\nworld'), '=?iso-8859-1?b?aGVsbG8NCndvcmxk?=')
+ # Test the charset option
+ eq(he('hello', charset='iso-8859-2'), '=?iso-8859-2?b?aGVsbG8=?=')
+ # Test the keep_eols flag
+ eq(he('hello\nworld', keep_eols=True),
+ '=?iso-8859-1?b?aGVsbG8Kd29ybGQ=?=')
+ # Test the maxlinelen argument
+ eq(he('xxxx ' * 20, maxlinelen=40), """\
+=?iso-8859-1?b?eHh4eCB4eHh4IHh4eHggeHg=?=
+ =?iso-8859-1?b?eHggeHh4eCB4eHh4IHh4eHg=?=
+ =?iso-8859-1?b?IHh4eHggeHh4eCB4eHh4IHg=?=
+ =?iso-8859-1?b?eHh4IHh4eHggeHh4eCB4eHg=?=
+ =?iso-8859-1?b?eCB4eHh4IHh4eHggeHh4eCA=?=
+ =?iso-8859-1?b?eHh4eCB4eHh4IHh4eHgg?=""")
+ # Test the eol argument
+ eq(he('xxxx ' * 20, maxlinelen=40, eol='\r\n'), """\
+=?iso-8859-1?b?eHh4eCB4eHh4IHh4eHggeHg=?=\r
+ =?iso-8859-1?b?eHggeHh4eCB4eHh4IHh4eHg=?=\r
+ =?iso-8859-1?b?IHh4eHggeHh4eCB4eHh4IHg=?=\r
+ =?iso-8859-1?b?eHh4IHh4eHggeHh4eCB4eHg=?=\r
+ =?iso-8859-1?b?eCB4eHh4IHh4eHggeHh4eCA=?=\r
+ =?iso-8859-1?b?eHh4eCB4eHh4IHh4eHgg?=""")
+
+
+
+class TestQuopri(unittest.TestCase):
+ def setUp(self):
+ self.hlit = [chr(x) for x in range(ord('a'), ord('z')+1)] + \
+ [chr(x) for x in range(ord('A'), ord('Z')+1)] + \
+ [chr(x) for x in range(ord('0'), ord('9')+1)] + \
+ ['!', '*', '+', '-', '/', ' ']
+ self.hnon = [chr(x) for x in range(256) if chr(x) not in self.hlit]
+ assert len(self.hlit) + len(self.hnon) == 256
+ self.blit = [chr(x) for x in range(ord(' '), ord('~')+1)] + ['\t']
+ self.blit.remove('=')
+ self.bnon = [chr(x) for x in range(256) if chr(x) not in self.blit]
+ assert len(self.blit) + len(self.bnon) == 256
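+        # hlit approximates the RFC 2047 "Q" literal set (letters, digits
+        # and a few specials); blit is the RFC 2045 printable body range
+        # plus TAB, minus '='. Everything else must be =XX-quoted.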
+
+ def test_header_quopri_check(self):
+ for c in self.hlit:
+ self.failIf(quoprimime.header_quopri_check(c))
+ for c in self.hnon:
+ self.failUnless(quoprimime.header_quopri_check(c))
+
+ def test_body_quopri_check(self):
+ for c in self.blit:
+ self.failIf(quoprimime.body_quopri_check(c))
+ for c in self.bnon:
+ self.failUnless(quoprimime.body_quopri_check(c))
+
+ def test_header_quopri_len(self):
+ eq = self.assertEqual
+ hql = quoprimime.header_quopri_len
+ enc = quoprimime.header_encode
+ for s in ('hello', 'h@e@l@l@o@'):
+            # Empty charset and no line-endings. 7 == len of the RFC 2047
+            # chrome: '=?' + '?q?' + '?='
+ eq(hql(s), len(enc(s, charset='', eol=''))-7)
+ for c in self.hlit:
+ eq(hql(c), 1)
+ for c in self.hnon:
+ eq(hql(c), 3)
+
+ def test_body_quopri_len(self):
+ eq = self.assertEqual
+ bql = quoprimime.body_quopri_len
+ for c in self.blit:
+ eq(bql(c), 1)
+ for c in self.bnon:
+ eq(bql(c), 3)
+
+ def test_quote_unquote_idempotent(self):
+ for x in range(256):
+ c = chr(x)
+ self.assertEqual(quoprimime.unquote(quoprimime.quote(c)), c)
+
+ def test_header_encode(self):
+ eq = self.assertEqual
+ he = quoprimime.header_encode
+ eq(he('hello'), '=?iso-8859-1?q?hello?=')
+ eq(he('hello\nworld'), '=?iso-8859-1?q?hello=0D=0Aworld?=')
+ # Test the charset option
+ eq(he('hello', charset='iso-8859-2'), '=?iso-8859-2?q?hello?=')
+ # Test the keep_eols flag
+ eq(he('hello\nworld', keep_eols=True), '=?iso-8859-1?q?hello=0Aworld?=')
+ # Test a non-ASCII character
+ eq(he('hello\xc7there'), '=?iso-8859-1?q?hello=C7there?=')
+ # Test the maxlinelen argument
+ eq(he('xxxx ' * 20, maxlinelen=40), """\
+=?iso-8859-1?q?xxxx_xxxx_xxxx_xxxx_xx?=
+ =?iso-8859-1?q?xx_xxxx_xxxx_xxxx_xxxx?=
+ =?iso-8859-1?q?_xxxx_xxxx_xxxx_xxxx_x?=
+ =?iso-8859-1?q?xxx_xxxx_xxxx_xxxx_xxx?=
+ =?iso-8859-1?q?x_xxxx_xxxx_?=""")
+ # Test the eol argument
+ eq(he('xxxx ' * 20, maxlinelen=40, eol='\r\n'), """\
+=?iso-8859-1?q?xxxx_xxxx_xxxx_xxxx_xx?=\r
+ =?iso-8859-1?q?xx_xxxx_xxxx_xxxx_xxxx?=\r
+ =?iso-8859-1?q?_xxxx_xxxx_xxxx_xxxx_x?=\r
+ =?iso-8859-1?q?xxx_xxxx_xxxx_xxxx_xxx?=\r
+ =?iso-8859-1?q?x_xxxx_xxxx_?=""")
+
+ def test_decode(self):
+ eq = self.assertEqual
+ eq(quoprimime.decode(''), '')
+ eq(quoprimime.decode('hello'), 'hello')
+ eq(quoprimime.decode('hello', 'X'), 'hello')
+ eq(quoprimime.decode('hello\nworld', 'X'), 'helloXworld')
+
+ def test_encode(self):
+ eq = self.assertEqual
+ eq(quoprimime.encode(''), '')
+ eq(quoprimime.encode('hello'), 'hello')
+ # Test the binary flag
+ eq(quoprimime.encode('hello\r\nworld'), 'hello\nworld')
+ eq(quoprimime.encode('hello\r\nworld', 0), 'hello\nworld')
+ # Test the maxlinelen arg
+ eq(quoprimime.encode('xxxx ' * 20, maxlinelen=40), """\
+xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx=
+ xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxx=
+x xxxx xxxx xxxx xxxx=20""")
+ # Test the eol argument
+ eq(quoprimime.encode('xxxx ' * 20, maxlinelen=40, eol='\r\n'), """\
+xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx=\r
+ xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxx=\r
+x xxxx xxxx xxxx xxxx=20""")
+ eq(quoprimime.encode("""\
+one line
+
+two line"""), """\
+one line
+
+two line""")
+
+
+
+# Test the Charset class
+class TestCharset(unittest.TestCase):
+ def tearDown(self):
+ from email import charset as CharsetModule
+ try:
+ del CharsetModule.CHARSETS['fake']
+ except KeyError:
+ pass
+
+ def test_idempotent(self):
+ eq = self.assertEqual
+        # Make sure us-ascii means no Unicode conversion
+ c = Charset('us-ascii')
+ s = 'Hello World!'
+ sp = c.to_splittable(s)
+ eq(s, c.from_splittable(sp))
+ # test 8-bit idempotency with us-ascii
+ s = '\xa4\xa2\xa4\xa4\xa4\xa6\xa4\xa8\xa4\xaa'
+ sp = c.to_splittable(s)
+ eq(s, c.from_splittable(sp))
+
+ def test_body_encode(self):
+ eq = self.assertEqual
+ # Try a charset with QP body encoding
+ c = Charset('iso-8859-1')
+ eq('hello w=F6rld', c.body_encode('hello w\xf6rld'))
+ # Try a charset with Base64 body encoding
+ c = Charset('utf-8')
+ eq('aGVsbG8gd29ybGQ=\n', c.body_encode('hello world'))
+ # Try a charset with None body encoding
+ c = Charset('us-ascii')
+ eq('hello world', c.body_encode('hello world'))
+        # Try the convert argument, where the input codec differs from the
+        # output codec
+ c = Charset('euc-jp')
+ # With apologies to Tokio Kikuchi ;)
+ try:
+ eq('\x1b$B5FCO;~IW\x1b(B',
+ c.body_encode('\xb5\xc6\xc3\xcf\xbb\xfe\xc9\xd7'))
+ eq('\xb5\xc6\xc3\xcf\xbb\xfe\xc9\xd7',
+ c.body_encode('\xb5\xc6\xc3\xcf\xbb\xfe\xc9\xd7', False))
+ except LookupError:
+ # We probably don't have the Japanese codecs installed
+ pass
+ # Testing SF bug #625509, which we have to fake, since there are no
+ # built-in encodings where the header encoding is QP but the body
+ # encoding is not.
+ from email import charset as CharsetModule
+ CharsetModule.add_charset('fake', CharsetModule.QP, None)
+ c = Charset('fake')
+ eq('hello w\xf6rld', c.body_encode('hello w\xf6rld'))
+
+ def test_unicode_charset_name(self):
+ charset = Charset(u'us-ascii')
+ self.assertEqual(str(charset), 'us-ascii')
+ self.assertRaises(errors.CharsetError, Charset, 'asc\xffii')
+
+
+
+# Test multilingual MIME headers.
+class TestHeader(TestEmailBase):
+ def test_simple(self):
+ eq = self.ndiffAssertEqual
+ h = Header('Hello World!')
+ eq(h.encode(), 'Hello World!')
+ h.append(' Goodbye World!')
+ eq(h.encode(), 'Hello World! Goodbye World!')
+
+ def test_simple_surprise(self):
+ eq = self.ndiffAssertEqual
+ h = Header('Hello World!')
+ eq(h.encode(), 'Hello World!')
+ h.append('Goodbye World!')
+ eq(h.encode(), 'Hello World! Goodbye World!')
+
+ def test_header_needs_no_decoding(self):
+ h = 'no decoding needed'
+ self.assertEqual(decode_header(h), [(h, None)])
+
+ def test_long(self):
+ h = Header("I am the very model of a modern Major-General; I've information vegetable, animal, and mineral; I know the kings of England, and I quote the fights historical from Marathon to Waterloo, in order categorical; I'm very well acquainted, too, with matters mathematical; I understand equations, both the simple and quadratical; about binomial theorem I'm teeming with a lot o' news, with many cheerful facts about the square of the hypotenuse.",
+ maxlinelen=76)
+ for l in h.encode(splitchars=' ').split('\n '):
+ self.failUnless(len(l) <= 76)
+
+ def test_multilingual(self):
+ eq = self.ndiffAssertEqual
+ g = Charset("iso-8859-1")
+ cz = Charset("iso-8859-2")
+ utf8 = Charset("utf-8")
+ g_head = "Die Mieter treten hier ein werden mit einem Foerderband komfortabel den Korridor entlang, an s\xfcdl\xfcndischen Wandgem\xe4lden vorbei, gegen die rotierenden Klingen bef\xf6rdert. "
+ cz_head = "Finan\xe8ni metropole se hroutily pod tlakem jejich d\xf9vtipu.. "
+ utf8_head = u"\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das Nunstuck git und Slotermeyer? Ja! Beiherhund das Oder die Flipperwaldt gersput.\u300d\u3068\u8a00\u3063\u3066\u3044\u307e\u3059\u3002".encode("utf-8")
+ h = Header(g_head, g)
+ h.append(cz_head, cz)
+ h.append(utf8_head, utf8)
+ enc = h.encode()
+ eq(enc, """\
+=?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerderband_ko?=
+ =?iso-8859-1?q?mfortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndischen_Wan?=
+ =?iso-8859-1?q?dgem=E4lden_vorbei=2C_gegen_die_rotierenden_Klingen_bef=F6?=
+ =?iso-8859-1?q?rdert=2E_?= =?iso-8859-2?q?Finan=E8ni_metropole_se_hroutily?=
+ =?iso-8859-2?q?_pod_tlakem_jejich_d=F9vtipu=2E=2E_?= =?utf-8?b?5q2j56K6?=
+ =?utf-8?b?44Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE44G+44Gb44KT44CC?=
+ =?utf-8?b?5LiA6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB44GC44Go44Gv44Gn?=
+ =?utf-8?b?44Gf44KJ44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CMV2VubiBpc3QgZGFz?=
+ =?utf-8?q?_Nunstuck_git_und_Slotermeyer=3F_Ja!_Beiherhund_das_Oder_die_Fl?=
+ =?utf-8?b?aXBwZXJ3YWxkdCBnZXJzcHV0LuOAjeOBqOiogOOBo+OBpuOBhOOBvuOBmQ==?=
+ =?utf-8?b?44CC?=""")
+ eq(decode_header(enc),
+ [(g_head, "iso-8859-1"), (cz_head, "iso-8859-2"),
+ (utf8_head, "utf-8")])
+ ustr = unicode(h)
+ eq(ustr.encode('utf-8'),
+ 'Die Mieter treten hier ein werden mit einem Foerderband '
+ 'komfortabel den Korridor entlang, an s\xc3\xbcdl\xc3\xbcndischen '
+ 'Wandgem\xc3\xa4lden vorbei, gegen die rotierenden Klingen '
+ 'bef\xc3\xb6rdert. Finan\xc4\x8dni metropole se hroutily pod '
+ 'tlakem jejich d\xc5\xafvtipu.. \xe6\xad\xa3\xe7\xa2\xba\xe3\x81'
+ '\xab\xe8\xa8\x80\xe3\x81\x86\xe3\x81\xa8\xe7\xbf\xbb\xe8\xa8\xb3'
+ '\xe3\x81\xaf\xe3\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3'
+ '\x81\xbe\xe3\x81\x9b\xe3\x82\x93\xe3\x80\x82\xe4\xb8\x80\xe9\x83'
+ '\xa8\xe3\x81\xaf\xe3\x83\x89\xe3\x82\xa4\xe3\x83\x84\xe8\xaa\x9e'
+ '\xe3\x81\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81\xe3\x81\x82\xe3'
+ '\x81\xa8\xe3\x81\xaf\xe3\x81\xa7\xe3\x81\x9f\xe3\x82\x89\xe3\x82'
+ '\x81\xe3\x81\xa7\xe3\x81\x99\xe3\x80\x82\xe5\xae\x9f\xe9\x9a\x9b'
+ '\xe3\x81\xab\xe3\x81\xaf\xe3\x80\x8cWenn ist das Nunstuck git '
+ 'und Slotermeyer? Ja! Beiherhund das Oder die Flipperwaldt '
+ 'gersput.\xe3\x80\x8d\xe3\x81\xa8\xe8\xa8\x80\xe3\x81\xa3\xe3\x81'
+ '\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x99\xe3\x80\x82')
+ # Test make_header()
+ newh = make_header(decode_header(enc))
+ eq(newh, enc)
+
+ def test_header_ctor_default_args(self):
+ eq = self.ndiffAssertEqual
+ h = Header()
+ eq(h, '')
+ h.append('foo', Charset('iso-8859-1'))
+ eq(h, '=?iso-8859-1?q?foo?=')
+
+ def test_explicit_maxlinelen(self):
+ eq = self.ndiffAssertEqual
+ hstr = 'A very long line that must get split to something other than at the 76th character boundary to test the non-default behavior'
+ h = Header(hstr)
+ eq(h.encode(), '''\
+A very long line that must get split to something other than at the 76th
+ character boundary to test the non-default behavior''')
+ h = Header(hstr, header_name='Subject')
+ eq(h.encode(), '''\
+A very long line that must get split to something other than at the
+ 76th character boundary to test the non-default behavior''')
+ h = Header(hstr, maxlinelen=1024, header_name='Subject')
+ eq(h.encode(), hstr)
+
+ def test_us_ascii_header(self):
+ eq = self.assertEqual
+ s = 'hello'
+ x = decode_header(s)
+ eq(x, [('hello', None)])
+ h = make_header(x)
+ eq(s, h.encode())
+
+ def test_string_charset(self):
+ eq = self.assertEqual
+ h = Header()
+ h.append('hello', 'iso-8859-1')
+ eq(h, '=?iso-8859-1?q?hello?=')
+
+## def test_unicode_error(self):
+## raises = self.assertRaises
+## raises(UnicodeError, Header, u'[P\xf6stal]', 'us-ascii')
+## raises(UnicodeError, Header, '[P\xf6stal]', 'us-ascii')
+## h = Header()
+## raises(UnicodeError, h.append, u'[P\xf6stal]', 'us-ascii')
+## raises(UnicodeError, h.append, '[P\xf6stal]', 'us-ascii')
+## raises(UnicodeError, Header, u'\u83ca\u5730\u6642\u592b', 'iso-8859-1')
+
+ def test_utf8_shortest(self):
+ eq = self.assertEqual
+ h = Header(u'p\xf6stal', 'utf-8')
+ eq(h.encode(), '=?utf-8?q?p=C3=B6stal?=')
+ h = Header(u'\u83ca\u5730\u6642\u592b', 'utf-8')
+ eq(h.encode(), '=?utf-8?b?6I+K5Zyw5pmC5aSr?=')
+
+ def test_bad_8bit_header(self):
+ raises = self.assertRaises
+ eq = self.assertEqual
+ x = 'Ynwp4dUEbay Auction Semiar- No Charge \x96 Earn Big'
+ raises(UnicodeError, Header, x)
+ h = Header()
+ raises(UnicodeError, h.append, x)
+ eq(str(Header(x, errors='replace')), x)
+ h.append(x, errors='replace')
+ eq(str(h), x)
+
+ def test_encoded_adjacent_nonencoded(self):
+ eq = self.assertEqual
+ h = Header()
+ h.append('hello', 'iso-8859-1')
+ h.append('world')
+ s = h.encode()
+ eq(s, '=?iso-8859-1?q?hello?= world')
+ h = make_header(decode_header(s))
+ eq(h.encode(), s)
+
+ def test_whitespace_eater(self):
+ eq = self.assertEqual
+ s = 'Subject: =?koi8-r?b?8NLP18XSy8EgzsEgxsnOwczYztk=?= =?koi8-r?q?=CA?= zz.'
+ parts = decode_header(s)
+ eq(parts, [('Subject:', None), ('\xf0\xd2\xcf\xd7\xc5\xd2\xcb\xc1 \xce\xc1 \xc6\xc9\xce\xc1\xcc\xd8\xce\xd9\xca', 'koi8-r'), ('zz.', None)])
+ hdr = make_header(parts)
+ eq(hdr.encode(),
+ 'Subject: =?koi8-r?b?8NLP18XSy8EgzsEgxsnOwczYztnK?= zz.')
+
+ def test_broken_base64_header(self):
+ raises = self.assertRaises
+ s = 'Subject: =?EUC-KR?B?CSixpLDtKSC/7Liuvsax4iC6uLmwMcijIKHaILzSwd/H0SC8+LCjwLsgv7W/+Mj3IQ?='
+ raises(errors.HeaderParseError, decode_header, s)
+
+
+
+# Test RFC 2231 header parameters (en/de)coding
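+# For reference, RFC 2231 lets a parameter carry a charset and language and
+# be split across numbered continuations, e.g. (hypothetical value):
+#   title*0*=us-ascii'en'This%20is
+#   title*1*=%20a%20title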
+class TestRFC2231(TestEmailBase):
+ def test_get_param(self):
+ eq = self.assertEqual
+ msg = self._msgobj('msg_29.txt')
+ eq(msg.get_param('title'),
+ ('us-ascii', 'en', 'This is even more ***fun*** isn\'t it!'))
+ eq(msg.get_param('title', unquote=False),
+ ('us-ascii', 'en', '"This is even more ***fun*** isn\'t it!"'))
+
+ def test_set_param(self):
+ eq = self.assertEqual
+ msg = Message()
+ msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
+ charset='us-ascii')
+ eq(msg.get_param('title'),
+ ('us-ascii', '', 'This is even more ***fun*** isn\'t it!'))
+ msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
+ charset='us-ascii', language='en')
+ eq(msg.get_param('title'),
+ ('us-ascii', 'en', 'This is even more ***fun*** isn\'t it!'))
+ msg = self._msgobj('msg_01.txt')
+ msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
+ charset='us-ascii', language='en')
+ eq(msg.as_string(), """\
+Return-Path: <bbb@zzz.org>
+Delivered-To: bbb@zzz.org
+Received: by mail.zzz.org (Postfix, from userid 889)
+\tid 27CEAD38CC; Fri, 4 May 2001 14:05:44 -0400 (EDT)
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Message-ID: <15090.61304.110929.45684@aaa.zzz.org>
+From: bbb@ddd.com (John X. Doe)
+To: bbb@zzz.org
+Subject: This is a test message
+Date: Fri, 4 May 2001 14:05:44 -0400
+Content-Type: text/plain; charset=us-ascii;
+\ttitle*="us-ascii'en'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20isn%27t%20it%21"
+
+
+Hi,
+
+Do you like this message?
+
+-Me
+""")
+
+ def test_del_param(self):
+ eq = self.ndiffAssertEqual
+ msg = self._msgobj('msg_01.txt')
+ msg.set_param('foo', 'bar', charset='us-ascii', language='en')
+ msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
+ charset='us-ascii', language='en')
+ msg.del_param('foo', header='Content-Type')
+ eq(msg.as_string(), """\
+Return-Path: <bbb@zzz.org>
+Delivered-To: bbb@zzz.org
+Received: by mail.zzz.org (Postfix, from userid 889)
+\tid 27CEAD38CC; Fri, 4 May 2001 14:05:44 -0400 (EDT)
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Message-ID: <15090.61304.110929.45684@aaa.zzz.org>
+From: bbb@ddd.com (John X. Doe)
+To: bbb@zzz.org
+Subject: This is a test message
+Date: Fri, 4 May 2001 14:05:44 -0400
+Content-Type: text/plain; charset="us-ascii";
+\ttitle*="us-ascii'en'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20isn%27t%20it%21"
+
+
+Hi,
+
+Do you like this message?
+
+-Me
+""")
+
+ def test_rfc2231_get_content_charset(self):
+ eq = self.assertEqual
+ msg = self._msgobj('msg_32.txt')
+ eq(msg.get_content_charset(), 'us-ascii')
+
+ def test_rfc2231_no_language_or_charset(self):
+ m = '''\
+Content-Transfer-Encoding: 8bit
+Content-Disposition: inline; filename="file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEMP_nsmail.htm"
+Content-Type: text/html; NAME*0=file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEM; NAME*1=P_nsmail.htm
+
+'''
+ msg = email.message_from_string(m)
+ self.assertEqual(msg.get_param('NAME'),
+ (None, None, 'file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEMP_nsmail.htm'))
+
+ def test_rfc2231_no_language_or_charset_in_filename(self):
+ m = '''\
+Content-Disposition: inline;
+\tfilename*0="This%20is%20even%20more%20";
+\tfilename*1="%2A%2A%2Afun%2A%2A%2A%20";
+\tfilename*2="is it not.pdf"
+
+'''
+ msg = email.message_from_string(m)
+ self.assertEqual(msg.get_filename(),
+ 'This is even more ***fun*** is it not.pdf')
+
+ def test_rfc2231_no_language_or_charset_in_boundary(self):
+ m = '''\
+Content-Type: multipart/alternative;
+\tboundary*0="This%20is%20even%20more%20";
+\tboundary*1="%2A%2A%2Afun%2A%2A%2A%20";
+\tboundary*2="is it not.pdf"
+
+'''
+ msg = email.message_from_string(m)
+ self.assertEqual(msg.get_boundary(),
+ 'This is even more ***fun*** is it not.pdf')
+
+ def test_rfc2231_no_language_or_charset_in_charset(self):
+ # This is a nonsensical charset value, but tests the code anyway
+ m = '''\
+Content-Type: text/plain;
+\tcharset*0="This%20is%20even%20more%20";
+\tcharset*1="%2A%2A%2Afun%2A%2A%2A%20";
+\tcharset*2="is it not.pdf"
+
+'''
+ msg = email.message_from_string(m)
+ self.assertEqual(msg.get_content_charset(),
+ 'this is even more ***fun*** is it not.pdf')
+
+ def test_rfc2231_unknown_encoding(self):
+ m = """\
+Content-Transfer-Encoding: 8bit
+Content-Disposition: inline; filename*0=X-UNKNOWN''myfile.txt
+
+"""
+ msg = email.message_from_string(m)
+ self.assertEqual(msg.get_filename(), 'myfile.txt')
+
+
+
+def _testclasses():
+ mod = sys.modules[__name__]
+ return [getattr(mod, name) for name in dir(mod) if name.startswith('Test')]
+
+
+def suite():
+ suite = unittest.TestSuite()
+ for testclass in _testclasses():
+ suite.addTest(unittest.makeSuite(testclass))
+ return suite
+
+
+def test_main():
+ for testclass in _testclasses():
+ run_unittest(testclass)
+
+
+
+if __name__ == '__main__':
+ unittest.main(defaultTest='suite')
diff --git a/Lib/email/Utils.py b/Lib/email/utils.py
index 9ba7601..250eb19 100644
--- a/Lib/email/Utils.py
+++ b/Lib/email/utils.py
@@ -1,9 +1,24 @@
-# Copyright (C) 2001-2004 Python Software Foundation
+# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Miscellaneous utilities."""
+__all__ = [
+ 'collapse_rfc2231_value',
+ 'decode_params',
+ 'decode_rfc2231',
+ 'encode_rfc2231',
+ 'formataddr',
+ 'formatdate',
+ 'getaddresses',
+ 'make_msgid',
+ 'parseaddr',
+ 'parsedate',
+ 'parsedate_tz',
+ 'unquote',
+ ]
+
import os
import re
import time
@@ -24,7 +39,7 @@ from email._parseaddr import parsedate_tz as _parsedate_tz
from quopri import decodestring as _qdecode
# Intrapackage imports
-from email.Encoders import _bencode, _qencode
+from email.encoders import _bencode, _qencode
COMMASPACE = ', '
EMPTYSTRING = ''
diff --git a/Lib/encodings/big5.py b/Lib/encodings/big5.py
index d56aa1b..7adeb0e 100644
--- a/Lib/encodings/big5.py
+++ b/Lib/encodings/big5.py
@@ -2,10 +2,10 @@
# big5.py: Python Unicode Codec for BIG5
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
-# $CJKCodecs: big5.py,v 1.8 2004/06/28 18:16:03 perky Exp $
#
import _codecs_tw, codecs
+import _multibytecodec as mbc
codec = _codecs_tw.getcodec('big5')
@@ -13,22 +13,27 @@ class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
-class StreamReader(Codec, codecs.StreamReader):
- def __init__(self, stream, errors='strict'):
- codecs.StreamReader.__init__(self, stream, errors)
- __codec = codec.StreamReader(stream, errors)
- self.read = __codec.read
- self.readline = __codec.readline
- self.readlines = __codec.readlines
- self.reset = __codec.reset
-
-class StreamWriter(Codec, codecs.StreamWriter):
- def __init__(self, stream, errors='strict'):
- codecs.StreamWriter.__init__(self, stream, errors)
- __codec = codec.StreamWriter(stream, errors)
- self.write = __codec.write
- self.writelines = __codec.writelines
- self.reset = __codec.reset
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
def getregentry():
- return (codec.encode, codec.decode, StreamReader, StreamWriter)
+ return codecs.CodecInfo(
+ name='big5',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
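
The same mechanical rewrite repeats for every CJK codec below: the stream classes now delegate to _multibytecodec, incremental codecs are added, and getregentry() returns a codecs.CodecInfo record. A minimal sketch, outside the patch, of what that buys -- chunked decoding through the standard codecs machinery:

    import codecs

    dec = codecs.getincrementaldecoder('big5')()
    data = u'\u4e2d\u6587'.encode('big5')   # two Big5 double-byte characters
    out = u''
    for byte in data:                       # feed one byte at a time
        out += dec.decode(byte)             # incomplete sequences are buffered
    out += dec.decode('', final=True)       # flush
    assert out == u'\u4e2d\u6587'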
diff --git a/Lib/encodings/big5hkscs.py b/Lib/encodings/big5hkscs.py
index 443997f..350df37 100644
--- a/Lib/encodings/big5hkscs.py
+++ b/Lib/encodings/big5hkscs.py
@@ -2,10 +2,10 @@
# big5hkscs.py: Python Unicode Codec for BIG5HKSCS
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
-# $CJKCodecs: big5hkscs.py,v 1.1 2004/06/29 05:14:27 perky Exp $
#
import _codecs_hk, codecs
+import _multibytecodec as mbc
codec = _codecs_hk.getcodec('big5hkscs')
@@ -13,22 +13,27 @@ class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
-class StreamReader(Codec, codecs.StreamReader):
- def __init__(self, stream, errors='strict'):
- codecs.StreamReader.__init__(self, stream, errors)
- __codec = codec.StreamReader(stream, errors)
- self.read = __codec.read
- self.readline = __codec.readline
- self.readlines = __codec.readlines
- self.reset = __codec.reset
-
-class StreamWriter(Codec, codecs.StreamWriter):
- def __init__(self, stream, errors='strict'):
- codecs.StreamWriter.__init__(self, stream, errors)
- __codec = codec.StreamWriter(stream, errors)
- self.write = __codec.write
- self.writelines = __codec.writelines
- self.reset = __codec.reset
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
def getregentry():
- return (codec.encode, codec.decode, StreamReader, StreamWriter)
+ return codecs.CodecInfo(
+ name='big5hkscs',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Lib/encodings/cp932.py b/Lib/encodings/cp932.py
index 38937f5..e01f59b 100644
--- a/Lib/encodings/cp932.py
+++ b/Lib/encodings/cp932.py
@@ -2,10 +2,10 @@
# cp932.py: Python Unicode Codec for CP932
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
-# $CJKCodecs: cp932.py,v 1.8 2004/06/28 18:16:03 perky Exp $
#
import _codecs_jp, codecs
+import _multibytecodec as mbc
codec = _codecs_jp.getcodec('cp932')
@@ -13,22 +13,27 @@ class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
-class StreamReader(Codec, codecs.StreamReader):
- def __init__(self, stream, errors='strict'):
- codecs.StreamReader.__init__(self, stream, errors)
- __codec = codec.StreamReader(stream, errors)
- self.read = __codec.read
- self.readline = __codec.readline
- self.readlines = __codec.readlines
- self.reset = __codec.reset
-
-class StreamWriter(Codec, codecs.StreamWriter):
- def __init__(self, stream, errors='strict'):
- codecs.StreamWriter.__init__(self, stream, errors)
- __codec = codec.StreamWriter(stream, errors)
- self.write = __codec.write
- self.writelines = __codec.writelines
- self.reset = __codec.reset
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
def getregentry():
- return (codec.encode, codec.decode, StreamReader, StreamWriter)
+ return codecs.CodecInfo(
+ name='cp932',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Lib/encodings/cp949.py b/Lib/encodings/cp949.py
index 0f3c847..627c871 100644
--- a/Lib/encodings/cp949.py
+++ b/Lib/encodings/cp949.py
@@ -2,10 +2,10 @@
# cp949.py: Python Unicode Codec for CP949
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
-# $CJKCodecs: cp949.py,v 1.8 2004/06/28 18:16:03 perky Exp $
#
import _codecs_kr, codecs
+import _multibytecodec as mbc
codec = _codecs_kr.getcodec('cp949')
@@ -13,22 +13,27 @@ class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
-class StreamReader(Codec, codecs.StreamReader):
- def __init__(self, stream, errors='strict'):
- codecs.StreamReader.__init__(self, stream, errors)
- __codec = codec.StreamReader(stream, errors)
- self.read = __codec.read
- self.readline = __codec.readline
- self.readlines = __codec.readlines
- self.reset = __codec.reset
-
-class StreamWriter(Codec, codecs.StreamWriter):
- def __init__(self, stream, errors='strict'):
- codecs.StreamWriter.__init__(self, stream, errors)
- __codec = codec.StreamWriter(stream, errors)
- self.write = __codec.write
- self.writelines = __codec.writelines
- self.reset = __codec.reset
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
def getregentry():
- return (codec.encode, codec.decode, StreamReader, StreamWriter)
+ return codecs.CodecInfo(
+ name='cp949',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Lib/encodings/cp950.py b/Lib/encodings/cp950.py
index dab3e28..39eec5e 100644
--- a/Lib/encodings/cp950.py
+++ b/Lib/encodings/cp950.py
@@ -2,10 +2,10 @@
# cp950.py: Python Unicode Codec for CP950
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
-# $CJKCodecs: cp950.py,v 1.8 2004/06/28 18:16:03 perky Exp $
#
import _codecs_tw, codecs
+import _multibytecodec as mbc
codec = _codecs_tw.getcodec('cp950')
@@ -13,22 +13,27 @@ class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
-class StreamReader(Codec, codecs.StreamReader):
- def __init__(self, stream, errors='strict'):
- codecs.StreamReader.__init__(self, stream, errors)
- __codec = codec.StreamReader(stream, errors)
- self.read = __codec.read
- self.readline = __codec.readline
- self.readlines = __codec.readlines
- self.reset = __codec.reset
-
-class StreamWriter(Codec, codecs.StreamWriter):
- def __init__(self, stream, errors='strict'):
- codecs.StreamWriter.__init__(self, stream, errors)
- __codec = codec.StreamWriter(stream, errors)
- self.write = __codec.write
- self.writelines = __codec.writelines
- self.reset = __codec.reset
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
def getregentry():
- return (codec.encode, codec.decode, StreamReader, StreamWriter)
+ return codecs.CodecInfo(
+ name='cp950',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Lib/encodings/euc_jis_2004.py b/Lib/encodings/euc_jis_2004.py
index 02d55ca..72b87aea 100644
--- a/Lib/encodings/euc_jis_2004.py
+++ b/Lib/encodings/euc_jis_2004.py
@@ -2,10 +2,10 @@
# euc_jis_2004.py: Python Unicode Codec for EUC_JIS_2004
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
-# $CJKCodecs: euc_jis_2004.py,v 1.1 2004/07/07 16:18:25 perky Exp $
#
import _codecs_jp, codecs
+import _multibytecodec as mbc
codec = _codecs_jp.getcodec('euc_jis_2004')
@@ -13,22 +13,27 @@ class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
-class StreamReader(Codec, codecs.StreamReader):
- def __init__(self, stream, errors='strict'):
- codecs.StreamReader.__init__(self, stream, errors)
- __codec = codec.StreamReader(stream, errors)
- self.read = __codec.read
- self.readline = __codec.readline
- self.readlines = __codec.readlines
- self.reset = __codec.reset
-
-class StreamWriter(Codec, codecs.StreamWriter):
- def __init__(self, stream, errors='strict'):
- codecs.StreamWriter.__init__(self, stream, errors)
- __codec = codec.StreamWriter(stream, errors)
- self.write = __codec.write
- self.writelines = __codec.writelines
- self.reset = __codec.reset
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
def getregentry():
- return (codec.encode, codec.decode, StreamReader, StreamWriter)
+ return codecs.CodecInfo(
+ name='euc_jis_2004',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Lib/encodings/euc_jisx0213.py b/Lib/encodings/euc_jisx0213.py
index 30f173e..cc47d04 100644
--- a/Lib/encodings/euc_jisx0213.py
+++ b/Lib/encodings/euc_jisx0213.py
@@ -2,10 +2,10 @@
# euc_jisx0213.py: Python Unicode Codec for EUC_JISX0213
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
-# $CJKCodecs: euc_jisx0213.py,v 1.8 2004/06/28 18:16:03 perky Exp $
#
import _codecs_jp, codecs
+import _multibytecodec as mbc
codec = _codecs_jp.getcodec('euc_jisx0213')
@@ -13,22 +13,27 @@ class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
-class StreamReader(Codec, codecs.StreamReader):
- def __init__(self, stream, errors='strict'):
- codecs.StreamReader.__init__(self, stream, errors)
- __codec = codec.StreamReader(stream, errors)
- self.read = __codec.read
- self.readline = __codec.readline
- self.readlines = __codec.readlines
- self.reset = __codec.reset
-
-class StreamWriter(Codec, codecs.StreamWriter):
- def __init__(self, stream, errors='strict'):
- codecs.StreamWriter.__init__(self, stream, errors)
- __codec = codec.StreamWriter(stream, errors)
- self.write = __codec.write
- self.writelines = __codec.writelines
- self.reset = __codec.reset
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
def getregentry():
- return (codec.encode, codec.decode, StreamReader, StreamWriter)
+ return codecs.CodecInfo(
+ name='euc_jisx0213',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Lib/encodings/euc_jp.py b/Lib/encodings/euc_jp.py
index a3947a3..7bcbe41 100644
--- a/Lib/encodings/euc_jp.py
+++ b/Lib/encodings/euc_jp.py
@@ -2,10 +2,10 @@
# euc_jp.py: Python Unicode Codec for EUC_JP
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
-# $CJKCodecs: euc_jp.py,v 1.8 2004/06/28 18:16:03 perky Exp $
#
import _codecs_jp, codecs
+import _multibytecodec as mbc
codec = _codecs_jp.getcodec('euc_jp')
@@ -13,22 +13,27 @@ class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
-class StreamReader(Codec, codecs.StreamReader):
- def __init__(self, stream, errors='strict'):
- codecs.StreamReader.__init__(self, stream, errors)
- __codec = codec.StreamReader(stream, errors)
- self.read = __codec.read
- self.readline = __codec.readline
- self.readlines = __codec.readlines
- self.reset = __codec.reset
-
-class StreamWriter(Codec, codecs.StreamWriter):
- def __init__(self, stream, errors='strict'):
- codecs.StreamWriter.__init__(self, stream, errors)
- __codec = codec.StreamWriter(stream, errors)
- self.write = __codec.write
- self.writelines = __codec.writelines
- self.reset = __codec.reset
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
def getregentry():
- return (codec.encode, codec.decode, StreamReader, StreamWriter)
+ return codecs.CodecInfo(
+ name='euc_jp',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Lib/encodings/euc_kr.py b/Lib/encodings/euc_kr.py
index bbebee8..c1fb126 100644
--- a/Lib/encodings/euc_kr.py
+++ b/Lib/encodings/euc_kr.py
@@ -2,10 +2,10 @@
# euc_kr.py: Python Unicode Codec for EUC_KR
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
-# $CJKCodecs: euc_kr.py,v 1.8 2004/06/28 18:16:03 perky Exp $
#
import _codecs_kr, codecs
+import _multibytecodec as mbc
codec = _codecs_kr.getcodec('euc_kr')
@@ -13,22 +13,27 @@ class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
-class StreamReader(Codec, codecs.StreamReader):
- def __init__(self, stream, errors='strict'):
- codecs.StreamReader.__init__(self, stream, errors)
- __codec = codec.StreamReader(stream, errors)
- self.read = __codec.read
- self.readline = __codec.readline
- self.readlines = __codec.readlines
- self.reset = __codec.reset
-
-class StreamWriter(Codec, codecs.StreamWriter):
- def __init__(self, stream, errors='strict'):
- codecs.StreamWriter.__init__(self, stream, errors)
- __codec = codec.StreamWriter(stream, errors)
- self.write = __codec.write
- self.writelines = __codec.writelines
- self.reset = __codec.reset
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
def getregentry():
- return (codec.encode, codec.decode, StreamReader, StreamWriter)
+ return codecs.CodecInfo(
+ name='euc_kr',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Lib/encodings/gb18030.py b/Lib/encodings/gb18030.py
index 7eca319..34fb6c3 100644
--- a/Lib/encodings/gb18030.py
+++ b/Lib/encodings/gb18030.py
@@ -2,10 +2,10 @@
# gb18030.py: Python Unicode Codec for GB18030
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
-# $CJKCodecs: gb18030.py,v 1.8 2004/06/28 18:16:03 perky Exp $
#
import _codecs_cn, codecs
+import _multibytecodec as mbc
codec = _codecs_cn.getcodec('gb18030')
@@ -13,22 +13,27 @@ class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
-class StreamReader(Codec, codecs.StreamReader):
- def __init__(self, stream, errors='strict'):
- codecs.StreamReader.__init__(self, stream, errors)
- __codec = codec.StreamReader(stream, errors)
- self.read = __codec.read
- self.readline = __codec.readline
- self.readlines = __codec.readlines
- self.reset = __codec.reset
-
-class StreamWriter(Codec, codecs.StreamWriter):
- def __init__(self, stream, errors='strict'):
- codecs.StreamWriter.__init__(self, stream, errors)
- __codec = codec.StreamWriter(stream, errors)
- self.write = __codec.write
- self.writelines = __codec.writelines
- self.reset = __codec.reset
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
def getregentry():
- return (codec.encode, codec.decode, StreamReader, StreamWriter)
+ return codecs.CodecInfo(
+ name='gb18030',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Lib/encodings/gb2312.py b/Lib/encodings/gb2312.py
index 5130efa..3c3b837 100644
--- a/Lib/encodings/gb2312.py
+++ b/Lib/encodings/gb2312.py
@@ -2,10 +2,10 @@
# gb2312.py: Python Unicode Codec for GB2312
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
-# $CJKCodecs: gb2312.py,v 1.8 2004/06/28 18:16:03 perky Exp $
#
import _codecs_cn, codecs
+import _multibytecodec as mbc
codec = _codecs_cn.getcodec('gb2312')
@@ -13,22 +13,27 @@ class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
-class StreamReader(Codec, codecs.StreamReader):
- def __init__(self, stream, errors='strict'):
- codecs.StreamReader.__init__(self, stream, errors)
- __codec = codec.StreamReader(stream, errors)
- self.read = __codec.read
- self.readline = __codec.readline
- self.readlines = __codec.readlines
- self.reset = __codec.reset
-
-class StreamWriter(Codec, codecs.StreamWriter):
- def __init__(self, stream, errors='strict'):
- codecs.StreamWriter.__init__(self, stream, errors)
- __codec = codec.StreamWriter(stream, errors)
- self.write = __codec.write
- self.writelines = __codec.writelines
- self.reset = __codec.reset
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
def getregentry():
- return (codec.encode, codec.decode, StreamReader, StreamWriter)
+ return codecs.CodecInfo(
+ name='gb2312',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Lib/encodings/gbk.py b/Lib/encodings/gbk.py
index 67854bc..1b45db8 100644
--- a/Lib/encodings/gbk.py
+++ b/Lib/encodings/gbk.py
@@ -2,10 +2,10 @@
# gbk.py: Python Unicode Codec for GBK
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
-# $CJKCodecs: gbk.py,v 1.8 2004/06/28 18:16:03 perky Exp $
#
import _codecs_cn, codecs
+import _multibytecodec as mbc
codec = _codecs_cn.getcodec('gbk')
@@ -13,22 +13,27 @@ class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
-class StreamReader(Codec, codecs.StreamReader):
- def __init__(self, stream, errors='strict'):
- codecs.StreamReader.__init__(self, stream, errors)
- __codec = codec.StreamReader(stream, errors)
- self.read = __codec.read
- self.readline = __codec.readline
- self.readlines = __codec.readlines
- self.reset = __codec.reset
-
-class StreamWriter(Codec, codecs.StreamWriter):
- def __init__(self, stream, errors='strict'):
- codecs.StreamWriter.__init__(self, stream, errors)
- __codec = codec.StreamWriter(stream, errors)
- self.write = __codec.write
- self.writelines = __codec.writelines
- self.reset = __codec.reset
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
def getregentry():
- return (codec.encode, codec.decode, StreamReader, StreamWriter)
+ return codecs.CodecInfo(
+ name='gbk',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Lib/encodings/hz.py b/Lib/encodings/hz.py
index 3940894..383442a 100644
--- a/Lib/encodings/hz.py
+++ b/Lib/encodings/hz.py
@@ -2,10 +2,10 @@
# hz.py: Python Unicode Codec for HZ
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
-# $CJKCodecs: hz.py,v 1.8 2004/06/28 18:16:03 perky Exp $
#
import _codecs_cn, codecs
+import _multibytecodec as mbc
codec = _codecs_cn.getcodec('hz')
@@ -13,22 +13,27 @@ class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
-class StreamReader(Codec, codecs.StreamReader):
- def __init__(self, stream, errors='strict'):
- codecs.StreamReader.__init__(self, stream, errors)
- __codec = codec.StreamReader(stream, errors)
- self.read = __codec.read
- self.readline = __codec.readline
- self.readlines = __codec.readlines
- self.reset = __codec.reset
-
-class StreamWriter(Codec, codecs.StreamWriter):
- def __init__(self, stream, errors='strict'):
- codecs.StreamWriter.__init__(self, stream, errors)
- __codec = codec.StreamWriter(stream, errors)
- self.write = __codec.write
- self.writelines = __codec.writelines
- self.reset = __codec.reset
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
def getregentry():
- return (codec.encode, codec.decode, StreamReader, StreamWriter)
+ return codecs.CodecInfo(
+ name='hz',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Lib/encodings/idna.py b/Lib/encodings/idna.py
index 8bdae32..ea90d67 100644
--- a/Lib/encodings/idna.py
+++ b/Lib/encodings/idna.py
@@ -35,7 +35,7 @@ def nameprep(label):
stringprep.in_table_c7(c) or \
stringprep.in_table_c8(c) or \
stringprep.in_table_c9(c):
- raise UnicodeError, "Invalid character %s" % repr(c)
+ raise UnicodeError("Invalid character %r" % c)
# Check bidi
RandAL = map(stringprep.in_table_d1, label)
@@ -48,14 +48,14 @@ def nameprep(label):
# 2) If a string contains any RandALCat character, the string
# MUST NOT contain any LCat character.
if filter(stringprep.in_table_d2, label):
- raise UnicodeError, "Violation of BIDI requirement 2"
+ raise UnicodeError("Violation of BIDI requirement 2")
# 3) If a string contains any RandALCat character, a
# RandALCat character MUST be the first character of the
# string, and a RandALCat character MUST be the last
# character of the string.
if not RandAL[0] or not RandAL[-1]:
- raise UnicodeError, "Violation of BIDI requirement 3"
+ raise UnicodeError("Violation of BIDI requirement 3")
return label
@@ -70,7 +70,7 @@ def ToASCII(label):
# Skip to step 8.
if 0 < len(label) < 64:
return label
- raise UnicodeError, "label too long"
+ raise UnicodeError("label empty or too long")
# Step 2: nameprep
label = nameprep(label)
@@ -85,11 +85,11 @@ def ToASCII(label):
# Skip to step 8.
if 0 < len(label) < 64:
return label
- raise UnicodeError, "label too long"
+ raise UnicodeError("label empty or too long")
# Step 5: Check ACE prefix
if label.startswith(uace_prefix):
- raise UnicodeError, "Label starts with ACE prefix"
+ raise UnicodeError("Label starts with ACE prefix")
# Step 6: Encode with PUNYCODE
label = label.encode("punycode")
@@ -100,7 +100,7 @@ def ToASCII(label):
# Step 8: Check size
if 0 < len(label) < 64:
return label
- raise UnicodeError, "label too long"
+ raise UnicodeError("label empty or too long")
def ToUnicode(label):
# Step 1: Check for ASCII
@@ -119,7 +119,7 @@ def ToUnicode(label):
try:
label = label.encode("ascii")
except UnicodeError:
- raise UnicodeError, "Invalid character in IDN label"
+ raise UnicodeError("Invalid character in IDN label")
# Step 3: Check for ACE prefix
if not label.startswith(ace_prefix):
return unicode(label, "ascii")
@@ -136,7 +136,7 @@ def ToUnicode(label):
# Step 7: Compare the result of step 6 with the one of step 3
# label2 will already be in lower case.
if label.lower() != label2:
- raise UnicodeError, ("IDNA does not round-trip", label, label2)
+ raise UnicodeError("IDNA does not round-trip", label, label2)
# Step 8: return the result of step 5
return result
@@ -148,7 +148,7 @@ class Codec(codecs.Codec):
if errors != 'strict':
# IDNA is quite clear that implementations must be strict
- raise UnicodeError, "unsupported error handling "+errors
+ raise UnicodeError("unsupported error handling "+errors)
if not input:
return "", 0
@@ -168,7 +168,7 @@ class Codec(codecs.Codec):
def decode(self,input,errors='strict'):
if errors != 'strict':
- raise UnicodeError, "Unsupported error handling "+errors
+ raise UnicodeError("Unsupported error handling "+errors)
if not input:
return u"", 0
@@ -194,13 +194,79 @@ class Codec(codecs.Codec):
return u".".join(result)+trailing_dot, len(input)
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return Codec().encode(input, self.errors)[0]
+class IncrementalEncoder(codecs.BufferedIncrementalEncoder):
+ def _buffer_encode(self, input, errors, final):
+ if errors != 'strict':
+ # IDNA is quite clear that implementations must be strict
+ raise UnicodeError("unsupported error handling "+errors)
+
+ if not input:
+ return ("", 0)
+
+ labels = dots.split(input)
+ trailing_dot = u''
+ if labels:
+ if not labels[-1]:
+ trailing_dot = '.'
+ del labels[-1]
+ elif not final:
+ # Keep potentially unfinished label until the next call
+ del labels[-1]
+ if labels:
+ trailing_dot = '.'
+
+ result = []
+ size = 0
+ for label in labels:
+ result.append(ToASCII(label))
+ if size:
+ size += 1
+ size += len(label)
+
+ # Join with U+002E
+ result = ".".join(result) + trailing_dot
+ size += len(trailing_dot)
+ return (result, size)
+
+class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
+ def _buffer_decode(self, input, errors, final):
+ if errors != 'strict':
+ raise UnicodeError("Unsupported error handling "+errors)
+
+ if not input:
+ return (u"", 0)
+
+ # IDNA allows decoding to operate on Unicode strings, too.
+ if isinstance(input, unicode):
+ labels = dots.split(input)
+ else:
+ # Must be ASCII string
+ input = str(input)
+ unicode(input, "ascii")
+ labels = input.split(".")
+
+ trailing_dot = u''
+ if labels:
+ if not labels[-1]:
+ trailing_dot = u'.'
+ del labels[-1]
+ elif not final:
+ # Keep potentially unfinished label until the next call
+ del labels[-1]
+ if labels:
+ trailing_dot = u'.'
+
+ result = []
+ size = 0
+ for label in labels:
+ result.append(ToUnicode(label))
+ if size:
+ size += 1
+ size += len(label)
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return Codec().decode(input, self.errors)[0]
+ result = u".".join(result) + trailing_dot
+ size += len(trailing_dot)
+ return (result, size)
class StreamWriter(Codec,codecs.StreamWriter):
pass
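
A minimal sketch, outside the patch, of the buffering the two classes above introduce: a potentially unfinished trailing label is held back until the caller signals that the input is complete (assuming the codec registry exposes the new incremental classes):

    import codecs

    enc = codecs.getincrementalencoder('idna')()
    print repr(enc.encode(u'www.py'))                 # 'www.' ('py' is buffered)
    print repr(enc.encode(u'thon.org', final=True))   # 'python.org'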
diff --git a/Lib/encodings/iso2022_jp.py b/Lib/encodings/iso2022_jp.py
index 109658b..ab04060 100644
--- a/Lib/encodings/iso2022_jp.py
+++ b/Lib/encodings/iso2022_jp.py
@@ -2,10 +2,10 @@
# iso2022_jp.py: Python Unicode Codec for ISO2022_JP
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
-# $CJKCodecs: iso2022_jp.py,v 1.2 2004/06/28 18:16:03 perky Exp $
#
import _codecs_iso2022, codecs
+import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp')
@@ -13,22 +13,27 @@ class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
-class StreamReader(Codec, codecs.StreamReader):
- def __init__(self, stream, errors='strict'):
- codecs.StreamReader.__init__(self, stream, errors)
- __codec = codec.StreamReader(stream, errors)
- self.read = __codec.read
- self.readline = __codec.readline
- self.readlines = __codec.readlines
- self.reset = __codec.reset
-
-class StreamWriter(Codec, codecs.StreamWriter):
- def __init__(self, stream, errors='strict'):
- codecs.StreamWriter.__init__(self, stream, errors)
- __codec = codec.StreamWriter(stream, errors)
- self.write = __codec.write
- self.writelines = __codec.writelines
- self.reset = __codec.reset
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
def getregentry():
- return (codec.encode, codec.decode, StreamReader, StreamWriter)
+ return codecs.CodecInfo(
+ name='iso2022_jp',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Lib/encodings/iso2022_jp_1.py b/Lib/encodings/iso2022_jp_1.py
index 201bd28..997044d 100644
--- a/Lib/encodings/iso2022_jp_1.py
+++ b/Lib/encodings/iso2022_jp_1.py
@@ -2,10 +2,10 @@
# iso2022_jp_1.py: Python Unicode Codec for ISO2022_JP_1
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
-# $CJKCodecs: iso2022_jp_1.py,v 1.2 2004/06/28 18:16:03 perky Exp $
#
import _codecs_iso2022, codecs
+import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_1')
@@ -13,22 +13,27 @@ class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
-class StreamReader(Codec, codecs.StreamReader):
- def __init__(self, stream, errors='strict'):
- codecs.StreamReader.__init__(self, stream, errors)
- __codec = codec.StreamReader(stream, errors)
- self.read = __codec.read
- self.readline = __codec.readline
- self.readlines = __codec.readlines
- self.reset = __codec.reset
-
-class StreamWriter(Codec, codecs.StreamWriter):
- def __init__(self, stream, errors='strict'):
- codecs.StreamWriter.__init__(self, stream, errors)
- __codec = codec.StreamWriter(stream, errors)
- self.write = __codec.write
- self.writelines = __codec.writelines
- self.reset = __codec.reset
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
def getregentry():
- return (codec.encode, codec.decode, StreamReader, StreamWriter)
+ return codecs.CodecInfo(
+ name='iso2022_jp_1',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Lib/encodings/iso2022_jp_2.py b/Lib/encodings/iso2022_jp_2.py
index 7a61018..9106bf7 100644
--- a/Lib/encodings/iso2022_jp_2.py
+++ b/Lib/encodings/iso2022_jp_2.py
@@ -2,10 +2,10 @@
# iso2022_jp_2.py: Python Unicode Codec for ISO2022_JP_2
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
-# $CJKCodecs: iso2022_jp_2.py,v 1.2 2004/06/28 18:16:03 perky Exp $
#
import _codecs_iso2022, codecs
+import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_2')
@@ -13,22 +13,27 @@ class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
-class StreamReader(Codec, codecs.StreamReader):
- def __init__(self, stream, errors='strict'):
- codecs.StreamReader.__init__(self, stream, errors)
- __codec = codec.StreamReader(stream, errors)
- self.read = __codec.read
- self.readline = __codec.readline
- self.readlines = __codec.readlines
- self.reset = __codec.reset
-
-class StreamWriter(Codec, codecs.StreamWriter):
- def __init__(self, stream, errors='strict'):
- codecs.StreamWriter.__init__(self, stream, errors)
- __codec = codec.StreamWriter(stream, errors)
- self.write = __codec.write
- self.writelines = __codec.writelines
- self.reset = __codec.reset
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
def getregentry():
- return (codec.encode, codec.decode, StreamReader, StreamWriter)
+ return codecs.CodecInfo(
+ name='iso2022_jp_2',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Lib/encodings/iso2022_jp_2004.py b/Lib/encodings/iso2022_jp_2004.py
index 2497124..40198bf 100644
--- a/Lib/encodings/iso2022_jp_2004.py
+++ b/Lib/encodings/iso2022_jp_2004.py
@@ -2,10 +2,10 @@
# iso2022_jp_2004.py: Python Unicode Codec for ISO2022_JP_2004
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
-# $CJKCodecs: iso2022_jp_2004.py,v 1.1 2004/07/07 16:18:25 perky Exp $
#
import _codecs_iso2022, codecs
+import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_2004')
@@ -13,22 +13,27 @@ class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
-class StreamReader(Codec, codecs.StreamReader):
- def __init__(self, stream, errors='strict'):
- codecs.StreamReader.__init__(self, stream, errors)
- __codec = codec.StreamReader(stream, errors)
- self.read = __codec.read
- self.readline = __codec.readline
- self.readlines = __codec.readlines
- self.reset = __codec.reset
-
-class StreamWriter(Codec, codecs.StreamWriter):
- def __init__(self, stream, errors='strict'):
- codecs.StreamWriter.__init__(self, stream, errors)
- __codec = codec.StreamWriter(stream, errors)
- self.write = __codec.write
- self.writelines = __codec.writelines
- self.reset = __codec.reset
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
def getregentry():
- return (codec.encode, codec.decode, StreamReader, StreamWriter)
+ return codecs.CodecInfo(
+ name='iso2022_jp_2004',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Lib/encodings/iso2022_jp_3.py b/Lib/encodings/iso2022_jp_3.py
index 8b2ed00..346e08b 100644
--- a/Lib/encodings/iso2022_jp_3.py
+++ b/Lib/encodings/iso2022_jp_3.py
@@ -2,10 +2,10 @@
# iso2022_jp_3.py: Python Unicode Codec for ISO2022_JP_3
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
-# $CJKCodecs: iso2022_jp_3.py,v 1.2 2004/06/28 18:16:03 perky Exp $
#
import _codecs_iso2022, codecs
+import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_3')
@@ -13,22 +13,27 @@ class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
-class StreamReader(Codec, codecs.StreamReader):
- def __init__(self, stream, errors='strict'):
- codecs.StreamReader.__init__(self, stream, errors)
- __codec = codec.StreamReader(stream, errors)
- self.read = __codec.read
- self.readline = __codec.readline
- self.readlines = __codec.readlines
- self.reset = __codec.reset
-
-class StreamWriter(Codec, codecs.StreamWriter):
- def __init__(self, stream, errors='strict'):
- codecs.StreamWriter.__init__(self, stream, errors)
- __codec = codec.StreamWriter(stream, errors)
- self.write = __codec.write
- self.writelines = __codec.writelines
- self.reset = __codec.reset
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
def getregentry():
- return (codec.encode, codec.decode, StreamReader, StreamWriter)
+ return codecs.CodecInfo(
+ name='iso2022_jp_3',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Lib/encodings/iso2022_jp_ext.py b/Lib/encodings/iso2022_jp_ext.py
index 97cb4e7..752bab9 100644
--- a/Lib/encodings/iso2022_jp_ext.py
+++ b/Lib/encodings/iso2022_jp_ext.py
@@ -2,10 +2,10 @@
# iso2022_jp_ext.py: Python Unicode Codec for ISO2022_JP_EXT
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
-# $CJKCodecs: iso2022_jp_ext.py,v 1.2 2004/06/28 18:16:03 perky Exp $
#
import _codecs_iso2022, codecs
+import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_ext')
@@ -13,22 +13,27 @@ class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
-class StreamReader(Codec, codecs.StreamReader):
- def __init__(self, stream, errors='strict'):
- codecs.StreamReader.__init__(self, stream, errors)
- __codec = codec.StreamReader(stream, errors)
- self.read = __codec.read
- self.readline = __codec.readline
- self.readlines = __codec.readlines
- self.reset = __codec.reset
-
-class StreamWriter(Codec, codecs.StreamWriter):
- def __init__(self, stream, errors='strict'):
- codecs.StreamWriter.__init__(self, stream, errors)
- __codec = codec.StreamWriter(stream, errors)
- self.write = __codec.write
- self.writelines = __codec.writelines
- self.reset = __codec.reset
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
def getregentry():
- return (codec.encode, codec.decode, StreamReader, StreamWriter)
+ return codecs.CodecInfo(
+ name='iso2022_jp_ext',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Lib/encodings/iso2022_kr.py b/Lib/encodings/iso2022_kr.py
index f5549ca..bf70187 100644
--- a/Lib/encodings/iso2022_kr.py
+++ b/Lib/encodings/iso2022_kr.py
@@ -2,10 +2,10 @@
# iso2022_kr.py: Python Unicode Codec for ISO2022_KR
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
-# $CJKCodecs: iso2022_kr.py,v 1.2 2004/06/28 18:16:03 perky Exp $
#
import _codecs_iso2022, codecs
+import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_kr')
@@ -13,22 +13,27 @@ class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
-class StreamReader(Codec, codecs.StreamReader):
- def __init__(self, stream, errors='strict'):
- codecs.StreamReader.__init__(self, stream, errors)
- __codec = codec.StreamReader(stream, errors)
- self.read = __codec.read
- self.readline = __codec.readline
- self.readlines = __codec.readlines
- self.reset = __codec.reset
-
-class StreamWriter(Codec, codecs.StreamWriter):
- def __init__(self, stream, errors='strict'):
- codecs.StreamWriter.__init__(self, stream, errors)
- __codec = codec.StreamWriter(stream, errors)
- self.write = __codec.write
- self.writelines = __codec.writelines
- self.reset = __codec.reset
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
def getregentry():
- return (codec.encode, codec.decode, StreamReader, StreamWriter)
+ return codecs.CodecInfo(
+ name='iso2022_kr',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Lib/encodings/johab.py b/Lib/encodings/johab.py
index b6a87d7..512aeeb 100644
--- a/Lib/encodings/johab.py
+++ b/Lib/encodings/johab.py
@@ -2,10 +2,10 @@
# johab.py: Python Unicode Codec for JOHAB
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
-# $CJKCodecs: johab.py,v 1.8 2004/06/28 18:16:03 perky Exp $
#
import _codecs_kr, codecs
+import _multibytecodec as mbc
codec = _codecs_kr.getcodec('johab')
@@ -13,22 +13,27 @@ class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
-class StreamReader(Codec, codecs.StreamReader):
- def __init__(self, stream, errors='strict'):
- codecs.StreamReader.__init__(self, stream, errors)
- __codec = codec.StreamReader(stream, errors)
- self.read = __codec.read
- self.readline = __codec.readline
- self.readlines = __codec.readlines
- self.reset = __codec.reset
-
-class StreamWriter(Codec, codecs.StreamWriter):
- def __init__(self, stream, errors='strict'):
- codecs.StreamWriter.__init__(self, stream, errors)
- __codec = codec.StreamWriter(stream, errors)
- self.write = __codec.write
- self.writelines = __codec.writelines
- self.reset = __codec.reset
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
def getregentry():
- return (codec.encode, codec.decode, StreamReader, StreamWriter)
+ return codecs.CodecInfo(
+ name='johab',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Lib/encodings/shift_jis.py b/Lib/encodings/shift_jis.py
index ec5e517..8338117 100644
--- a/Lib/encodings/shift_jis.py
+++ b/Lib/encodings/shift_jis.py
@@ -2,10 +2,10 @@
# shift_jis.py: Python Unicode Codec for SHIFT_JIS
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
-# $CJKCodecs: shift_jis.py,v 1.8 2004/06/28 18:16:03 perky Exp $
#
import _codecs_jp, codecs
+import _multibytecodec as mbc
codec = _codecs_jp.getcodec('shift_jis')
@@ -13,22 +13,27 @@ class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
-class StreamReader(Codec, codecs.StreamReader):
- def __init__(self, stream, errors='strict'):
- codecs.StreamReader.__init__(self, stream, errors)
- __codec = codec.StreamReader(stream, errors)
- self.read = __codec.read
- self.readline = __codec.readline
- self.readlines = __codec.readlines
- self.reset = __codec.reset
-
-class StreamWriter(Codec, codecs.StreamWriter):
- def __init__(self, stream, errors='strict'):
- codecs.StreamWriter.__init__(self, stream, errors)
- __codec = codec.StreamWriter(stream, errors)
- self.write = __codec.write
- self.writelines = __codec.writelines
- self.reset = __codec.reset
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
def getregentry():
- return (codec.encode, codec.decode, StreamReader, StreamWriter)
+ return codecs.CodecInfo(
+ name='shift_jis',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Lib/encodings/shift_jis_2004.py b/Lib/encodings/shift_jis_2004.py
index 446cd7c..161b1e8 100644
--- a/Lib/encodings/shift_jis_2004.py
+++ b/Lib/encodings/shift_jis_2004.py
@@ -2,10 +2,10 @@
# shift_jis_2004.py: Python Unicode Codec for SHIFT_JIS_2004
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
-# $CJKCodecs: shift_jis_2004.py,v 1.1 2004/07/07 16:18:25 perky Exp $
#
import _codecs_jp, codecs
+import _multibytecodec as mbc
codec = _codecs_jp.getcodec('shift_jis_2004')
@@ -13,22 +13,27 @@ class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
-class StreamReader(Codec, codecs.StreamReader):
- def __init__(self, stream, errors='strict'):
- codecs.StreamReader.__init__(self, stream, errors)
- __codec = codec.StreamReader(stream, errors)
- self.read = __codec.read
- self.readline = __codec.readline
- self.readlines = __codec.readlines
- self.reset = __codec.reset
-
-class StreamWriter(Codec, codecs.StreamWriter):
- def __init__(self, stream, errors='strict'):
- codecs.StreamWriter.__init__(self, stream, errors)
- __codec = codec.StreamWriter(stream, errors)
- self.write = __codec.write
- self.writelines = __codec.writelines
- self.reset = __codec.reset
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
def getregentry():
- return (codec.encode, codec.decode, StreamReader, StreamWriter)
+ return codecs.CodecInfo(
+ name='shift_jis_2004',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Lib/encodings/shift_jisx0213.py b/Lib/encodings/shift_jisx0213.py
index 495468b..cb653f5 100644
--- a/Lib/encodings/shift_jisx0213.py
+++ b/Lib/encodings/shift_jisx0213.py
@@ -2,10 +2,10 @@
# shift_jisx0213.py: Python Unicode Codec for SHIFT_JISX0213
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
-# $CJKCodecs: shift_jisx0213.py,v 1.8 2004/06/28 18:16:03 perky Exp $
#
import _codecs_jp, codecs
+import _multibytecodec as mbc
codec = _codecs_jp.getcodec('shift_jisx0213')
@@ -13,22 +13,27 @@ class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
-class StreamReader(Codec, codecs.StreamReader):
- def __init__(self, stream, errors='strict'):
- codecs.StreamReader.__init__(self, stream, errors)
- __codec = codec.StreamReader(stream, errors)
- self.read = __codec.read
- self.readline = __codec.readline
- self.readlines = __codec.readlines
- self.reset = __codec.reset
-
-class StreamWriter(Codec, codecs.StreamWriter):
- def __init__(self, stream, errors='strict'):
- codecs.StreamWriter.__init__(self, stream, errors)
- __codec = codec.StreamWriter(stream, errors)
- self.write = __codec.write
- self.writelines = __codec.writelines
- self.reset = __codec.reset
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
def getregentry():
- return (codec.encode, codec.decode, StreamReader, StreamWriter)
+ return codecs.CodecInfo(
+ name='shift_jisx0213',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Lib/getpass.py b/Lib/getpass.py
index a30d3a1..8204a47 100644
--- a/Lib/getpass.py
+++ b/Lib/getpass.py
@@ -15,11 +15,14 @@ import sys
__all__ = ["getpass","getuser"]
-def unix_getpass(prompt='Password: '):
+def unix_getpass(prompt='Password: ', stream=None):
"""Prompt for a password, with echo turned off.
+    The prompt is written to stream, which defaults to sys.stdout.
Restore terminal settings at end.
"""
+ if stream is None:
+ stream = sys.stdout
try:
fd = sys.stdin.fileno()
@@ -32,18 +35,18 @@ def unix_getpass(prompt='Password: '):
new[3] = new[3] & ~termios.ECHO # 3 == 'lflags'
try:
termios.tcsetattr(fd, termios.TCSADRAIN, new)
- passwd = _raw_input(prompt)
+ passwd = _raw_input(prompt, stream)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old)
- sys.stdout.write('\n')
+ stream.write('\n')
return passwd
-def win_getpass(prompt='Password: '):
+def win_getpass(prompt='Password: ', stream=None):
"""Prompt for password with echo off, using Windows getch()."""
if sys.stdin is not sys.__stdin__:
- return default_getpass(prompt)
+ return default_getpass(prompt, stream)
import msvcrt
for c in prompt:
msvcrt.putch(c)
@@ -63,16 +66,18 @@ def win_getpass(prompt='Password: '):
return pw
-def default_getpass(prompt='Password: '):
- print "Warning: Problem with getpass. Passwords may be echoed."
- return _raw_input(prompt)
+def default_getpass(prompt='Password: ', stream=None):
+ print >>sys.stderr, "Warning: Problem with getpass. Passwords may be echoed."
+ return _raw_input(prompt, stream)
-def _raw_input(prompt=""):
+def _raw_input(prompt="", stream=None):
# This doesn't save the string in the GNU readline history.
+ if stream is None:
+ stream = sys.stdout
prompt = str(prompt)
if prompt:
- sys.stdout.write(prompt)
+ stream.write(prompt)
line = sys.stdin.readline()
if not line:
raise EOFError
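
A short sketch of the new keyword in use: the prompt can be routed away from stdout (the Unix variant honours it; the Windows variant only passes it along when falling back to default_getpass):

    import sys, getpass

    pw = getpass.getpass('Password: ', stream=sys.stderr)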
diff --git a/Lib/glob.py b/Lib/glob.py
index ecc6d25..95656cc 100644
--- a/Lib/glob.py
+++ b/Lib/glob.py
@@ -60,7 +60,7 @@ def glob0(dirname, basename):
if basename == '':
# `os.path.split()` returns an empty basename for paths ending with a
# directory separator. 'q*x/' should match only directories.
- if os.isdir(dirname):
+ if os.path.isdir(dirname):
return [basename]
else:
if os.path.lexists(os.path.join(dirname, basename)):
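
The one-line fix above repairs the directory-only match for patterns ending in a separator, which previously raised AttributeError (the os module has no isdir(); it lives in os.path). A sketch:

    import glob

    print glob.glob('*/')   # subdirectories of the cwd only, e.g. ['Lib/']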
diff --git a/Lib/idlelib/IOBinding.py b/Lib/idlelib/IOBinding.py
index ce1fd2a..deeb5c5 100644
--- a/Lib/idlelib/IOBinding.py
+++ b/Lib/idlelib/IOBinding.py
@@ -377,6 +377,7 @@ class IOBinding:
try:
f = open(filename, "wb")
f.write(chars)
+ f.flush()
f.close()
return True
except IOError, msg:
diff --git a/Lib/idlelib/NEWS.txt b/Lib/idlelib/NEWS.txt
index bbe9878..8163330 100644
--- a/Lib/idlelib/NEWS.txt
+++ b/Lib/idlelib/NEWS.txt
@@ -1,7 +1,10 @@
-What's New in IDLE 1.2a0?
-=======================
+What's New in IDLE 1.2a1?
+=========================
+
+*Release date: 05-APR-2006*
-*Release date: XX-XXX-2006*
+- Flush (f.flush()) source files after writing, to avoid losing data if
+  the user kills the GUI.
- Options / Keys / Advanced dialog made functional. Also, allow binding
of 'movement' keys.
@@ -70,7 +73,7 @@ What's New in IDLE 1.2a0?
- Improve error handling when .idlerc can't be created (warn and exit).
-- The GUI was hanging if the shell window was closed while a raw_input()
+- The GUI was hanging if the shell window was closed while a raw_input()
was pending. Restored the quit() of the readline() mainloop().
http://mail.python.org/pipermail/idle-dev/2004-December/002307.html
diff --git a/Lib/idlelib/idlever.py b/Lib/idlelib/idlever.py
index eef2885..fbde56c 100644
--- a/Lib/idlelib/idlever.py
+++ b/Lib/idlelib/idlever.py
@@ -1 +1 @@
-IDLE_VERSION = "1.2a0"
+IDLE_VERSION = "1.2a1"
diff --git a/Lib/inspect.py b/Lib/inspect.py
index 57bf18c..2e4d987 100644
--- a/Lib/inspect.py
+++ b/Lib/inspect.py
@@ -353,7 +353,7 @@ def getsourcefile(object):
if 'b' in mode and string.lower(filename[-len(suffix):]) == suffix:
# Looks like a binary file. We want to only return a text file.
return None
- if os.path.exists(filename):
+ if os.path.exists(filename) or hasattr(getmodule(object), '__loader__'):
return filename
def getabsfile(object):
@@ -379,7 +379,7 @@ def getmodule(object):
if file in modulesbyfile:
return sys.modules.get(modulesbyfile[file])
for module in sys.modules.values():
- if hasattr(module, '__file__'):
+ if ismodule(module) and hasattr(module, '__file__'):
modulesbyfile[
os.path.realpath(
getabsfile(module))] = module.__name__
@@ -406,7 +406,7 @@ def findsource(object):
in the file and the line number indexes a line in that list. An IOError
is raised if the source code cannot be retrieved."""
file = getsourcefile(object) or getfile(object)
- lines = linecache.getlines(file)
+ lines = linecache.getlines(file, getmodule(object).__dict__)
if not lines:
raise IOError('could not get source code')
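
These three changes together let inspect find source for modules whose files are not present on disk but whose PEP 302 __loader__ can supply the source (e.g. modules imported from a zip file). A sketch of the mechanism, assuming linecache's two-argument getlines() from the call above:

    import inspect, linecache, sys

    mod = sys.modules['linecache']
    lines = linecache.getlines(inspect.getabsfile(mod), mod.__dict__)
    print ''.join(lines[:3])    # first lines of the module's source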
diff --git a/Lib/lib-old/Para.py b/Lib/lib-old/Para.py
deleted file mode 100644
index 2fd8dc6..0000000
--- a/Lib/lib-old/Para.py
+++ /dev/null
@@ -1,343 +0,0 @@
-# Text formatting abstractions
-# Note -- this module is obsolete, it's too slow anyway
-
-
-# Oft-used type object
-Int = type(0)
-
-
-# Represent a paragraph. This is a list of words with associated
-# font and size information, plus indents and justification for the
-# entire paragraph.
-# Once the words have been added to a paragraph, it can be laid out
-# for different line widths. Once laid out, it can be rendered at
-# different screen locations. Once rendered, it can be queried
-# for mouse hits, and parts of the text can be highlighted
-class Para:
- #
- def __init__(self):
- self.words = [] # The words
- self.just = 'l' # Justification: 'l', 'r', 'lr' or 'c'
- self.indent_left = self.indent_right = self.indent_hang = 0
- # Final lay-out parameters, may change
- self.left = self.top = self.right = self.bottom = \
- self.width = self.height = self.lines = None
- #
- # Add a word, computing size information for it.
- # Words may also be added manually by appending to self.words
- # Each word should be a 7-tuple:
- # (font, text, width, space, stretch, ascent, descent)
- def addword(self, d, font, text, space, stretch):
- if font is not None:
- d.setfont(font)
- width = d.textwidth(text)
- ascent = d.baseline()
- descent = d.lineheight() - ascent
- spw = d.textwidth(' ')
- space = space * spw
- stretch = stretch * spw
- tuple = (font, text, width, space, stretch, ascent, descent)
- self.words.append(tuple)
- #
- # Hooks to begin and end anchors -- insert numbers in the word list!
- def bgn_anchor(self, id):
- self.words.append(id)
- #
- def end_anchor(self, id):
- self.words.append(0)
- #
- # Return the total length (width) of the text added so far, in pixels
- def getlength(self):
- total = 0
- for word in self.words:
- if type(word) is not Int:
- total = total + word[2] + word[3]
- return total
- #
- # Tab to a given position (relative to the current left indent):
- # remove all stretch, add fixed space up to the new indent.
- # If the current position is already at the tab stop,
- # don't add any new space (but still remove the stretch)
- def tabto(self, tab):
- total = 0
- as, de = 1, 0
- for i in range(len(self.words)):
- word = self.words[i]
- if type(word) is Int: continue
- (fo, te, wi, sp, st, as, de) = word
- self.words[i] = (fo, te, wi, sp, 0, as, de)
- total = total + wi + sp
- if total < tab:
- self.words.append((None, '', 0, tab-total, 0, as, de))
- #
- # Make a hanging tag: tab to hang, increment indent_left by hang,
- # and reset indent_hang to -hang
- def makehangingtag(self, hang):
- self.tabto(hang)
- self.indent_left = self.indent_left + hang
- self.indent_hang = -hang
- #
- # Decide where the line breaks will be given some screen width
- def layout(self, linewidth):
- self.width = linewidth
- height = 0
- self.lines = lines = []
- avail1 = self.width - self.indent_left - self.indent_right
- avail = avail1 - self.indent_hang
- words = self.words
- i = 0
- n = len(words)
- lastfont = None
- while i < n:
- firstfont = lastfont
- charcount = 0
- width = 0
- stretch = 0
- ascent = 0
- descent = 0
- lsp = 0
- j = i
- while i < n:
- word = words[i]
- if type(word) is Int:
- if word > 0 and width >= avail:
- break
- i = i+1
- continue
- fo, te, wi, sp, st, as, de = word
- if width + wi > avail and width > 0 and wi > 0:
- break
- if fo is not None:
- lastfont = fo
- if width == 0:
- firstfont = fo
- charcount = charcount + len(te) + (sp > 0)
- width = width + wi + sp
- lsp = sp
- stretch = stretch + st
- lst = st
- ascent = max(ascent, as)
- descent = max(descent, de)
- i = i+1
- while i > j and type(words[i-1]) is Int and \
- words[i-1] > 0: i = i-1
- width = width - lsp
- if i < n:
- stretch = stretch - lst
- else:
- stretch = 0
- tuple = i-j, firstfont, charcount, width, stretch, \
- ascent, descent
- lines.append(tuple)
- height = height + ascent + descent
- avail = avail1
- self.height = height
- #
- # Call a function for all words in a line
- def visit(self, wordfunc, anchorfunc):
- avail1 = self.width - self.indent_left - self.indent_right
- avail = avail1 - self.indent_hang
- v = self.top
- i = 0
- for tuple in self.lines:
- wordcount, firstfont, charcount, width, stretch, \
- ascent, descent = tuple
- h = self.left + self.indent_left
- if i == 0: h = h + self.indent_hang
- extra = 0
- if self.just == 'r': h = h + avail - width
- elif self.just == 'c': h = h + (avail - width) / 2
- elif self.just == 'lr' and stretch > 0:
- extra = avail - width
- v2 = v + ascent + descent
- for j in range(i, i+wordcount):
- word = self.words[j]
- if type(word) is Int:
- ok = anchorfunc(self, tuple, word, \
- h, v)
- if ok is not None: return ok
- continue
- fo, te, wi, sp, st, as, de = word
- if extra > 0 and stretch > 0:
- ex = extra * st / stretch
- extra = extra - ex
- stretch = stretch - st
- else:
- ex = 0
- h2 = h + wi + sp + ex
- ok = wordfunc(self, tuple, word, h, v, \
- h2, v2, (j==i), (j==i+wordcount-1))
- if ok is not None: return ok
- h = h2
- v = v2
- i = i + wordcount
- avail = avail1
- #
- # Render a paragraph in "drawing object" d, using the rectangle
- # given by (left, top, right) with an unspecified bottom.
- # Return the computed bottom of the text.
- def render(self, d, left, top, right):
- if self.width != right-left:
- self.layout(right-left)
- self.left = left
- self.top = top
- self.right = right
- self.bottom = self.top + self.height
- self.anchorid = 0
- try:
- self.d = d
- self.visit(self.__class__._renderword, \
- self.__class__._renderanchor)
- finally:
- self.d = None
- return self.bottom
- #
- def _renderword(self, tuple, word, h, v, h2, v2, isfirst, islast):
- if word[0] is not None: self.d.setfont(word[0])
- baseline = v + tuple[5]
- self.d.text((h, baseline - word[5]), word[1])
- if self.anchorid > 0:
- self.d.line((h, baseline+2), (h2, baseline+2))
- #
- def _renderanchor(self, tuple, word, h, v):
- self.anchorid = word
- #
- # Return which anchor(s) was hit by the mouse
- def hitcheck(self, mouseh, mousev):
- self.mouseh = mouseh
- self.mousev = mousev
- self.anchorid = 0
- self.hits = []
- self.visit(self.__class__._hitcheckword, \
- self.__class__._hitcheckanchor)
- return self.hits
- #
- def _hitcheckword(self, tuple, word, h, v, h2, v2, isfirst, islast):
- if self.anchorid > 0 and h <= self.mouseh <= h2 and \
- v <= self.mousev <= v2:
- self.hits.append(self.anchorid)
- #
- def _hitcheckanchor(self, tuple, word, h, v):
- self.anchorid = word
- #
- # Return whether the given anchor id is present
- def hasanchor(self, id):
- return id in self.words or -id in self.words
- #
- # Extract the raw text from the word list, substituting one space
- # for non-empty inter-word space, and terminating with '\n'
- def extract(self):
- text = ''
- for w in self.words:
- if type(w) is not Int:
- word = w[1]
- if w[3]: word = word + ' '
- text = text + word
- return text + '\n'
- #
- # Return which character position was hit by the mouse, as
- # an offset in the entire text as returned by extract().
- # Return None if the mouse was not in this paragraph
- def whereis(self, d, mouseh, mousev):
- if mousev < self.top or mousev > self.bottom:
- return None
- self.mouseh = mouseh
- self.mousev = mousev
- self.lastfont = None
- self.charcount = 0
- try:
- self.d = d
- return self.visit(self.__class__._whereisword, \
- self.__class__._whereisanchor)
- finally:
- self.d = None
- #
- def _whereisword(self, tuple, word, h1, v1, h2, v2, isfirst, islast):
- fo, te, wi, sp, st, as, de = word
- if fo is not None: self.lastfont = fo
- h = h1
- if isfirst: h1 = 0
- if islast: h2 = 999999
- if not (v1 <= self.mousev <= v2 and h1 <= self.mouseh <= h2):
- self.charcount = self.charcount + len(te) + (sp > 0)
- return
- if self.lastfont is not None:
- self.d.setfont(self.lastfont)
- cc = 0
- for c in te:
- cw = self.d.textwidth(c)
- if self.mouseh <= h + cw/2:
- return self.charcount + cc
- cc = cc+1
- h = h+cw
- self.charcount = self.charcount + cc
- if self.mouseh <= (h+h2) / 2:
- return self.charcount
- else:
- return self.charcount + 1
- #
- def _whereisanchor(self, tuple, word, h, v):
- pass
- #
- # Return screen position corresponding to position in paragraph.
- # Return tuple (h, vtop, vbaseline, vbottom).
- # This is more or less the inverse of whereis()
- def screenpos(self, d, pos):
- if pos < 0:
- ascent, descent = self.lines[0][5:7]
- return self.left, self.top, self.top + ascent, \
- self.top + ascent + descent
- self.pos = pos
- self.lastfont = None
- try:
- self.d = d
- ok = self.visit(self.__class__._screenposword, \
- self.__class__._screenposanchor)
- finally:
- self.d = None
- if ok is None:
- ascent, descent = self.lines[-1][5:7]
- ok = self.right, self.bottom - ascent - descent, \
- self.bottom - descent, self.bottom
- return ok
- #
- def _screenposword(self, tuple, word, h1, v1, h2, v2, isfirst, islast):
- fo, te, wi, sp, st, as, de = word
- if fo is not None: self.lastfont = fo
- cc = len(te) + (sp > 0)
- if self.pos > cc:
- self.pos = self.pos - cc
- return
- if self.pos < cc:
- self.d.setfont(self.lastfont)
- h = h1 + self.d.textwidth(te[:self.pos])
- else:
- h = h2
- ascent, descent = tuple[5:7]
- return h, v1, v1+ascent, v2
- #
- def _screenposanchor(self, tuple, word, h, v):
- pass
- #
- # Invert the stretch of text between pos1 and pos2.
- # If pos1 is None, the beginning is implied;
- # if pos2 is None, the end is implied.
- # Undoes its own effect when called again with the same arguments
- def invert(self, d, pos1, pos2):
- if pos1 is None:
- pos1 = self.left, self.top, self.top, self.top
- else:
- pos1 = self.screenpos(d, pos1)
- if pos2 is None:
- pos2 = self.right, self.bottom,self.bottom,self.bottom
- else:
- pos2 = self.screenpos(d, pos2)
- h1, top1, baseline1, bottom1 = pos1
- h2, top2, baseline2, bottom2 = pos2
- if bottom1 <= top2:
- d.invert((h1, top1), (self.right, bottom1))
- h1 = self.left
- if bottom1 < top2:
- d.invert((h1, bottom1), (self.right, top2))
- top1, bottom1 = top2, bottom2
- d.invert((h1, top1), (h2, bottom2))
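
The layout() method above is a classic greedy first-fit line breaker: words accumulate on a line until the next one would overflow the available width, with the hanging indent giving the first line a different width. A minimal sketch of the core idea, assuming words are simple (text, width) pairs rather than Para's 7-tuples:

    def greedy_layout(words, linewidth):
        """Break (text, width) pairs into lines no wider than linewidth."""
        lines, current, used = [], [], 0
        for text, width in words:
            # Start a new line when a non-empty line would overflow.
            if current and used + width > linewidth:
                lines.append(current)
                current, used = [], 0
            current.append(text)
            used = used + width
        if current:
            lines.append(current)
        return lines
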
diff --git a/Lib/lib-old/addpack.py b/Lib/lib-old/addpack.py
deleted file mode 100644
index 2fb2601..0000000
--- a/Lib/lib-old/addpack.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# This module provides standard support for "packages".
-#
-# The idea is that large groups of related modules can be placed in
-# their own subdirectory, which can be added to the Python search path
-# in a relatively easy way.
-#
-# The current version takes a package name and searches the Python
-# search path for a directory by that name, and if found adds it to
-# the module search path (sys.path). It maintains a list of packages
-# that have already been added so adding the same package many times
-# is OK.
-#
-# It is intended to be used in a fairly stylized manner: each module
-# that wants to use a particular package, say 'Foo', is supposed to
-# contain the following code:
-#
-# from addpack import addpack
-# addpack('Foo')
-# <import modules from package Foo>
-#
-# Additional arguments, when present, provide additional places to
-# look for the package before trying sys.path (these may be either
-# strings or lists/tuples of strings). Also, if the package name is a
-# full pathname, first the last component is tried in the usual way,
-# then the full pathname is tried last. If the package name is a
-# *relative* pathname (UNIX: contains a slash but doesn't start with
-# one), then nothing special is done. The packages "/foo/bar/bletch"
-# and "bletch" are considered the same, but unrelated to "bar/bletch".
-#
-# If the algorithm finds more than one suitable subdirectory, all are
-# added to the search path -- this makes it possible to override part
-# of a package. The same path will not be added more than once.
-#
-# If no directory is found, ImportError is raised.
-
-_packs = {} # {pack: [pathname, ...], ...}
-
-def addpack(pack, *locations):
- import os
- if os.path.isabs(pack):
- base = os.path.basename(pack)
- else:
- base = pack
- if _packs.has_key(base):
- return
- import sys
- path = []
- for loc in _flatten(locations) + sys.path:
- fn = os.path.join(loc, base)
- if fn not in path and os.path.isdir(fn):
- path.append(fn)
- if pack != base and pack not in path and os.path.isdir(pack):
- path.append(pack)
- if not path: raise ImportError, 'package ' + pack + ' not found'
- _packs[base] = path
- for fn in path:
- if fn not in sys.path:
- sys.path.append(fn)
-
-def _flatten(locations):
- locs = []
- for loc in locations:
- if type(loc) == type(''):
- locs.append(loc)
- else:
- locs = locs + _flatten(loc)
- return locs
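
With real packages in the language, addpack's job reduces to putting one directory on the import path exactly once. A minimal modern sketch of the same behaviour, assuming the directory already exists:

    import os
    import sys

    def addpack(directory):
        """Make a directory importable, adding it to sys.path only once."""
        path = os.path.abspath(directory)
        if os.path.isdir(path) and path not in sys.path:
            sys.path.append(path)
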
diff --git a/Lib/lib-old/cmp.py b/Lib/lib-old/cmp.py
deleted file mode 100644
index 1146a25..0000000
--- a/Lib/lib-old/cmp.py
+++ /dev/null
@@ -1,63 +0,0 @@
-"""Efficiently compare files, boolean outcome only (equal / not equal).
-
-Tricks (used in this order):
- - Files with identical type, size & mtime are assumed to be clones
- - Files with different type or size cannot be identical
- - We keep a cache of outcomes of earlier comparisons
- - We don't fork a process to run 'cmp' but read the files ourselves
-"""
-
-import os
-
-cache = {}
-
-def cmp(f1, f2, shallow=1):
- """Compare two files, use the cache if possible.
- Return 1 for identical files, 0 for different.
- Raise exceptions if either file could not be statted, read, etc."""
- s1, s2 = sig(os.stat(f1)), sig(os.stat(f2))
- if s1[0] != 8 or s2[0] != 8:
-    # Either is not a plain file -- always report as different
- return 0
- if shallow and s1 == s2:
- # type, size & mtime match -- report same
- return 1
- if s1[:2] != s2[:2]: # Types or sizes differ, don't bother
- # types or sizes differ -- report different
- return 0
- # same type and size -- look in the cache
- key = (f1, f2)
- try:
- cs1, cs2, outcome = cache[key]
- # cache hit
- if s1 == cs1 and s2 == cs2:
- # cached signatures match
- return outcome
- # stale cached signature(s)
- except KeyError:
- # cache miss
- pass
- # really compare
- outcome = do_cmp(f1, f2)
- cache[key] = s1, s2, outcome
- return outcome
-
-def sig(st):
- """Return signature (i.e., type, size, mtime) from raw stat data
- 0-5: st_mode, st_ino, st_dev, st_nlink, st_uid, st_gid
- 6-9: st_size, st_atime, st_mtime, st_ctime"""
- type = st[0] / 4096
- size = st[6]
- mtime = st[8]
- return type, size, mtime
-
-def do_cmp(f1, f2):
- """Compare two files, really."""
- bufsize = 8*1024 # Could be tuned
- fp1 = open(f1, 'rb')
- fp2 = open(f2, 'rb')
- while 1:
- b1 = fp1.read(bufsize)
- b2 = fp2.read(bufsize)
- if b1 != b2: return 0
- if not b1: return 1
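
The same stat-signature shortcut survives in the standard library as filecmp.cmp(), complete with the outcome cache. For example (file names hypothetical):

    import filecmp

    # shallow=1 trusts matching (type, size, mtime) signatures, exactly
    # the first trick listed above; shallow=0 always reads both files.
    same = filecmp.cmp('a.txt', 'b.txt', shallow=1)
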
diff --git a/Lib/lib-old/cmpcache.py b/Lib/lib-old/cmpcache.py
deleted file mode 100644
index 11540f8..0000000
--- a/Lib/lib-old/cmpcache.py
+++ /dev/null
@@ -1,64 +0,0 @@
-"""Efficiently compare files, boolean outcome only (equal / not equal).
-
-Tricks (used in this order):
- - Use the statcache module to avoid statting files more than once
- - Files with identical type, size & mtime are assumed to be clones
- - Files with different type or size cannot be identical
- - We keep a cache of outcomes of earlier comparisons
- - We don't fork a process to run 'cmp' but read the files ourselves
-"""
-
-import os
-from stat import *
-import statcache
-
-
-# The cache.
-#
-cache = {}
-
-
-def cmp(f1, f2, shallow=1):
- """Compare two files, use the cache if possible.
- May raise os.error if a stat or open of either fails.
- Return 1 for identical files, 0 for different.
- Raise exceptions if either file could not be statted, read, etc."""
- s1, s2 = sig(statcache.stat(f1)), sig(statcache.stat(f2))
- if not S_ISREG(s1[0]) or not S_ISREG(s2[0]):
-    # Either is not a plain file -- always report as different
- return 0
- if shallow and s1 == s2:
- # type, size & mtime match -- report same
- return 1
- if s1[:2] != s2[:2]: # Types or sizes differ, don't bother
- # types or sizes differ -- report different
- return 0
- # same type and size -- look in the cache
- key = f1 + ' ' + f2
- if cache.has_key(key):
- cs1, cs2, outcome = cache[key]
- # cache hit
- if s1 == cs1 and s2 == cs2:
- # cached signatures match
- return outcome
- # stale cached signature(s)
- # really compare
- outcome = do_cmp(f1, f2)
- cache[key] = s1, s2, outcome
- return outcome
-
-def sig(st):
- """Return signature (i.e., type, size, mtime) from raw stat data."""
- return S_IFMT(st[ST_MODE]), st[ST_SIZE], st[ST_MTIME]
-
-def do_cmp(f1, f2):
- """Compare two files, really."""
- #print ' cmp', f1, f2 # XXX remove when debugged
- bufsize = 8*1024 # Could be tuned
- fp1 = open(f1, 'rb')
- fp2 = open(f2, 'rb')
- while 1:
- b1 = fp1.read(bufsize)
- b2 = fp2.read(bufsize)
- if b1 != b2: return 0
- if not b1: return 1
diff --git a/Lib/lib-old/codehack.py b/Lib/lib-old/codehack.py
deleted file mode 100644
index 0b5e3a1..0000000
--- a/Lib/lib-old/codehack.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# A subroutine for extracting a function name from a code object
-# (with cache)
-
-import sys
-from stat import *
-import string
-import os
-import linecache
-
-# XXX The functions getcodename() and getfuncname() are now obsolete
-# XXX as code and function objects now have a name attribute --
-# XXX co.co_name and f.func_name.
-# XXX getlineno() is now also obsolete because of the new attribute
-# XXX of code objects, co.co_firstlineno.
-
-# Extract the function or class name from a code object.
-# This is a bit of a hack, since a code object doesn't contain
-# the name directly. So what do we do:
-# - get the filename (which *is* in the code object)
-# - look in the code string to find the first SET_LINENO instruction
-# (this must be the first instruction)
-# - get the line from the file
-# - if the line starts with 'class' or 'def' (after possible whitespace),
-# extract the following identifier
-#
-# This breaks apart when the function was read from <stdin>
-# or constructed by exec(), when the file is not accessible,
-# and also when the file has been modified or when a line is
-# continued with a backslash before the function or class name.
-#
-# Because this is a pretty expensive hack, a cache is kept.
-
-SET_LINENO = 127 # The opcode (see "opcode.h" in the Python source)
-identchars = string.ascii_letters + string.digits + '_' # Identifier characters
-
-_namecache = {} # The cache
-
-def getcodename(co):
- try:
- return co.co_name
- except AttributeError:
- pass
- key = `co` # arbitrary but uniquely identifying string
- if _namecache.has_key(key): return _namecache[key]
- filename = co.co_filename
- code = co.co_code
- name = ''
- if ord(code[0]) == SET_LINENO:
- lineno = ord(code[1]) | ord(code[2]) << 8
- line = linecache.getline(filename, lineno)
- words = line.split()
- if len(words) >= 2 and words[0] in ('def', 'class'):
- name = words[1]
- for i in range(len(name)):
- if name[i] not in identchars:
- name = name[:i]
- break
- _namecache[key] = name
- return name
-
-# Use the above routine to find a function's name.
-
-def getfuncname(func):
- try:
- return func.func_name
- except AttributeError:
- pass
- return getcodename(func.func_code)
-
-# A part of the above code to extract just the line number from a code object.
-
-def getlineno(co):
- try:
- return co.co_firstlineno
- except AttributeError:
- pass
- code = co.co_code
- if ord(code[0]) == SET_LINENO:
- return ord(code[1]) | ord(code[2]) << 8
- else:
- return -1
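
As the XXX comments above note, the whole hack is superseded by attributes that code and function objects now carry directly. A minimal sketch of the modern replacements for all three helpers:

    def getcodename(co):
        return co.co_name

    def getfuncname(func):
        return func.func_name

    def getlineno(co):
        return co.co_firstlineno
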
diff --git a/Lib/lib-old/dircmp.py b/Lib/lib-old/dircmp.py
deleted file mode 100644
index 1e7bf2a..0000000
--- a/Lib/lib-old/dircmp.py
+++ /dev/null
@@ -1,202 +0,0 @@
-"""A class to build directory diff tools on."""
-
-import os
-
-import dircache
-import cmpcache
-import statcache
-from stat import *
-
-class dircmp:
- """Directory comparison class."""
-
- def new(self, a, b):
- """Initialize."""
- self.a = a
- self.b = b
- # Properties that caller may change before calling self.run():
- self.hide = [os.curdir, os.pardir] # Names never to be shown
- self.ignore = ['RCS', 'tags'] # Names ignored in comparison
-
- return self
-
- def run(self):
- """Compare everything except common subdirectories."""
- self.a_list = filter(dircache.listdir(self.a), self.hide)
- self.b_list = filter(dircache.listdir(self.b), self.hide)
- self.a_list.sort()
- self.b_list.sort()
- self.phase1()
- self.phase2()
- self.phase3()
-
- def phase1(self):
- """Compute common names."""
- self.a_only = []
- self.common = []
- for x in self.a_list:
- if x in self.b_list:
- self.common.append(x)
- else:
- self.a_only.append(x)
-
- self.b_only = []
- for x in self.b_list:
- if x not in self.common:
- self.b_only.append(x)
-
- def phase2(self):
- """Distinguish files, directories, funnies."""
- self.common_dirs = []
- self.common_files = []
- self.common_funny = []
-
- for x in self.common:
- a_path = os.path.join(self.a, x)
- b_path = os.path.join(self.b, x)
-
- ok = 1
- try:
- a_stat = statcache.stat(a_path)
- except os.error, why:
- # print 'Can\'t stat', a_path, ':', why[1]
- ok = 0
- try:
- b_stat = statcache.stat(b_path)
- except os.error, why:
- # print 'Can\'t stat', b_path, ':', why[1]
- ok = 0
-
- if ok:
- a_type = S_IFMT(a_stat[ST_MODE])
- b_type = S_IFMT(b_stat[ST_MODE])
- if a_type != b_type:
- self.common_funny.append(x)
- elif S_ISDIR(a_type):
- self.common_dirs.append(x)
- elif S_ISREG(a_type):
- self.common_files.append(x)
- else:
- self.common_funny.append(x)
- else:
- self.common_funny.append(x)
-
- def phase3(self):
- """Find out differences between common files."""
- xx = cmpfiles(self.a, self.b, self.common_files)
- self.same_files, self.diff_files, self.funny_files = xx
-
- def phase4(self):
- """Find out differences between common subdirectories.
- A new dircmp object is created for each common subdirectory,
- these are stored in a dictionary indexed by filename.
- The hide and ignore properties are inherited from the parent."""
- self.subdirs = {}
- for x in self.common_dirs:
- a_x = os.path.join(self.a, x)
- b_x = os.path.join(self.b, x)
- self.subdirs[x] = newdd = dircmp().new(a_x, b_x)
- newdd.hide = self.hide
- newdd.ignore = self.ignore
- newdd.run()
-
- def phase4_closure(self):
- """Recursively call phase4() on subdirectories."""
- self.phase4()
- for x in self.subdirs.keys():
- self.subdirs[x].phase4_closure()
-
- def report(self):
- """Print a report on the differences between a and b."""
- # Assume that phases 1 to 3 have been executed
- # Output format is purposely lousy
- print 'diff', self.a, self.b
- if self.a_only:
- print 'Only in', self.a, ':', self.a_only
- if self.b_only:
- print 'Only in', self.b, ':', self.b_only
- if self.same_files:
- print 'Identical files :', self.same_files
- if self.diff_files:
- print 'Differing files :', self.diff_files
- if self.funny_files:
- print 'Trouble with common files :', self.funny_files
- if self.common_dirs:
- print 'Common subdirectories :', self.common_dirs
- if self.common_funny:
- print 'Common funny cases :', self.common_funny
-
- def report_closure(self):
- """Print reports on self and on subdirs.
- If phase 4 hasn't been done, no subdir reports are printed."""
- self.report()
- try:
- x = self.subdirs
- except AttributeError:
- return # No subdirectories computed
- for x in self.subdirs.keys():
- print
- self.subdirs[x].report_closure()
-
- def report_phase4_closure(self):
- """Report and do phase 4 recursively."""
- self.report()
- self.phase4()
- for x in self.subdirs.keys():
- print
- self.subdirs[x].report_phase4_closure()
-
-
-def cmpfiles(a, b, common):
- """Compare common files in two directories.
- Return:
- - files that compare equal
- - files that compare different
- - funny cases (can't stat etc.)"""
-
- res = ([], [], [])
- for x in common:
- res[cmp(os.path.join(a, x), os.path.join(b, x))].append(x)
- return res
-
-
-def cmp(a, b):
- """Compare two files.
- Return:
- 0 for equal
- 1 for different
- 2 for funny cases (can't stat, etc.)"""
-
- try:
- if cmpcache.cmp(a, b): return 0
- return 1
- except os.error:
- return 2
-
-
-def filter(list, skip):
- """Return a copy with items that occur in skip removed."""
-
- result = []
- for item in list:
- if item not in skip: result.append(item)
- return result
-
-
-def demo():
- """Demonstration and testing."""
-
- import sys
- import getopt
- options, args = getopt.getopt(sys.argv[1:], 'r')
- if len(args) != 2:
- raise getopt.error, 'need exactly two args'
- dd = dircmp().new(args[0], args[1])
- dd.run()
- if ('-r', '') in options:
- dd.report_phase4_closure()
- else:
- dd.report()
-
-if __name__ == "__main__":
- demo()
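
This class lives on as filecmp.dircmp, which keeps the phase structure (common names, type classification, file comparison, recursive subdirectories). A usage sketch with hypothetical paths:

    import filecmp

    d = filecmp.dircmp('dir_a', 'dir_b', ignore=['RCS', 'tags'])
    d.report()               # one level, like run() followed by report()
    d.report_full_closure()  # recursive, like report_phase4_closure()
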
diff --git a/Lib/lib-old/dump.py b/Lib/lib-old/dump.py
deleted file mode 100644
index 60bdba8..0000000
--- a/Lib/lib-old/dump.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Module 'dump'
-#
-# Print python code that reconstructs a variable.
-# This only works in certain cases.
-#
-# It works fine for:
-# - ints and floats (except NaNs and other weird things)
-# - strings
-# - compounds and lists, provided it works for all their elements
-# - imported modules, provided their name is the module name
-#
-# It works for top-level dictionaries but not for dictionaries
-# contained in other objects (could be made to work with some hassle
-# though).
-#
-# It does not work for functions (all sorts), classes, class objects,
-# windows, files etc.
-#
-# Finally, objects referenced by more than one name or contained in more
-# than one other object lose their sharing property (this is bad for
-# strings used as exception identifiers, for instance).
-
-# Dump a whole symbol table
-#
-def dumpsymtab(dict):
- for key in dict.keys():
- dumpvar(key, dict[key])
-
-# Dump a single variable
-#
-def dumpvar(name, x):
- import sys
- t = type(x)
- if t == type({}):
- print name, '= {}'
- for key in x.keys():
- item = x[key]
- if not printable(item):
- print '#',
- print name, '[', `key`, '] =', `item`
- elif t in (type(''), type(0), type(0.0), type([]), type(())):
- if not printable(x):
- print '#',
- print name, '=', `x`
- elif t == type(sys):
- print 'import', name, '#', x
- else:
- print '#', name, '=', x
-
-# check if a value is printable in a way that can be read back with input()
-#
-def printable(x):
- t = type(x)
- if t in (type(''), type(0), type(0.0)):
- return 1
- if t in (type([]), type(())):
- for item in x:
- if not printable(item):
- return 0
- return 1
- if x == {}:
- return 1
- return 0
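
For the printable cases this module handles, repr() already produces code that reconstructs the value, and pprint formats nested containers readably. For example (value hypothetical):

    import pprint

    value = {'spam': [1, 2.0, 'three']}
    print 'value =', repr(value)   # eval()-able for simple types
    pprint.pprint(value)
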
diff --git a/Lib/lib-old/find.py b/Lib/lib-old/find.py
deleted file mode 100644
index 39ad771..0000000
--- a/Lib/lib-old/find.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import fnmatch
-import os
-
-_debug = 0
-
-_prune = ['(*)']
-
-def find(pattern, dir = os.curdir):
- list = []
- names = os.listdir(dir)
- names.sort()
- for name in names:
- if name in (os.curdir, os.pardir):
- continue
- fullname = os.path.join(dir, name)
- if fnmatch.fnmatch(name, pattern):
- list.append(fullname)
- if os.path.isdir(fullname) and not os.path.islink(fullname):
- for p in _prune:
- if fnmatch.fnmatch(name, p):
- if _debug: print "skip", `fullname`
- break
- else:
- if _debug: print "descend into", `fullname`
- list = list + find(pattern, fullname)
- return list
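
The same recursive match is a few lines over os.walk() today; a sketch that, like the original, matches directories as well as files, though unlike the original it applies no prune list:

    import fnmatch
    import os

    def find(pattern, top=os.curdir):
        hits = []
        for dirpath, dirnames, filenames in os.walk(top):
            for name in dirnames + filenames:
                if fnmatch.fnmatch(name, pattern):
                    hits.append(os.path.join(dirpath, name))
        return hits
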
diff --git a/Lib/lib-old/fmt.py b/Lib/lib-old/fmt.py
deleted file mode 100644
index 997d37a..0000000
--- a/Lib/lib-old/fmt.py
+++ /dev/null
@@ -1,623 +0,0 @@
-# Text formatting abstractions
-# Note -- this module is obsolete, it's too slow anyway
-
-
-import string
-import Para
-
-
-# A formatter back-end object has one method that is called by the formatter:
-# addpara(p), where p is a paragraph object. For example:
-
-
-# Formatter back-end to do nothing at all with the paragraphs
-class NullBackEnd:
- #
- def __init__(self):
- pass
- #
- def addpara(self, p):
- pass
- #
- def bgn_anchor(self, id):
- pass
- #
- def end_anchor(self, id):
- pass
-
-
-# Formatter back-end to collect the paragraphs in a list
-class SavingBackEnd(NullBackEnd):
- #
- def __init__(self):
- self.paralist = []
- #
- def addpara(self, p):
- self.paralist.append(p)
- #
- def hitcheck(self, h, v):
- hits = []
- for p in self.paralist:
- if p.top <= v <= p.bottom:
- for id in p.hitcheck(h, v):
- if id not in hits:
- hits.append(id)
- return hits
- #
- def extract(self):
- text = ''
- for p in self.paralist:
- text = text + (p.extract())
- return text
- #
- def extractpart(self, long1, long2):
- if long1 > long2: long1, long2 = long2, long1
- para1, pos1 = long1
- para2, pos2 = long2
- text = ''
- while para1 < para2:
- ptext = self.paralist[para1].extract()
- text = text + ptext[pos1:]
- pos1 = 0
- para1 = para1 + 1
- ptext = self.paralist[para2].extract()
- return text + ptext[pos1:pos2]
- #
- def whereis(self, d, h, v):
- total = 0
- for i in range(len(self.paralist)):
- p = self.paralist[i]
- result = p.whereis(d, h, v)
- if result is not None:
- return i, result
- return None
- #
- def roundtowords(self, long1, long2):
- i, offset = long1
- text = self.paralist[i].extract()
- while offset > 0 and text[offset-1] != ' ': offset = offset-1
- long1 = i, offset
- #
- i, offset = long2
- text = self.paralist[i].extract()
- n = len(text)
- while offset < n-1 and text[offset] != ' ': offset = offset+1
- long2 = i, offset
- #
- return long1, long2
- #
- def roundtoparagraphs(self, long1, long2):
- long1 = long1[0], 0
- long2 = long2[0], len(self.paralist[long2[0]].extract())
- return long1, long2
-
-
-# Formatter back-end to send the text directly to the drawing object
-class WritingBackEnd(NullBackEnd):
- #
- def __init__(self, d, width):
- self.d = d
- self.width = width
- self.lineno = 0
- #
- def addpara(self, p):
- self.lineno = p.render(self.d, 0, self.lineno, self.width)
-
-
-# A formatter receives a stream of formatting instructions and assembles
-# these into a stream of paragraphs on to a back-end. The assembly is
-# parametrized by a text measurement object, which must match the output
-# operations of the back-end. The back-end is responsible for splitting
-# paragraphs up into lines of a given maximum width. (This is done because
-# in a windowing environment, when the window size changes, there is no
-# need to redo the assembly into paragraphs, but the splitting into lines
-# must be done taking the new window size into account.)
-
-
-# Formatter base class. Initialize it with a text measurement object,
-# which is used for text measurements, and a back-end object,
-# which receives the completed paragraphs. The formatting methods are:
-# setfont(font)
-# setleftindent(nspaces)
-# setjust(type) where type is 'l', 'c', 'r', or 'lr'
-# flush()
-# vspace(nlines)
-# needvspace(nlines)
-# addword(word, nspaces)
-class BaseFormatter:
- #
- def __init__(self, d, b):
- # Drawing object used for text measurements
- self.d = d
- #
- # BackEnd object receiving completed paragraphs
- self.b = b
- #
- # Parameters of the formatting model
- self.leftindent = 0
- self.just = 'l'
- self.font = None
- self.blanklines = 0
- #
- # Parameters derived from the current font
- self.space = d.textwidth(' ')
- self.line = d.lineheight()
- self.ascent = d.baseline()
- self.descent = self.line - self.ascent
- #
- # Parameter derived from the default font
- self.n_space = self.space
- #
- # Current paragraph being built
- self.para = None
- self.nospace = 1
- #
- # Font to set on the next word
- self.nextfont = None
- #
- def newpara(self):
- return Para.Para()
- #
- def setfont(self, font):
- if font is None: return
- self.font = self.nextfont = font
- d = self.d
- d.setfont(font)
- self.space = d.textwidth(' ')
- self.line = d.lineheight()
- self.ascent = d.baseline()
- self.descent = self.line - self.ascent
- #
- def setleftindent(self, nspaces):
- self.leftindent = int(self.n_space * nspaces)
- if self.para:
- hang = self.leftindent - self.para.indent_left
- if hang > 0 and self.para.getlength() <= hang:
- self.para.makehangingtag(hang)
- self.nospace = 1
- else:
- self.flush()
- #
- def setrightindent(self, nspaces):
- self.rightindent = int(self.n_space * nspaces)
- if self.para:
- self.para.indent_right = self.rightindent
- self.flush()
- #
- def setjust(self, just):
- self.just = just
- if self.para:
- self.para.just = self.just
- #
- def flush(self):
- if self.para:
- self.b.addpara(self.para)
- self.para = None
- if self.font is not None:
- self.d.setfont(self.font)
- self.nospace = 1
- #
- def vspace(self, nlines):
- self.flush()
- if nlines > 0:
- self.para = self.newpara()
- tuple = None, '', 0, 0, 0, int(nlines*self.line), 0
- self.para.words.append(tuple)
- self.flush()
- self.blanklines = self.blanklines + nlines
- #
- def needvspace(self, nlines):
- self.flush() # Just to be sure
- if nlines > self.blanklines:
- self.vspace(nlines - self.blanklines)
- #
- def addword(self, text, space):
- if self.nospace and not text:
- return
- self.nospace = 0
- self.blanklines = 0
- if not self.para:
- self.para = self.newpara()
- self.para.indent_left = self.leftindent
- self.para.just = self.just
- self.nextfont = self.font
- space = int(space * self.space)
- self.para.words.append((self.nextfont, text,
- self.d.textwidth(text), space, space,
- self.ascent, self.descent))
- self.nextfont = None
- #
- def bgn_anchor(self, id):
- if not self.para:
- self.nospace = 0
- self.addword('', 0)
- self.para.bgn_anchor(id)
- #
- def end_anchor(self, id):
- if not self.para:
- self.nospace = 0
- self.addword('', 0)
- self.para.end_anchor(id)
-
-
-# Measuring object for measuring text as viewed on a tty
-class NullMeasurer:
- #
- def __init__(self):
- pass
- #
- def setfont(self, font):
- pass
- #
- def textwidth(self, text):
- return len(text)
- #
- def lineheight(self):
- return 1
- #
- def baseline(self):
- return 0
-
-
-# Drawing object for writing plain ASCII text to a file
-class FileWriter:
- #
- def __init__(self, fp):
- self.fp = fp
- self.lineno, self.colno = 0, 0
- #
- def setfont(self, font):
- pass
- #
- def text(self, (h, v), str):
- if not str: return
- if '\n' in str:
- raise ValueError, 'can\'t write \\n'
- while self.lineno < v:
- self.fp.write('\n')
- self.colno, self.lineno = 0, self.lineno + 1
- while self.lineno > v:
- # XXX This should never happen...
- self.fp.write('\033[A') # ANSI up arrow
- self.lineno = self.lineno - 1
- if self.colno < h:
- self.fp.write(' ' * (h - self.colno))
- elif self.colno > h:
- self.fp.write('\b' * (self.colno - h))
- self.colno = h
- self.fp.write(str)
- self.colno = h + len(str)
-
-
-# Formatting class to do nothing at all with the data
-class NullFormatter(BaseFormatter):
- #
- def __init__(self):
- d = NullMeasurer()
- b = NullBackEnd()
- BaseFormatter.__init__(self, d, b)
-
-
-# Formatting class to write directly to a file
-class WritingFormatter(BaseFormatter):
- #
- def __init__(self, fp, width):
- dm = NullMeasurer()
- dw = FileWriter(fp)
- b = WritingBackEnd(dw, width)
- BaseFormatter.__init__(self, dm, b)
- self.blanklines = 1
- #
- # Suppress multiple blank lines
- def needvspace(self, nlines):
- BaseFormatter.needvspace(self, min(1, nlines))
-
-
-# A "FunnyFormatter" writes ASCII text with a twist: *bold words*,
-# _italic text_ and _underlined words_, and `quoted text'.
-# It assumes that the fonts are 'r', 'i', 'b', 'u', 'q': (roman,
-# italic, bold, underline, quote).
-# Moreover, if the font is in upper case, the text is converted to
-# UPPER CASE.
-class FunnyFormatter(WritingFormatter):
- #
- def flush(self):
- if self.para: finalize(self.para)
- WritingFormatter.flush(self)
-
-
-# Surrounds *bold words* and _italic text_ in a paragraph with
-# appropriate markers, fixing the size (assuming these characters'
-# width is 1).
-openchar = \
- {'b':'*', 'i':'_', 'u':'_', 'q':'`', 'B':'*', 'I':'_', 'U':'_', 'Q':'`'}
-closechar = \
- {'b':'*', 'i':'_', 'u':'_', 'q':'\'', 'B':'*', 'I':'_', 'U':'_', 'Q':'\''}
-def finalize(para):
- oldfont = curfont = 'r'
- para.words.append(('r', '', 0, 0, 0, 0)) # temporary, deleted at end
- for i in range(len(para.words)):
- fo, te, wi = para.words[i][:3]
- if fo is not None: curfont = fo
- if curfont != oldfont:
- if closechar.has_key(oldfont):
- c = closechar[oldfont]
- j = i-1
- while j > 0 and para.words[j][1] == '': j = j-1
- fo1, te1, wi1 = para.words[j][:3]
- te1 = te1 + c
- wi1 = wi1 + len(c)
- para.words[j] = (fo1, te1, wi1) + \
- para.words[j][3:]
- if openchar.has_key(curfont) and te:
- c = openchar[curfont]
- te = c + te
- wi = len(c) + wi
- para.words[i] = (fo, te, wi) + \
- para.words[i][3:]
- if te: oldfont = curfont
- else: oldfont = 'r'
- if curfont in string.uppercase:
- te = string.upper(te)
- para.words[i] = (fo, te, wi) + para.words[i][3:]
- del para.words[-1]
-
-
-# Formatter back-end to draw the text in a window.
-# This has an option to draw while the paragraphs are being added,
-# to minimize the delay before the user sees anything.
-# This manages the entire "document" of the window.
-class StdwinBackEnd(SavingBackEnd):
- #
- def __init__(self, window, drawnow):
- self.window = window
- self.drawnow = drawnow
- self.width = window.getwinsize()[0]
- self.selection = None
- self.height = 0
- window.setorigin(0, 0)
- window.setdocsize(0, 0)
- self.d = window.begindrawing()
- SavingBackEnd.__init__(self)
- #
- def finish(self):
- self.d.close()
- self.d = None
- self.window.setdocsize(0, self.height)
- #
- def addpara(self, p):
- self.paralist.append(p)
- if self.drawnow:
- self.height = \
- p.render(self.d, 0, self.height, self.width)
- else:
- p.layout(self.width)
- p.left = 0
- p.top = self.height
- p.right = self.width
- p.bottom = self.height + p.height
- self.height = p.bottom
- #
- def resize(self):
- self.window.change((0, 0), (self.width, self.height))
- self.width = self.window.getwinsize()[0]
- self.height = 0
- for p in self.paralist:
- p.layout(self.width)
- p.left = 0
- p.top = self.height
- p.right = self.width
- p.bottom = self.height + p.height
- self.height = p.bottom
- self.window.change((0, 0), (self.width, self.height))
- self.window.setdocsize(0, self.height)
- #
- def redraw(self, area):
- d = self.window.begindrawing()
- (left, top), (right, bottom) = area
- d.erase(area)
- d.cliprect(area)
- for p in self.paralist:
- if top < p.bottom and p.top < bottom:
- v = p.render(d, p.left, p.top, p.right)
- if self.selection:
- self.invert(d, self.selection)
- d.close()
- #
- def setselection(self, new):
- if new:
- long1, long2 = new
- pos1 = long1[:3]
- pos2 = long2[:3]
- new = pos1, pos2
- if new != self.selection:
- d = self.window.begindrawing()
- if self.selection:
- self.invert(d, self.selection)
- if new:
- self.invert(d, new)
- d.close()
- self.selection = new
- #
- def getselection(self):
- return self.selection
- #
- def extractselection(self):
- if self.selection:
- a, b = self.selection
- return self.extractpart(a, b)
- else:
- return None
- #
- def invert(self, d, region):
- long1, long2 = region
- if long1 > long2: long1, long2 = long2, long1
- para1, pos1 = long1
- para2, pos2 = long2
- while para1 < para2:
- self.paralist[para1].invert(d, pos1, None)
- pos1 = None
- para1 = para1 + 1
- self.paralist[para2].invert(d, pos1, pos2)
- #
- def search(self, prog):
- import re, string
- if type(prog) is type(''):
- prog = re.compile(string.lower(prog))
- if self.selection:
- iold = self.selection[0][0]
- else:
- iold = -1
- hit = None
- for i in range(len(self.paralist)):
- if i == iold or i < iold and hit:
- continue
- p = self.paralist[i]
- text = string.lower(p.extract())
- match = prog.search(text)
- if match:
- a, b = match.group(0)
- long1 = i, a
- long2 = i, b
- hit = long1, long2
- if i > iold:
- break
- if hit:
- self.setselection(hit)
- i = hit[0][0]
- p = self.paralist[i]
- self.window.show((p.left, p.top), (p.right, p.bottom))
- return 1
- else:
- return 0
- #
- def showanchor(self, id):
- for i in range(len(self.paralist)):
- p = self.paralist[i]
- if p.hasanchor(id):
- long1 = i, 0
- long2 = i, len(p.extract())
- hit = long1, long2
- self.setselection(hit)
- self.window.show(
- (p.left, p.top), (p.right, p.bottom))
- break
-
-
-# GL extensions
-
-class GLFontCache:
- #
- def __init__(self):
- self.reset()
- self.setfont('')
- #
- def reset(self):
- self.fontkey = None
- self.fonthandle = None
- self.fontinfo = None
- self.fontcache = {}
- #
- def close(self):
- self.reset()
- #
- def setfont(self, fontkey):
- if fontkey == '':
- fontkey = 'Times-Roman 12'
- elif ' ' not in fontkey:
- fontkey = fontkey + ' 12'
- if fontkey == self.fontkey:
- return
- if self.fontcache.has_key(fontkey):
- handle = self.fontcache[fontkey]
- else:
- import string
- i = string.index(fontkey, ' ')
- name, sizestr = fontkey[:i], fontkey[i:]
- size = eval(sizestr)
- key1 = name + ' 1'
- key = name + ' ' + `size`
- # NB key may differ from fontkey!
- if self.fontcache.has_key(key):
- handle = self.fontcache[key]
- else:
- if self.fontcache.has_key(key1):
- handle = self.fontcache[key1]
- else:
- import fm
- handle = fm.findfont(name)
- self.fontcache[key1] = handle
- handle = handle.scalefont(size)
- self.fontcache[fontkey] = \
- self.fontcache[key] = handle
- self.fontkey = fontkey
- if self.fonthandle != handle:
- self.fonthandle = handle
- self.fontinfo = handle.getfontinfo()
- handle.setfont()
-
-
-class GLMeasurer(GLFontCache):
- #
- def textwidth(self, text):
- return self.fonthandle.getstrwidth(text)
- #
- def baseline(self):
- return self.fontinfo[6] - self.fontinfo[3]
- #
- def lineheight(self):
- return self.fontinfo[6]
-
-
-class GLWriter(GLFontCache):
- #
- # NOTES:
- # (1) Use gl.ortho2 to use X pixel coordinates!
- #
- def text(self, (h, v), text):
- import gl, fm
- gl.cmov2i(h, v + self.fontinfo[6] - self.fontinfo[3])
- fm.prstr(text)
- #
- def setfont(self, fontkey):
- oldhandle = self.fonthandle
- GLFontCache.setfont(fontkey)
- if self.fonthandle != oldhandle:
- handle.setfont()
-
-
-class GLMeasurerWriter(GLMeasurer, GLWriter):
- pass
-
-
-class GLBackEnd(SavingBackEnd):
- #
- def __init__(self, wid):
- import gl
- gl.winset(wid)
- self.wid = wid
- self.width = gl.getsize()[1]
- self.height = 0
- self.d = GLMeasurerWriter()
- SavingBackEnd.__init__(self)
- #
- def finish(self):
- pass
- #
- def addpara(self, p):
- self.paralist.append(p)
- self.height = p.render(self.d, 0, self.height, self.width)
- #
- def redraw(self):
- import gl
- gl.winset(self.wid)
- width = gl.getsize()[1]
- if width != self.width:
- setdocsize = 1
- self.width = width
- for p in self.paralist:
- p.top = p.bottom = None
- d = self.d
- v = 0
- for p in self.paralist:
- v = p.render(d, 0, v, width)
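
The comments above describe the design: a formatter front end assembles words into paragraphs using a measurement object, and a back end lays the paragraphs out, so a window resize only redoes line breaking. A usage sketch of that pipeline as it stood (the module is being deleted here, so this is illustrative only):

    import fmt

    f = fmt.WritingFormatter(open('out.txt', 'w'), 72)
    f.setjust('lr')                    # justify both margins
    for word in 'the quick brown fox'.split():
        f.addword(word, 1)             # one space after each word
    f.flush()
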
diff --git a/Lib/lib-old/grep.py b/Lib/lib-old/grep.py
deleted file mode 100644
index 2926746..0000000
--- a/Lib/lib-old/grep.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# 'grep'
-
-import regex
-from regex_syntax import *
-
-opt_show_where = 0
-opt_show_filename = 0
-opt_show_lineno = 1
-
-def grep(pat, *files):
- return ggrep(RE_SYNTAX_GREP, pat, files)
-
-def egrep(pat, *files):
- return ggrep(RE_SYNTAX_EGREP, pat, files)
-
-def emgrep(pat, *files):
- return ggrep(RE_SYNTAX_EMACS, pat, files)
-
-def ggrep(syntax, pat, files):
- if len(files) == 1 and type(files[0]) == type([]):
- files = files[0]
- global opt_show_filename
- opt_show_filename = (len(files) != 1)
- syntax = regex.set_syntax(syntax)
- try:
- prog = regex.compile(pat)
- finally:
- syntax = regex.set_syntax(syntax)
- for filename in files:
- fp = open(filename, 'r')
- lineno = 0
- while 1:
- line = fp.readline()
- if not line: break
- lineno = lineno + 1
- if prog.search(line) >= 0:
- showline(filename, lineno, line, prog)
- fp.close()
-
-def pgrep(pat, *files):
- if len(files) == 1 and type(files[0]) == type([]):
- files = files[0]
- global opt_show_filename
- opt_show_filename = (len(files) != 1)
- import re
- prog = re.compile(pat)
- for filename in files:
- fp = open(filename, 'r')
- lineno = 0
- while 1:
- line = fp.readline()
- if not line: break
- lineno = lineno + 1
- if prog.search(line):
- showline(filename, lineno, line, prog)
- fp.close()
-
-def showline(filename, lineno, line, prog):
- if line[-1:] == '\n': line = line[:-1]
- if opt_show_lineno:
- prefix = `lineno`.rjust(3) + ': '
- else:
- prefix = ''
- if opt_show_filename:
- prefix = filename + ': ' + prefix
- print prefix + line
- if opt_show_where:
- start, end = prog.regs()[0]
- line = line[:start]
- if '\t' not in line:
- prefix = ' ' * (len(prefix) + start)
- else:
- prefix = ' ' * len(prefix)
- for c in line:
- if c != '\t': c = ' '
- prefix = prefix + c
- if start == end: prefix = prefix + '\\'
- else: prefix = prefix + '^'*(end-start)
- print prefix
diff --git a/Lib/lib-old/lockfile.py b/Lib/lib-old/lockfile.py
deleted file mode 100644
index cde9b48..0000000
--- a/Lib/lib-old/lockfile.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import struct, fcntl
-
-def writelock(f):
- _lock(f, fcntl.F_WRLCK)
-
-def readlock(f):
- _lock(f, fcntl.F_RDLCK)
-
-def unlock(f):
- _lock(f, fcntl.F_UNLCK)
-
-def _lock(f, op):
- dummy = fcntl.fcntl(f.fileno(), fcntl.F_SETLKW,
- struct.pack('2h8l', op,
- 0, 0, 0, 0, 0, 0, 0, 0, 0))
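
The hand-packed struct flock above is fragile because the field layout varies by platform; fcntl.lockf() wraps the same fcntl locking portably. A minimal sketch of the three helpers:

    import fcntl

    def writelock(f):
        fcntl.lockf(f.fileno(), fcntl.LOCK_EX)   # blocking exclusive lock

    def readlock(f):
        fcntl.lockf(f.fileno(), fcntl.LOCK_SH)   # blocking shared lock

    def unlock(f):
        fcntl.lockf(f.fileno(), fcntl.LOCK_UN)
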
diff --git a/Lib/lib-old/newdir.py b/Lib/lib-old/newdir.py
deleted file mode 100644
index 356becc..0000000
--- a/Lib/lib-old/newdir.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# New dir() function
-
-
-# This should be the new dir(), except that it should still list
-# the current local name space by default
-
-def listattrs(x):
- try:
- dictkeys = x.__dict__.keys()
- except (AttributeError, TypeError):
- dictkeys = []
- #
- try:
- methods = x.__methods__
- except (AttributeError, TypeError):
- methods = []
- #
- try:
- members = x.__members__
- except (AttributeError, TypeError):
- members = []
- #
- try:
- the_class = x.__class__
- except (AttributeError, TypeError):
- the_class = None
- #
- try:
- bases = x.__bases__
- except (AttributeError, TypeError):
- bases = ()
- #
- total = dictkeys + methods + members
- if the_class:
-        # It's a class instance; add the class's attributes
- # that are functions (methods)...
- class_attrs = listattrs(the_class)
- class_methods = []
- for name in class_attrs:
- if is_function(getattr(the_class, name)):
- class_methods.append(name)
- total = total + class_methods
- elif bases:
- # It's a derived class; add the base class attributes
- for base in bases:
- base_attrs = listattrs(base)
- total = total + base_attrs
- total.sort()
- return total
- i = 0
- while i+1 < len(total):
- if total[i] == total[i+1]:
- del total[i+1]
- else:
- i = i+1
- return total
-
-
-# Helper to recognize functions
-
-def is_function(x):
- return type(x) == type(is_function)
-
-
-# Approximation of builtin dir(); but note that this lists the user's
-# variables by default, not the current local name space.
-
-def dir(x = None):
- if x is not None:
- return listattrs(x)
- else:
- import __main__
- return listattrs(__main__)
diff --git a/Lib/lib-old/ni.py b/Lib/lib-old/ni.py
deleted file mode 100644
index 074f989..0000000
--- a/Lib/lib-old/ni.py
+++ /dev/null
@@ -1,433 +0,0 @@
-"""New import scheme with package support.
-
-Quick Reference
----------------
-
-- To enable package support, execute "import ni" before importing any
- packages. Importing this module automatically installs the relevant
- import hooks.
-
-- To create a package named spam containing sub-modules ham, bacon and
- eggs, create a directory spam somewhere on Python's module search
- path (i.e. spam's parent directory must be one of the directories in
- sys.path or $PYTHONPATH); then create files ham.py, bacon.py and
- eggs.py inside spam.
-
-- To import module ham from package spam and use function hamneggs()
- from that module, you can either do
-
- import spam.ham # *not* "import spam" !!!
- spam.ham.hamneggs()
-
- or
-
- from spam import ham
- ham.hamneggs()
-
- or
-
- from spam.ham import hamneggs
- hamneggs()
-
-- Importing just "spam" does not do what you expect: it creates an
- empty package named spam if one does not already exist, but it does
- not import spam's submodules. The only submodule that is guaranteed
- to be imported is spam.__init__, if it exists. Note that
-  spam.__init__ is a submodule of package spam. It can refer to
- spam's namespace via the '__.' prefix, for instance
-
- __.spam_inited = 1 # Set a package-level variable
-
-
-
-Theory of Operation
--------------------
-
-A Package is a module that can contain other modules. Packages can be
-nested. Packages introduce dotted names for modules, like P.Q.M, which
-could correspond to a file P/Q/M.py found somewhere on sys.path. It
-is possible to import a package itself, though this makes little sense
-unless the package contains a module called __init__.
-
-A package has two variables that control the namespace used for
-packages and modules, both initialized to sensible defaults the first
-time the package is referenced.
-
-(1) A package's *module search path*, contained in the per-package
-variable __path__, defines a list of *directories* where submodules or
-subpackages of the package are searched. It is initialized to the
-directory containing the package. Setting this variable to None makes
-the module search path default to sys.path (this is not quite the same
-as setting it to sys.path, since the latter won't track later
-assignments to sys.path).
-
-(2) A package's *import domain*, contained in the per-package variable
-__domain__, defines a list of *packages* that are searched (using
-their respective module search paths) to satisfy imports. It is
-initialized to the list consisting of the package itself, its parent
-package, its parent's parent, and so on, ending with the root package
-(the nameless package containing all top-level packages and modules,
-whose module search path is None, implying sys.path).
-
-The default domain implements a search algorithm called "expanding
-search". An alternative search algorithm called "explicit search"
-fixes the import search path to contain only the root package,
-requiring the modules in the package to name all imported modules by
-their full name. The convention of using '__' to refer to the current
-package (both as a per-module variable and in module names) can be
-used by packages using explicit search to refer to modules in the same
-package; this combination is known as "explicit-relative search".
-
-The PackageImporter and PackageLoader classes together implement the
-following policies:
-
-- There is a root package, whose name is ''. It cannot be imported
- directly but may be referenced, e.g. by using '__' from a top-level
- module.
-
-- In each module or package, the variable '__' contains a reference to
- the parent package; in the root package, '__' points to itself.
-
-- In the name for imported modules (e.g. M in "import M" or "from M
- import ..."), a leading '__' refers to the current package (i.e.
- the package containing the current module); leading '__.__' and so
- on refer to the current package's parent, and so on. The use of
- '__' elsewhere in the module name is not supported.
-
-- Modules are searched using the "expanding search" algorithm by
- virtue of the default value for __domain__.
-
-- If A.B.C is imported, A is searched using __domain__; then
- subpackage B is searched in A using its __path__, and so on.
-
-- Built-in modules have priority: even if a file sys.py exists in a
- package, "import sys" imports the built-in sys module.
-
-- The same holds for frozen modules, for better or for worse.
-
-- Submodules and subpackages are not automatically loaded when their
-  parent package is loaded.
-
-- The construct "from package import *" is illegal. (It can still be
- used to import names from a module.)
-
-- When "from package import module1, module2, ..." is used, those
- modules are explicitly loaded.
-
-- When a package is loaded, if it has a submodule __init__, that
- module is loaded. This is the place where required submodules can
- be loaded, the __path__ variable extended, etc. The __init__ module
- is loaded even if the package was loaded only in order to create a
- stub for a sub-package: if "import P.Q.R" is the first reference to
- P, and P has a submodule __init__, P.__init__ is loaded before P.Q
- is even searched.
-
-Caveats:
-
-- It is possible to import a package that has no __init__ submodule;
- this is not particularly useful but there may be useful applications
- for it (e.g. to manipulate its search paths from the outside!).
-
-- There are no special provisions for os.chdir(). If you plan to use
- os.chdir() before you have imported all your modules, it is better
- not to have relative pathnames in sys.path. (This could actually be
- fixed by changing the implementation of path_join() in the hook to
- absolutize paths.)
-
-- Packages and modules are introduced in sys.modules as soon as their
- loading is started. When the loading is terminated by an exception,
- the sys.modules entries remain around.
-
-- There are no special measures to support mutually recursive modules,
- but it will work under the same conditions where it works in the
- flat module space system.
-
-- Sometimes dummy entries (whose value is None) are entered in
- sys.modules, to indicate that a particular module does not exist --
- this is done to speed up the expanding search algorithm when a
- module residing at a higher level is repeatedly imported (Python
- promises that importing a previously imported module is cheap!)
-
-- Although dynamically loaded extensions are allowed inside packages,
- the current implementation (hardcoded in the interpreter) of their
- initialization may cause problems if an extension invokes the
- interpreter during its initialization.
-
-- reload() may find another version of the module only if it occurs on
- the package search path. Thus, it keeps the connection to the
- package to which the module belongs, but may find a different file.
-
-XXX Need to have an explicit name for '', e.g. '__root__'.
-
-"""
-
-
-import imp
-import sys
-import __builtin__
-
-import ihooks
-from ihooks import ModuleLoader, ModuleImporter
-
-
-class PackageLoader(ModuleLoader):
-
- """A subclass of ModuleLoader with package support.
-
- find_module_in_dir() will succeed if there's a subdirectory with
- the given name; load_module() will create a stub for a package and
- load its __init__ module if it exists.
-
- """
-
- def find_module_in_dir(self, name, dir):
- if dir is not None:
- dirname = self.hooks.path_join(dir, name)
- if self.hooks.path_isdir(dirname):
- return None, dirname, ('', '', 'PACKAGE')
- return ModuleLoader.find_module_in_dir(self, name, dir)
-
- def load_module(self, name, stuff):
- file, filename, info = stuff
- suff, mode, type = info
- if type == 'PACKAGE':
- return self.load_package(name, stuff)
- if sys.modules.has_key(name):
- m = sys.modules[name]
- else:
- sys.modules[name] = m = imp.new_module(name)
- self.set_parent(m)
- if type == imp.C_EXTENSION and '.' in name:
- return self.load_dynamic(name, stuff)
- else:
- return ModuleLoader.load_module(self, name, stuff)
-
- def load_dynamic(self, name, stuff):
- file, filename, (suff, mode, type) = stuff
- # Hack around restriction in imp.load_dynamic()
- i = name.rfind('.')
- tail = name[i+1:]
- if sys.modules.has_key(tail):
- save = sys.modules[tail]
- else:
- save = None
- sys.modules[tail] = imp.new_module(name)
- try:
- m = imp.load_dynamic(tail, filename, file)
- finally:
- if save:
- sys.modules[tail] = save
- else:
- del sys.modules[tail]
- sys.modules[name] = m
- return m
-
- def load_package(self, name, stuff):
- file, filename, info = stuff
- if sys.modules.has_key(name):
- package = sys.modules[name]
- else:
- sys.modules[name] = package = imp.new_module(name)
- package.__path__ = [filename]
- self.init_package(package)
- return package
-
- def init_package(self, package):
- self.set_parent(package)
- self.set_domain(package)
- self.call_init_module(package)
-
- def set_parent(self, m):
- name = m.__name__
- if '.' in name:
- name = name[:name.rfind('.')]
- else:
- name = ''
- m.__ = sys.modules[name]
-
- def set_domain(self, package):
- name = package.__name__
- package.__domain__ = domain = [name]
- while '.' in name:
- name = name[:name.rfind('.')]
- domain.append(name)
- if name:
- domain.append('')
-
- def call_init_module(self, package):
- stuff = self.find_module('__init__', package.__path__)
- if stuff:
- m = self.load_module(package.__name__ + '.__init__', stuff)
- package.__init__ = m
-
-
-class PackageImporter(ModuleImporter):
-
- """Importer that understands packages and '__'."""
-
- def __init__(self, loader = None, verbose = 0):
- ModuleImporter.__init__(self,
- loader or PackageLoader(None, verbose), verbose)
-
- def import_module(self, name, globals={}, locals={}, fromlist=[]):
- if globals.has_key('__'):
- package = globals['__']
- else:
- # No calling context, assume in root package
- package = sys.modules['']
- if name[:3] in ('__.', '__'):
- p = package
- name = name[3:]
- while name[:3] in ('__.', '__'):
- p = p.__
- name = name[3:]
- if not name:
- return self.finish(package, p, '', fromlist)
- if '.' in name:
- i = name.find('.')
- name, tail = name[:i], name[i:]
- else:
- tail = ''
- mname = p.__name__ and p.__name__+'.'+name or name
- m = self.get1(mname)
- return self.finish(package, m, tail, fromlist)
- if '.' in name:
- i = name.find('.')
- name, tail = name[:i], name[i:]
- else:
- tail = ''
- for pname in package.__domain__:
- mname = pname and pname+'.'+name or name
- m = self.get0(mname)
- if m: break
- else:
- raise ImportError, "No such module %s" % name
- return self.finish(m, m, tail, fromlist)
-
- def finish(self, module, m, tail, fromlist):
- # Got ....A; now get ....A.B.C.D
- yname = m.__name__
- if tail and sys.modules.has_key(yname + tail): # Fast path
- yname, tail = yname + tail, ''
- m = self.get1(yname)
- while tail:
- i = tail.find('.', 1)
- if i > 0:
- head, tail = tail[:i], tail[i:]
- else:
- head, tail = tail, ''
- yname = yname + head
- m = self.get1(yname)
-
- # Got ....A.B.C.D; now finalize things depending on fromlist
- if not fromlist:
- return module
- if '__' in fromlist:
- raise ImportError, "Can't import __ from anywhere"
- if not hasattr(m, '__path__'): return m
- if '*' in fromlist:
- raise ImportError, "Can't import * from a package"
- for f in fromlist:
- if hasattr(m, f): continue
- fname = yname + '.' + f
- self.get1(fname)
- return m
-
- def get1(self, name):
- m = self.get(name)
- if not m:
- raise ImportError, "No module named %s" % name
- return m
-
- def get0(self, name):
- m = self.get(name)
- if not m:
- sys.modules[name] = None
- return m
-
- def get(self, name):
- # Internal routine to get or load a module when its parent exists
- if sys.modules.has_key(name):
- return sys.modules[name]
- if '.' in name:
- i = name.rfind('.')
- head, tail = name[:i], name[i+1:]
- else:
- head, tail = '', name
- path = sys.modules[head].__path__
- stuff = self.loader.find_module(tail, path)
- if not stuff:
- return None
- sys.modules[name] = m = self.loader.load_module(name, stuff)
- if head:
- setattr(sys.modules[head], tail, m)
- return m
-
- def reload(self, module):
- name = module.__name__
- if '.' in name:
- i = name.rfind('.')
- head, tail = name[:i], name[i+1:]
- path = sys.modules[head].__path__
- else:
- tail = name
- path = sys.modules[''].__path__
- stuff = self.loader.find_module(tail, path)
- if not stuff:
- raise ImportError, "No module named %s" % name
- return self.loader.load_module(name, stuff)
-
- def unload(self, module):
- if hasattr(module, '__path__'):
- raise ImportError, "don't know how to unload packages yet"
- PackageImporter.unload(self, module)
-
- def install(self):
- if not sys.modules.has_key(''):
- sys.modules[''] = package = imp.new_module('')
- package.__path__ = None
- self.loader.init_package(package)
- for m in sys.modules.values():
- if not m: continue
- if not hasattr(m, '__'):
- self.loader.set_parent(m)
- ModuleImporter.install(self)
-
-
-def install(v = 0):
- ihooks.install(PackageImporter(None, v))
-
-def uninstall():
- ihooks.uninstall()
-
-def ni(v = 0):
- install(v)
-
-def no():
- uninstall()
-
-def test():
- import pdb
- try:
- testproper()
- except:
- sys.last_type, sys.last_value, sys.last_traceback = sys.exc_info()
- print
- print sys.last_type, ':', sys.last_value
- print
- pdb.pm()
-
-def testproper():
- install(1)
- try:
- import mactest
- print dir(mactest)
- raw_input('OK?')
- finally:
- uninstall()
-
-
-if __name__ == '__main__':
- test()
-else:
- install()
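
Everything ni implements was folded into the interpreter in Python 1.5: an __init__.py file marks a directory as a package, __path__ still controls the search, and explicit dotted imports replace the '__' convention. The Quick Reference example now needs no hook at all:

    spam/
        __init__.py        # runs on first "import spam"
        ham.py             # defines hamneggs()

    import spam.ham
    spam.ham.hamneggs()
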
diff --git a/Lib/lib-old/packmail.py b/Lib/lib-old/packmail.py
deleted file mode 100644
index e569108..0000000
--- a/Lib/lib-old/packmail.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# Module 'packmail' -- create a self-unpacking shell archive.
-
-# This module works on UNIX and on the Mac; the archives can unpack
-# themselves only on UNIX.
-
-import os
-from stat import ST_MTIME
-
-# Print help
-def help():
- print 'All fns have a file open for writing as first parameter'
- print 'pack(f, fullname, name): pack fullname as name'
- print 'packsome(f, directory, namelist): selected files from directory'
- print 'packall(f, directory): pack all files from directory'
- print 'packnotolder(f, directory, name): pack all files from directory'
- print ' that are not older than a file there'
- print 'packtree(f, directory): pack entire directory tree'
-
-# Pack one file
-def pack(outfp, file, name):
- fp = open(file, 'r')
- outfp.write('echo ' + name + '\n')
- outfp.write('sed "s/^X//" >"' + name + '" <<"!"\n')
- while 1:
- line = fp.readline()
- if not line: break
- if line[-1:] != '\n':
- line = line + '\n'
- outfp.write('X' + line)
- outfp.write('!\n')
- fp.close()
-
-# Pack some files from a directory
-def packsome(outfp, dirname, names):
- for name in names:
- print name
- file = os.path.join(dirname, name)
- pack(outfp, file, name)
-
-# Pack all files from a directory
-def packall(outfp, dirname):
- names = os.listdir(dirname)
- try:
- names.remove('.')
- except:
- pass
- try:
- names.remove('..')
- except:
- pass
- names.sort()
- packsome(outfp, dirname, names)
-
-# Pack all files from a directory that are not older than a given one
-def packnotolder(outfp, dirname, oldest):
- names = os.listdir(dirname)
- try:
- names.remove('.')
- except:
- pass
- try:
- names.remove('..')
- except:
- pass
- oldest = os.path.join(dirname, oldest)
- st = os.stat(oldest)
- mtime = st[ST_MTIME]
- todo = []
- for name in names:
- print name, '...',
- st = os.stat(os.path.join(dirname, name))
- if st[ST_MTIME] >= mtime:
- print 'Yes.'
- todo.append(name)
- else:
- print 'No.'
- todo.sort()
- packsome(outfp, dirname, todo)
-
-# Pack a whole tree (no exceptions)
-def packtree(outfp, dirname):
- print 'packtree', dirname
- outfp.write('mkdir ' + unixfix(dirname) + '\n')
- names = os.listdir(dirname)
- try:
- names.remove('.')
- except:
- pass
- try:
- names.remove('..')
- except:
- pass
- subdirs = []
- for name in names:
- fullname = os.path.join(dirname, name)
- if os.path.isdir(fullname):
- subdirs.append(fullname)
- else:
- print 'pack', fullname
- pack(outfp, fullname, unixfix(fullname))
- for subdirname in subdirs:
- packtree(outfp, subdirname)
-
-def unixfix(name):
- comps = name.split(os.sep)
- res = ''
- for comp in comps:
- if comp:
- if res: res = res + '/'
- res = res + comp
- return res
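
The shell-archive format that pack() above emitted is easiest to see on a concrete file. A minimal sketch, not part of the patch (the file name and contents are invented):

    # What packmail.pack(outfp, 'hello.txt', 'hello.txt') would write when
    # hello.txt contains the single line "hi": each payload line gets an
    # 'X' prefix so the sed command can strip it again on unpacking.
    import sys
    out = sys.stdout
    out.write('echo hello.txt\n')
    out.write('sed "s/^X//" >"hello.txt" <<"!"\n')
    out.write('Xhi\n')
    out.write('!\n')
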
diff --git a/Lib/lib-old/poly.py b/Lib/lib-old/poly.py
deleted file mode 100644
index fe6a1dc..0000000
--- a/Lib/lib-old/poly.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# module 'poly' -- Polynomials
-
-# A polynomial is represented by a list of coefficients, e.g.,
-# [1, 10, 5] represents 1*x**0 + 10*x**1 + 5*x**2 (or 1 + 10x + 5x**2).
-# There is no way to suppress internal zeros; trailing zeros are
-# taken out by normalize().
-
-def normalize(p): # Strip unnecessary zero coefficients
- n = len(p)
- while n:
- if p[n-1]: return p[:n]
- n = n-1
- return []
-
-def plus(a, b):
- if len(a) < len(b): a, b = b, a # make sure a is the longest
- res = a[:] # make a copy
- for i in range(len(b)):
- res[i] = res[i] + b[i]
- return normalize(res)
-
-def minus(a, b):
- neg_b = map(lambda x: -x, b[:])
- return plus(a, neg_b)
-
-def one(power, coeff): # Representation of coeff * x**power
- res = []
- for i in range(power): res.append(0)
- return res + [coeff]
-
-def times(a, b):
- res = []
- for i in range(len(a)):
- for j in range(len(b)):
- res = plus(res, one(i+j, a[i]*b[j]))
- return res
-
-def power(a, n): # Raise polynomial a to the positive integral power n
- if n == 0: return [1]
- if n == 1: return a
- if n/2*2 == n:
- b = power(a, n/2)
- return times(b, b)
- return times(power(a, n-1), a)
-
-def der(a): # First derivative
- res = a[1:]
- for i in range(len(res)):
- res[i] = res[i] * (i+1)
- return res
-
-# Computing a primitive function would require rational arithmetic...
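
The coefficient-list convention above (list index equals the power of x) is worth one worked example. A small sketch under that convention, independent of the removed module:

    # [1, 10, 5] stands for 1 + 10*x + 5*x**2; Horner evaluation reads the
    # list from the highest power down.
    def evaluate(p, x):
        result = 0
        for coeff in reversed(p):
            result = result * x + coeff
        return result

    print evaluate([1, 10, 5], 2)   # 1 + 10*2 + 5*4 == 41
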
diff --git a/Lib/lib-old/rand.py b/Lib/lib-old/rand.py
deleted file mode 100644
index a557b69..0000000
--- a/Lib/lib-old/rand.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Module 'rand'
-# Don't use unless you want compatibility with C's rand()!
-
-import whrandom
-
-def srand(seed):
- whrandom.seed(seed%256, seed/256%256, seed/65536%256)
-
-def rand():
- return int(whrandom.random() * 32768.0) % 32768
-
-def choice(seq):
- return seq[rand() % len(seq)]
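
srand() above just splits one C-style seed into the three byte-sized seeds Wichmann-Hill wants. A sketch of that split with a made-up seed value:

    seed = 123456
    # Python 2 integer division; these are the three values handed to
    # whrandom.seed()
    print seed % 256, seed / 256 % 256, seed / 65536 % 256   # 64 226 1
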
diff --git a/Lib/lib-old/statcache.py b/Lib/lib-old/statcache.py
deleted file mode 100644
index d478393..0000000
--- a/Lib/lib-old/statcache.py
+++ /dev/null
@@ -1,82 +0,0 @@
-"""Maintain a cache of stat() information on files.
-
-There are functions to reset the cache or to selectively remove items.
-"""
-
-import warnings
-warnings.warn("The statcache module is obsolete. Use os.stat() instead.",
- DeprecationWarning)
-del warnings
-
-import os as _os
-from stat import *
-
-__all__ = ["stat","reset","forget","forget_prefix","forget_dir",
- "forget_except_prefix","isdir"]
-
-# The cache. Keys are pathnames, values are os.stat outcomes.
-# Remember that multiple threads may be calling this! So, e.g., the fact
-# that "path in cache" just returned 1 doesn't mean the cache will still
-# contain path on the next line. Code defensively.
-
-cache = {}
-
-def stat(path):
- """Stat a file, possibly out of the cache."""
- ret = cache.get(path, None)
- if ret is None:
- cache[path] = ret = _os.stat(path)
- return ret
-
-def reset():
- """Clear the cache."""
- cache.clear()
-
-# For thread safety, always use forget() internally too.
-def forget(path):
- """Remove a given item from the cache, if it exists."""
- try:
- del cache[path]
- except KeyError:
- pass
-
-def forget_prefix(prefix):
- """Remove all pathnames with a given prefix."""
- for path in cache.keys():
- if path.startswith(prefix):
- forget(path)
-
-def forget_dir(prefix):
- """Forget a directory and all entries except for entries in subdirs."""
-
- # Remove trailing separator, if any. This is tricky to do in a
- # x-platform way. For example, Windows accepts both / and \ as
- # separators, and if there's nothing *but* a separator we want to
- # preserve that this is the root. Only os.path has the platform
- # knowledge we need.
- from os.path import split, join
- prefix = split(join(prefix, "xxx"))[0]
- forget(prefix)
- for path in cache.keys():
- # First check that the path at least starts with the prefix, so
- # that when it doesn't we can avoid paying for split().
- if path.startswith(prefix) and split(path)[0] == prefix:
- forget(path)
-
-def forget_except_prefix(prefix):
- """Remove all pathnames except with a given prefix.
-
- Normally used with prefix = '/' after a chdir().
- """
-
- for path in cache.keys():
- if not path.startswith(prefix):
- forget(path)
-
-def isdir(path):
- """Return True if directory, else False."""
- try:
- st = stat(path)
- except _os.error:
- return False
- return S_ISDIR(st.st_mode)
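
The cache discipline statcache used outlives the module's removal: memoize os.stat() per path and invalidate explicitly. A minimal sketch using only standard-library calls:

    import os

    _cache = {}

    def cached_stat(path):
        st = _cache.get(path)
        if st is None:
            _cache[path] = st = os.stat(path)
        return st

    def forget(path):
        # mirror statcache.forget(): silently ignore paths never stat()ed
        _cache.pop(path, None)
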
diff --git a/Lib/lib-old/tb.py b/Lib/lib-old/tb.py
deleted file mode 100644
index 9063559..0000000
--- a/Lib/lib-old/tb.py
+++ /dev/null
@@ -1,177 +0,0 @@
-# Print tracebacks, with a dump of local variables.
-# Also an interactive stack trace browser.
-# Note -- this module is obsolete -- use pdb.pm() instead.
-
-import sys
-import os
-from stat import *
-import linecache
-
-def br(): browser(sys.last_traceback)
-
-def tb(): printtb(sys.last_traceback)
-
-def browser(tb):
- if not tb:
- print 'No traceback.'
- return
- tblist = []
- while tb:
- tblist.append(tb)
- tb = tb.tb_next
- ptr = len(tblist)-1
- tb = tblist[ptr]
- while 1:
- if tb != tblist[ptr]:
- tb = tblist[ptr]
- print `ptr` + ':',
- printtbheader(tb)
- try:
- line = raw_input('TB: ')
- except KeyboardInterrupt:
- print '\n[Interrupted]'
- break
- except EOFError:
- print '\n[EOF]'
- break
- cmd = line.strip()
- if cmd:
- if cmd == 'quit':
- break
- elif cmd == 'list':
- browserlist(tb)
- elif cmd == 'up':
- if ptr-1 >= 0: ptr = ptr-1
- else: print 'Bottom of stack.'
- elif cmd == 'down':
- if ptr+1 < len(tblist): ptr = ptr+1
- else: print 'Top of stack.'
- elif cmd == 'locals':
- printsymbols(tb.tb_frame.f_locals)
- elif cmd == 'globals':
- printsymbols(tb.tb_frame.f_globals)
- elif cmd in ('?', 'help'):
- browserhelp()
- else:
- browserexec(tb, cmd)
-
-def browserlist(tb):
- filename = tb.tb_frame.f_code.co_filename
- lineno = tb.tb_lineno
- last = lineno
- first = max(1, last-10)
- for i in range(first, last+1):
- if i == lineno: prefix = '***' + `i`.rjust(4) + ':'
- else: prefix = `i`.rjust(7) + ':'
- line = linecache.getline(filename, i)
- if line[-1:] == '\n': line = line[:-1]
- print prefix + line
-
-def browserexec(tb, cmd):
- locals = tb.tb_frame.f_locals
- globals = tb.tb_frame.f_globals
- try:
- exec cmd+'\n' in globals, locals
- except:
- t, v = sys.exc_info()[:2]
- print '*** Exception:',
- if type(t) is type(''):
- print t,
- else:
- print t.__name__,
- if v is not None:
- print ':', v,
- print
- print 'Type help to get help.'
-
-def browserhelp():
- print
- print ' This is the traceback browser. Commands are:'
- print ' up : move one level up in the call stack'
- print ' down : move one level down in the call stack'
- print ' locals : print all local variables at this level'
- print ' globals : print all global variables at this level'
- print ' list : list source code around the failure'
- print ' help : print help (what you are reading now)'
- print ' quit : back to command interpreter'
- print ' Typing any other 1-line statement will execute it'
- print ' using the current level\'s symbol tables'
- print
-
-def printtb(tb):
- while tb:
- print1tb(tb)
- tb = tb.tb_next
-
-def print1tb(tb):
- printtbheader(tb)
- if tb.tb_frame.f_locals is not tb.tb_frame.f_globals:
- printsymbols(tb.tb_frame.f_locals)
-
-def printtbheader(tb):
- filename = tb.tb_frame.f_code.co_filename
- lineno = tb.tb_lineno
- info = '"' + filename + '"(' + `lineno` + ')'
- line = linecache.getline(filename, lineno)
- if line:
- info = info + ': ' + line.strip()
- print info
-
-def printsymbols(d):
- keys = d.keys()
- keys.sort()
- for name in keys:
- print ' ' + name.ljust(12) + ':',
- printobject(d[name], 4)
- print
-
-def printobject(v, maxlevel):
- if v is None:
- print 'None',
- elif type(v) in (type(0), type(0.0)):
- print v,
- elif type(v) is type(''):
- if len(v) > 20:
- print `v[:17] + '...'`,
- else:
- print `v`,
- elif type(v) is type(()):
- print '(',
- printlist(v, maxlevel)
- print ')',
- elif type(v) is type([]):
- print '[',
- printlist(v, maxlevel)
- print ']',
- elif type(v) is type({}):
- print '{',
- printdict(v, maxlevel)
- print '}',
- else:
- print v,
-
-def printlist(v, maxlevel):
- n = len(v)
- if n == 0: return
- if maxlevel <= 0:
- print '...',
- return
- for i in range(min(6, n)):
- printobject(v[i], maxlevel-1)
- if i+1 < n: print ',',
- if n > 6: print '...',
-
-def printdict(v, maxlevel):
- keys = v.keys()
- n = len(keys)
- if n == 0: return
- if maxlevel <= 0:
- print '...',
- return
- keys.sort()
- for i in range(min(6, n)):
- key = keys[i]
- print `key` + ':',
- printobject(v[key], maxlevel-1)
- if i+1 < n: print ',',
- if n > 6: print '...',
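
The replacement the module header points at is pdb's post-mortem mode, which browses the same traceback tb.browser() walked. A sketch of the idiom:

    import sys, pdb
    try:
        1 / 0
    except ZeroDivisionError:
        sys.last_type, sys.last_value, sys.last_traceback = sys.exc_info()
        pdb.pm()   # interactive stack browsing, as tb.br() used to offer
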
diff --git a/Lib/lib-old/tzparse.py b/Lib/lib-old/tzparse.py
deleted file mode 100644
index 12468b5..0000000
--- a/Lib/lib-old/tzparse.py
+++ /dev/null
@@ -1,98 +0,0 @@
-"""Parse a timezone specification."""
-
-# XXX Unfinished.
-# XXX Only the typical form "XXXhhYYY;ddd/hh,ddd/hh" is currently supported.
-
-import warnings
-warnings.warn(
- "The tzparse module is obsolete and will disappear in the future",
- DeprecationWarning)
-
-tzpat = ('^([A-Z][A-Z][A-Z])([-+]?[0-9]+)([A-Z][A-Z][A-Z]);'
- '([0-9]+)/([0-9]+),([0-9]+)/([0-9]+)$')
-
-tzprog = None
-
-def tzparse(tzstr):
- """Given a timezone spec, return a tuple of information
- (tzname, delta, dstname, daystart, hourstart, dayend, hourend),
- where 'tzname' is the name of the timezone, 'delta' is the offset
- in hours from GMT, 'dstname' is the name of the daylight-saving
- timezone, and 'daystart'/'hourstart' and 'dayend'/'hourend'
- specify the starting and ending points for daylight saving time."""
- global tzprog
- if tzprog is None:
- import re
- tzprog = re.compile(tzpat)
- match = tzprog.match(tzstr)
- if not match:
- raise ValueError, 'not the TZ syntax I understand'
- subs = []
- for i in range(1, 8):
- subs.append(match.group(i))
- for i in (1, 3, 4, 5, 6):
- subs[i] = eval(subs[i])
- [tzname, delta, dstname, daystart, hourstart, dayend, hourend] = subs
- return (tzname, delta, dstname, daystart, hourstart, dayend, hourend)
-
-def tzlocaltime(secs, params):
- """Given a Unix time in seconds and a tuple of information about
- a timezone as returned by tzparse(), return the local time in the
- form (year, month, day, hour, min, sec, yday, wday, tzname)."""
- import time
- (tzname, delta, dstname, daystart, hourstart, dayend, hourend) = params
- year, month, days, hours, mins, secs, yday, wday, isdst = \
- time.gmtime(secs - delta*3600)
- if (daystart, hourstart) <= (yday+1, hours) < (dayend, hourend):
- tzname = dstname
- hours = hours + 1
- return year, month, days, hours, mins, secs, yday, wday, tzname
-
-def tzset():
- """Determine the current timezone from the "TZ" environment variable."""
- global tzparams, timezone, altzone, daylight, tzname
- import os
- tzstr = os.environ['TZ']
- tzparams = tzparse(tzstr)
- timezone = tzparams[1] * 3600
- altzone = timezone - 3600
- daylight = 1
- tzname = tzparams[0], tzparams[2]
-
-def isdst(secs):
- """Return true if daylight-saving time is in effect for the given
- Unix time in the current timezone."""
- import time
- (tzname, delta, dstname, daystart, hourstart, dayend, hourend) = \
- tzparams
- year, month, days, hours, mins, secs, yday, wday, isdst = \
- time.gmtime(secs - delta*3600)
- return (daystart, hourstart) <= (yday+1, hours) < (dayend, hourend)
-
-tzset()
-
-def localtime(secs):
- """Get the local time in the current timezone."""
- return tzlocaltime(secs, tzparams)
-
-def test():
- from time import asctime, gmtime
- import time, sys
- now = time.time()
- x = localtime(now)
- tm = x[:-1] + (0,)
- print 'now =', now, '=', asctime(tm), x[-1]
- now = now - now % (24*3600)
- if sys.argv[1:]: now = now + eval(sys.argv[1])
- x = gmtime(now)
- tm = x[:-1] + (0,)
- print 'gmtime =', now, '=', asctime(tm), 'yday =', x[-2]
- jan1 = now - x[-2]*24*3600
- x = localtime(jan1)
- tm = x[:-1] + (0,)
- print 'jan1 =', jan1, '=', asctime(tm), x[-1]
- for d in range(85, 95) + range(265, 275):
- t = jan1 + d*24*3600
- x = localtime(t)
- tm = x[:-1] + (0,)
- print 'd =', d, 't =', t, '=', asctime(tm), x[-1]
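
The one TZ form the parser handled is clearer with a concrete string. A sketch that exercises the same pattern using only the re module (the timezone values are invented):

    import re
    tzpat = ('^([A-Z][A-Z][A-Z])([-+]?[0-9]+)([A-Z][A-Z][A-Z]);'
             '([0-9]+)/([0-9]+),([0-9]+)/([0-9]+)$')
    m = re.match(tzpat, 'EST5EDT;91/2,304/2')
    print m.groups()   # ('EST', '5', 'EDT', '91', '2', '304', '2')
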
diff --git a/Lib/lib-old/util.py b/Lib/lib-old/util.py
deleted file mode 100644
index 104af1e..0000000
--- a/Lib/lib-old/util.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Module 'util' -- some useful functions that don't fit elsewhere
-
-# NB: These are now built-in functions, but this module is provided
-# for compatibility. Don't use in new programs unless you need backward
-# compatibility (i.e. need to run with old interpreters).
-
-
-# Remove an item from a list.
-# No complaints if it isn't in the list at all.
-# If it occurs more than once, remove the first occurrence.
-#
-def remove(item, list):
- if item in list: list.remove(item)
-
-
-# Return a string containing a file's contents.
-#
-def readfile(fn):
- return readopenfile(open(fn, 'r'))
-
-
-# Read an open file until EOF.
-#
-def readopenfile(fp):
- return fp.read()
diff --git a/Lib/lib-old/whatsound.py b/Lib/lib-old/whatsound.py
deleted file mode 100644
index 1b1df23..0000000
--- a/Lib/lib-old/whatsound.py
+++ /dev/null
@@ -1 +0,0 @@
-from sndhdr import *
diff --git a/Lib/lib-old/whrandom.py b/Lib/lib-old/whrandom.py
deleted file mode 100644
index bc0d1a4..0000000
--- a/Lib/lib-old/whrandom.py
+++ /dev/null
@@ -1,144 +0,0 @@
-"""Wichman-Hill random number generator.
-
-Wichmann, B. A. & Hill, I. D. (1982)
-Algorithm AS 183:
-An efficient and portable pseudo-random number generator
-Applied Statistics 31 (1982) 188-190
-
-see also:
- Correction to Algorithm AS 183
- Applied Statistics 33 (1984) 123
-
- McLeod, A. I. (1985)
- A remark on Algorithm AS 183
- Applied Statistics 34 (1985),198-200
-
-
-USE:
-whrandom.random() yields double precision random numbers
- uniformly distributed between 0 and 1.
-
-whrandom.seed(x, y, z) must be called before whrandom.random()
- to seed the generator
-
-There is also an interface to create multiple independent
-random generators, and to choose from other ranges.
-
-
-
-Multi-threading note: the random number generator used here is not
-thread-safe; it is possible that nearly simultaneous calls in
-different threads return the same random value. To avoid this, you
-have to use a lock around all calls. (I didn't want to slow this
-down in the serial case by using a lock here.)
-"""
-
-import warnings
-warnings.warn("the whrandom module is deprecated; please use the random module",
- DeprecationWarning)
-
-# Translated by Guido van Rossum from C source provided by
-# Adrian Baddeley.
-
-
-class whrandom:
- def __init__(self, x = 0, y = 0, z = 0):
- """Initialize an instance.
- Without arguments, initialize from current time.
- With arguments (x, y, z), initialize from them."""
- self.seed(x, y, z)
-
- def seed(self, x = 0, y = 0, z = 0):
- """Set the seed from (x, y, z).
- These must be integers in the range [0, 256)."""
- if not type(x) == type(y) == type(z) == type(0):
- raise TypeError, 'seeds must be integers'
- if not (0 <= x < 256 and 0 <= y < 256 and 0 <= z < 256):
- raise ValueError, 'seeds must be in range(0, 256)'
- if 0 == x == y == z:
- # Initialize from current time
- import time
- t = long(time.time() * 256)
- t = int((t&0xffffff) ^ (t>>24))
- t, x = divmod(t, 256)
- t, y = divmod(t, 256)
- t, z = divmod(t, 256)
- # Zero is a poor seed, so substitute 1
- self._seed = (x or 1, y or 1, z or 1)
-
- def random(self):
- """Get the next random number in the range [0.0, 1.0)."""
- # This part is thread-unsafe:
- # BEGIN CRITICAL SECTION
- x, y, z = self._seed
- #
- x = (171 * x) % 30269
- y = (172 * y) % 30307
- z = (170 * z) % 30323
- #
- self._seed = x, y, z
- # END CRITICAL SECTION
- #
- return (x/30269.0 + y/30307.0 + z/30323.0) % 1.0
-
- def uniform(self, a, b):
- """Get a random number in the range [a, b)."""
- return a + (b-a) * self.random()
-
- def randint(self, a, b):
- """Get a random integer in the range [a, b] including
- both end points.
-
- (Deprecated; use randrange below.)"""
- return self.randrange(a, b+1)
-
- def choice(self, seq):
- """Choose a random element from a non-empty sequence."""
- return seq[int(self.random() * len(seq))]
-
- def randrange(self, start, stop=None, step=1, int=int, default=None):
- """Choose a random item from range(start, stop[, step]).
-
- This fixes the problem with randint() which includes the
- endpoint; in Python this is usually not what you want.
- Do not supply the 'int' and 'default' arguments."""
- # This code is a bit messy to make it fast for the
- # common case while still doing adequate error checking
- istart = int(start)
- if istart != start:
- raise ValueError, "non-integer arg 1 for randrange()"
- if stop is default:
- if istart > 0:
- return int(self.random() * istart)
- raise ValueError, "empty range for randrange()"
- istop = int(stop)
- if istop != stop:
- raise ValueError, "non-integer stop for randrange()"
- if step == 1:
- if istart < istop:
- return istart + int(self.random() *
- (istop - istart))
- raise ValueError, "empty range for randrange()"
- istep = int(step)
- if istep != step:
- raise ValueError, "non-integer step for randrange()"
- if istep > 0:
- n = (istop - istart + istep - 1) / istep
- elif istep < 0:
- n = (istop - istart + istep + 1) / istep
- else:
- raise ValueError, "zero step for randrange()"
-
- if n <= 0:
- raise ValueError, "empty range for randrange()"
- return istart + istep*int(self.random() * n)
-
-
-# Initialize from the current time
-_inst = whrandom()
-seed = _inst.seed
-random = _inst.random
-uniform = _inst.uniform
-randint = _inst.randint
-choice = _inst.choice
-randrange = _inst.randrange
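
The AS 183 recurrence at the heart of the class stands alone in a few lines. A sketch with arbitrary seeds in the documented [0, 256) range:

    x, y, z = 3, 17, 97
    for _ in range(3):
        x = (171 * x) % 30269
        y = (172 * y) % 30307
        z = (170 * z) % 30323
        print (x / 30269.0 + y / 30307.0 + z / 30323.0) % 1.0
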
diff --git a/Lib/lib-old/zmod.py b/Lib/lib-old/zmod.py
deleted file mode 100644
index 55f49df..0000000
--- a/Lib/lib-old/zmod.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# module 'zmod'
-
-# Compute properties of mathematical "fields" formed by taking
-# Z/n (the whole numbers modulo some whole number n) and an
-# irreducible polynomial (i.e., one that does not factor over Z/n),
-# e.g., Z/5 and X**2 + 2.
-#
-# The field is formed by taking all possible linear combinations of
-# a set of d base vectors (where d is the degree of the polynomial).
-#
-# Note that this procedure doesn't yield a field for all combinations
-# of n and p: it may well be that some numbers have more than one
-# inverse and others have none. This is what we check.
-#
-# Remember that a field is a ring where each element has an inverse.
-# A ring has commutative addition and multiplication, a zero and a one:
-# 0*x = x*0 = 0, 0+x = x+0 = x, 1*x = x*1 = x. Also, the distributive
-# property holds: a*(b+c) = a*b + a*c.
-# (XXX I forget if this is an axiom or follows from the rules.)
-
-import poly
-
-
-# Example N and polynomial
-
-N = 5
-P = poly.plus(poly.one(0, 2), poly.one(2, 1)) # 2 + x**2
-
-
-# Return x modulo y. Returns >= 0 even if x < 0.
-
-def mod(x, y):
- return divmod(x, y)[1]
-
-
-# Normalize a polynomial modulo n and modulo p.
-
-def norm(a, n, p):
- a = poly.modulo(a, p)
- a = a[:]
- for i in range(len(a)): a[i] = mod(a[i], n)
- a = poly.normalize(a)
- return a
-
-
-# Make a list of all n^d elements of the proposed field.
-
-def make_all(mat):
- all = []
- for row in mat:
- for a in row:
- all.append(a)
- return all
-
-def make_elements(n, d):
- if d == 0: return [poly.one(0, 0)]
- sub = make_elements(n, d-1)
- all = []
- for a in sub:
- for i in range(n):
- all.append(poly.plus(a, poly.one(d-1, i)))
- return all
-
-def make_inv(all, n, p):
- x = poly.one(1, 1)
- inv = []
- for a in all:
- inv.append(norm(poly.times(a, x), n, p))
- return inv
-
-def checkfield(n, p):
- all = make_elements(n, len(p)-1)
- inv = make_inv(all, n, p)
- all1 = all[:]
- inv1 = inv[:]
- all1.sort()
- inv1.sort()
- if all1 == inv1: print 'BINGO!'
- else:
- print 'Sorry:', n, p
- print all
- print inv
-
-def rj(s, width):
- if type(s) is not type(''): s = `s`
- n = len(s)
- if n >= width: return s
- return ' '*(width - n) + s
-
-def lj(s, width):
- if type(s) is not type(''): s = `s`
- n = len(s)
- if n >= width: return s
- return s + ' '*(width - n)
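
The field property zmod probes, every nonzero element having a multiplicative inverse, reduces to plain integers mod n. A sketch that does not use the removed modules:

    def invertible(a, n):
        return any(a * b % n == 1 for b in range(1, n))

    # Z/5 is a field: every nonzero element is invertible.
    print [a for a in range(1, 5) if invertible(a, 5)]   # [1, 2, 3, 4]
    # Z/6 is only a ring: 2, 3 and 4 have no inverse.
    print [a for a in range(1, 6) if invertible(a, 6)]   # [1, 5]
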
diff --git a/Lib/lib-tk/Tix.py b/Lib/lib-tk/Tix.py
index 2fb1307..14c3c24 100755
--- a/Lib/lib-tk/Tix.py
+++ b/Lib/lib-tk/Tix.py
@@ -1541,8 +1541,8 @@ class Tree(TixWidget):
'''This command is used to indicate whether the entry given by
entryPath has children entries and whether the children are visible. mode
must be one of open, close or none. If mode is set to open, a (+)
- indicator is drawn next to the entry. If mode is set to close, a (-)
- indicator is drawn next to the entry. If mode is set to none, no
+    indicator is drawn next to the entry. If mode is set to close, a (-)
+    indicator is drawn next to the entry. If mode is set to none, no
indicators will be drawn for this entry. The default mode is none. The
open mode indicates the entry has hidden children and this entry can be
opened by the user. The close mode indicates that all the children of the
@@ -1773,6 +1773,7 @@ class CObjView(TixWidget):
# FIXME: It should inherit -superclass tixScrolledWidget
pass
+
class Grid(TixWidget):
'''The Tix Grid command creates a new window and makes it into a
tixGrid widget. Additional options, may be specified on the command
@@ -1787,26 +1788,101 @@ class Grid(TixWidget):
border.
Subwidgets - None'''
- pass
-
+ # valid specific resources as of Tk 8.4
+ # editdonecmd, editnotifycmd, floatingcols, floatingrows, formatcmd,
+ # highlightbackground, highlightcolor, leftmargin, itemtype, selectmode,
+ # selectunit, topmargin,
+ def __init__(self, master=None, cnf={}, **kw):
+ static= []
+ self.cnf= cnf
+ TixWidget.__init__(self, master, 'tixGrid', static, cnf, kw)
+
+ # valid options as of Tk 8.4
+ # anchor, bdtype, cget, configure, delete, dragsite, dropsite, entrycget, edit
+ # entryconfigure, format, geometryinfo, info, index, move, nearest, selection
+ # set, size, unset, xview, yview
# def anchor option ?args ...?
+ def anchor_get(self):
+ "Get the (x,y) coordinate of the current anchor cell"
+ return self._getints(self.tk.call(self, 'anchor', 'get'))
+
# def bdtype
# def delete dim from ?to?
+ def delete_row(self, from_, to=None):
+ """Delete rows between from_ and to inclusive.
+ If to is not provided, delete only row at from_"""
+ if to is None:
+ self.tk.call(self, 'delete', 'row', from_)
+ else:
+ self.tk.call(self, 'delete', 'row', from_, to)
+ def delete_column(self, from_, to=None):
+ """Delete columns between from_ and to inclusive.
+ If to is not provided, delete only column at from_"""
+ if to is None:
+ self.tk.call(self, 'delete', 'column', from_)
+ else:
+ self.tk.call(self, 'delete', 'column', from_, to)
# def edit apply
# def edit set x y
- # def entrycget x y option
- # def entryconfigure x y ?option? ?value option value ...?
+
+ def entrycget(self, x, y, option):
+ "Get the option value for cell at (x,y)"
+ return self.tk.call(self, 'entrycget', x, y, option)
+
+ def entryconfigure(self, x, y, **kw):
+ return self.tk.call(self, 'entryconfigure', x, y, *self._options(None, kw))
# def format
# def index
+
+ def info_exists(self, x, y):
+ "Return True if display item exists at (x,y)"
+ return bool(int(self.tk.call(self, 'info', 'exists', x, y)))
+
+ def info_bbox(self, x, y):
+ # This seems to always return '', at least for 'text' displayitems
+ return self.tk.call(self, 'info', 'bbox', x, y)
+
+ def nearest(self, x, y):
+ "Return coordinate of cell nearest pixel coordinate (x,y)"
+ return self._getints(self.tk.call(self, 'nearest', x, y))
+
+ # def selection adjust
+ # def selection clear
+ # def selection includes
+ # def selection set
+ # def selection toggle
# def move dim from to offset
- # def set x y ?-itemtype type? ?option value...?
+
+ def set(self, x, y, itemtype=None, **kw):
+ args= self._options(self.cnf, kw)
+ if itemtype is not None:
+ args= ('-itemtype', itemtype) + args
+ self.tk.call(self, 'set', x, y, *args)
+
# def size dim index ?option value ...?
# def unset x y
- # def xview
- # def yview
-class ScrolledGrid(TixWidget):
+ def xview(self):
+ return self._getdoubles(self.tk.call(self, 'xview'))
+ def xview_moveto(self, fraction):
+ self.tk.call(self,'xview', 'moveto', fraction)
+ def xview_scroll(self, count, what="units"):
+ "Scroll right (count>0) or left <count> of units|pages"
+ self.tk.call(self, 'xview', 'scroll', count, what)
+
+ def yview(self):
+ return self._getdoubles(self.tk.call(self, 'yview'))
+ def yview_moveto(self, fraction):
+        self.tk.call(self, 'yview', 'moveto', fraction)
+ def yview_scroll(self, count, what="units"):
+ "Scroll down (count>0) or up <count> of units|pages"
+ self.tk.call(self, 'yview', 'scroll', count, what)
+
+class ScrolledGrid(Grid):
'''Scrolled Grid widgets'''
# FIXME: It should inherit -superclass tixScrolledWidget
- pass
+ def __init__(self, master=None, cnf={}, **kw):
+ static= []
+ self.cnf= cnf
+ TixWidget.__init__(self, master, 'tixScrolledGrid', static, cnf, kw)
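
The new Grid wrappers are thin shims over the tixGrid subcommands listed in the comments. A usage sketch (requires a Tk build with the Tix extension; the cell text is made up, and 'text' is assumed here to be a valid tixGrid item type):

    import Tix

    root = Tix.Tk()
    g = Tix.Grid(root)
    g.pack()
    g.set(0, 0, itemtype='text', text='cell 0,0')   # wraps 'tixGrid set'
    root.mainloop()
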
diff --git a/Lib/lib-tk/Tkinter.py b/Lib/lib-tk/Tkinter.py
index d600cd7..0ba954e 100644
--- a/Lib/lib-tk/Tkinter.py
+++ b/Lib/lib-tk/Tkinter.py
@@ -449,18 +449,15 @@ class Misc:
# I'd rather use time.sleep(ms*0.001)
self.tk.call('after', ms)
else:
- # XXX Disgusting hack to clean up after calling func
- tmp = []
- def callit(func=func, args=args, self=self, tmp=tmp):
+ def callit():
try:
func(*args)
finally:
try:
- self.deletecommand(tmp[0])
+ self.deletecommand(name)
except TclError:
pass
name = self._register(callit)
- tmp.append(name)
return self.tk.call('after', ms, name)
def after_idle(self, func, *args):
"""Call FUNC once if the Tcl main loop has no event to
@@ -486,7 +483,24 @@ class Misc:
def bell(self, displayof=0):
"""Ring a display's bell."""
self.tk.call(('bell',) + self._displayof(displayof))
+
# Clipboard handling:
+ def clipboard_get(self, **kw):
+ """Retrieve data from the clipboard on window's display.
+
+ The window keyword defaults to the root window of the Tkinter
+ application.
+
+ The type keyword specifies the form in which the data is
+ to be returned and should be an atom name such as STRING
+ or FILE_NAME. Type defaults to STRING.
+
+ This command is equivalent to:
+
+ selection_get(CLIPBOARD)
+ """
+ return self.tk.call(('clipboard', 'get') + self._options(kw))
+
def clipboard_clear(self, **kw):
"""Clear the data in the Tk clipboard.
diff --git a/Lib/lib-tk/tkFont.py b/Lib/lib-tk/tkFont.py
index 5b5a6ba..15dea2e 100644
--- a/Lib/lib-tk/tkFont.py
+++ b/Lib/lib-tk/tkFont.py
@@ -108,7 +108,9 @@ class Font:
try:
if self.delete_font:
self._call("font", "delete", self.name)
- except (AttributeError, Tkinter.TclError):
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except Exception:
pass
def copy(self):
diff --git a/Lib/linecache.py b/Lib/linecache.py
index 2ccc6c6..f49695a 100644
--- a/Lib/linecache.py
+++ b/Lib/linecache.py
@@ -10,8 +10,8 @@ import os
__all__ = ["getline", "clearcache", "checkcache"]
-def getline(filename, lineno):
- lines = getlines(filename)
+def getline(filename, lineno, module_globals=None):
+ lines = getlines(filename, module_globals)
if 1 <= lineno <= len(lines):
return lines[lineno-1]
else:
@@ -30,14 +30,14 @@ def clearcache():
cache = {}
-def getlines(filename):
+def getlines(filename, module_globals=None):
"""Get the lines for a file from the cache.
Update the cache if it doesn't contain an entry for this file already."""
if filename in cache:
return cache[filename][2]
else:
- return updatecache(filename)
+ return updatecache(filename, module_globals)
def checkcache(filename=None):
@@ -54,6 +54,8 @@ def checkcache(filename=None):
for filename in filenames:
size, mtime, lines, fullname = cache[filename]
+ if mtime is None:
+ continue # no-op for files loaded via a __loader__
try:
stat = os.stat(fullname)
except os.error:
@@ -63,7 +65,7 @@ def checkcache(filename=None):
del cache[filename]
-def updatecache(filename):
+def updatecache(filename, module_globals=None):
"""Update a cache entry and return its list of lines.
If something's wrong, print a message, discard the cache entry,
and return an empty list."""
@@ -72,12 +74,34 @@ def updatecache(filename):
del cache[filename]
if not filename or filename[0] + filename[-1] == '<>':
return []
+
fullname = filename
try:
stat = os.stat(fullname)
except os.error, msg:
- # Try looking through the module search path.
basename = os.path.split(filename)[1]
+
+ # Try for a __loader__, if available
+ if module_globals and '__loader__' in module_globals:
+ name = module_globals.get('__name__')
+ loader = module_globals['__loader__']
+ get_source = getattr(loader, 'get_source', None)
+
+ if name and get_source:
+ if basename.startswith(name.split('.')[-1]+'.'):
+ try:
+ data = get_source(name)
+ except (ImportError, IOError):
+ pass
+ else:
+ cache[filename] = (
+ len(data), None,
+ [line+'\n' for line in data.splitlines()], fullname
+ )
+ return cache[filename][2]
+
+ # Try looking through the module search path.
+
for dirname in sys.path:
# When using imputil, sys.path may contain things other than
# strings; ignore them when it happens.
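
The new module_globals argument lets the cache consult a PEP 302 loader when os.stat() cannot see the file, for instance for modules imported from a zip archive. A sketch of the call pattern (the helper name is illustrative):

    import linecache

    def first_line_of(module):
        # For a zipimported module, module.__dict__ carries a __loader__
        # whose get_source() can fill the cache even though the archive-
        # internal path cannot be stat()ed.
        return linecache.getline(module.__file__, 1, module.__dict__)
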
diff --git a/Lib/logging/__init__.py b/Lib/logging/__init__.py
index 7db0dab..9798931 100644
--- a/Lib/logging/__init__.py
+++ b/Lib/logging/__init__.py
@@ -719,7 +719,7 @@ class StreamHandler(Handler):
If strm is not specified, sys.stderr is used.
"""
Handler.__init__(self)
- if not strm:
+ if strm is None:
strm = sys.stderr
self.stream = strm
self.formatter = None
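
The switch to `strm is None` matters because a stream object can be false-y and still be a valid stream. A sketch of such an object:

    class EmptyBuffer:
        def __len__(self):
            return 0          # makes instances false-y
        def write(self, msg):
            pass

    strm = EmptyBuffer()
    print not strm       # True  -- the old test would wrongly replace it
    print strm is None   # False -- the new test keeps it
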
diff --git a/Lib/mimetools.py b/Lib/mimetools.py
index 0b698ac..8c1cc19 100644
--- a/Lib/mimetools.py
+++ b/Lib/mimetools.py
@@ -127,7 +127,10 @@ def choose_boundary():
import time
if _prefix is None:
import socket
- hostid = socket.gethostbyname(socket.gethostname())
+ try:
+ hostid = socket.gethostbyname(socket.gethostname())
+ except socket.gaierror:
+ hostid = '127.0.0.1'
try:
uid = repr(os.getuid())
except AttributeError:
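
The guarded call mirrors the failure mode exactly: on a machine whose hostname does not resolve, gethostbyname() raises socket.gaierror. A standalone sketch of the fallback:

    import socket
    try:
        hostid = socket.gethostbyname(socket.gethostname())
    except socket.gaierror:
        hostid = '127.0.0.1'
    print hostid
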
diff --git a/Lib/mimetypes.py b/Lib/mimetypes.py
index 7a8b765..bee2ff7 100644
--- a/Lib/mimetypes.py
+++ b/Lib/mimetypes.py
@@ -315,162 +315,171 @@ def read_mime_types(file):
return db.types_map[True]
-suffix_map = {
- '.tgz': '.tar.gz',
- '.taz': '.tar.gz',
- '.tz': '.tar.gz',
- }
-
-encodings_map = {
- '.gz': 'gzip',
- '.Z': 'compress',
- }
-
-# Before adding new types, make sure they are either registered with IANA, at
-# http://www.isi.edu/in-notes/iana/assignments/media-types
-# or extensions, i.e. using the x- prefix
-
-# If you add to these, please keep them sorted!
-types_map = {
- '.a' : 'application/octet-stream',
- '.ai' : 'application/postscript',
- '.aif' : 'audio/x-aiff',
- '.aifc' : 'audio/x-aiff',
- '.aiff' : 'audio/x-aiff',
- '.au' : 'audio/basic',
- '.avi' : 'video/x-msvideo',
- '.bat' : 'text/plain',
- '.bcpio' : 'application/x-bcpio',
- '.bin' : 'application/octet-stream',
- '.bmp' : 'image/x-ms-bmp',
- '.c' : 'text/plain',
- # Duplicates :(
- '.cdf' : 'application/x-cdf',
- '.cdf' : 'application/x-netcdf',
- '.cpio' : 'application/x-cpio',
- '.csh' : 'application/x-csh',
- '.css' : 'text/css',
- '.dll' : 'application/octet-stream',
- '.doc' : 'application/msword',
- '.dot' : 'application/msword',
- '.dvi' : 'application/x-dvi',
- '.eml' : 'message/rfc822',
- '.eps' : 'application/postscript',
- '.etx' : 'text/x-setext',
- '.exe' : 'application/octet-stream',
- '.gif' : 'image/gif',
- '.gtar' : 'application/x-gtar',
- '.h' : 'text/plain',
- '.hdf' : 'application/x-hdf',
- '.htm' : 'text/html',
- '.html' : 'text/html',
- '.ief' : 'image/ief',
- '.jpe' : 'image/jpeg',
- '.jpeg' : 'image/jpeg',
- '.jpg' : 'image/jpeg',
- '.js' : 'application/x-javascript',
- '.ksh' : 'text/plain',
- '.latex' : 'application/x-latex',
- '.m1v' : 'video/mpeg',
- '.man' : 'application/x-troff-man',
- '.me' : 'application/x-troff-me',
- '.mht' : 'message/rfc822',
- '.mhtml' : 'message/rfc822',
- '.mif' : 'application/x-mif',
- '.mov' : 'video/quicktime',
- '.movie' : 'video/x-sgi-movie',
- '.mp2' : 'audio/mpeg',
- '.mp3' : 'audio/mpeg',
- '.mpa' : 'video/mpeg',
- '.mpe' : 'video/mpeg',
- '.mpeg' : 'video/mpeg',
- '.mpg' : 'video/mpeg',
- '.ms' : 'application/x-troff-ms',
- '.nc' : 'application/x-netcdf',
- '.nws' : 'message/rfc822',
- '.o' : 'application/octet-stream',
- '.obj' : 'application/octet-stream',
- '.oda' : 'application/oda',
- '.p12' : 'application/x-pkcs12',
- '.p7c' : 'application/pkcs7-mime',
- '.pbm' : 'image/x-portable-bitmap',
- '.pdf' : 'application/pdf',
- '.pfx' : 'application/x-pkcs12',
- '.pgm' : 'image/x-portable-graymap',
- '.pl' : 'text/plain',
- '.png' : 'image/png',
- '.pnm' : 'image/x-portable-anymap',
- '.pot' : 'application/vnd.ms-powerpoint',
- '.ppa' : 'application/vnd.ms-powerpoint',
- '.ppm' : 'image/x-portable-pixmap',
- '.pps' : 'application/vnd.ms-powerpoint',
- '.ppt' : 'application/vnd.ms-powerpoint',
- '.ps' : 'application/postscript',
- '.pwz' : 'application/vnd.ms-powerpoint',
- '.py' : 'text/x-python',
- '.pyc' : 'application/x-python-code',
- '.pyo' : 'application/x-python-code',
- '.qt' : 'video/quicktime',
- '.ra' : 'audio/x-pn-realaudio',
- '.ram' : 'application/x-pn-realaudio',
- '.ras' : 'image/x-cmu-raster',
- '.rdf' : 'application/xml',
- '.rgb' : 'image/x-rgb',
- '.roff' : 'application/x-troff',
- '.rtx' : 'text/richtext',
- '.sgm' : 'text/x-sgml',
- '.sgml' : 'text/x-sgml',
- '.sh' : 'application/x-sh',
- '.shar' : 'application/x-shar',
- '.snd' : 'audio/basic',
- '.so' : 'application/octet-stream',
- '.src' : 'application/x-wais-source',
- '.sv4cpio': 'application/x-sv4cpio',
- '.sv4crc' : 'application/x-sv4crc',
- '.swf' : 'application/x-shockwave-flash',
- '.t' : 'application/x-troff',
- '.tar' : 'application/x-tar',
- '.tcl' : 'application/x-tcl',
- '.tex' : 'application/x-tex',
- '.texi' : 'application/x-texinfo',
- '.texinfo': 'application/x-texinfo',
- '.tif' : 'image/tiff',
- '.tiff' : 'image/tiff',
- '.tr' : 'application/x-troff',
- '.tsv' : 'text/tab-separated-values',
- '.txt' : 'text/plain',
- '.ustar' : 'application/x-ustar',
- '.vcf' : 'text/x-vcard',
- '.wav' : 'audio/x-wav',
- '.wiz' : 'application/msword',
- '.wsdl' : 'application/xml',
- '.xbm' : 'image/x-xbitmap',
- '.xlb' : 'application/vnd.ms-excel',
- # Duplicates :(
- '.xls' : 'application/excel',
- '.xls' : 'application/vnd.ms-excel',
- '.xml' : 'text/xml',
- '.xpdl' : 'application/xml',
- '.xpm' : 'image/x-xpixmap',
- '.xsl' : 'application/xml',
- '.xwd' : 'image/x-xwindowdump',
- '.zip' : 'application/zip',
- }
-
-# These are non-standard types, commonly found in the wild. They will only
-# match if strict=0 flag is given to the API methods.
-
-# Please sort these too
-common_types = {
- '.jpg' : 'image/jpg',
- '.mid' : 'audio/midi',
- '.midi': 'audio/midi',
- '.pct' : 'image/pict',
- '.pic' : 'image/pict',
- '.pict': 'image/pict',
- '.rtf' : 'application/rtf',
- '.xul' : 'text/xul'
- }
+def _default_mime_types():
+ global suffix_map
+ global encodings_map
+ global types_map
+ global common_types
+
+ suffix_map = {
+ '.tgz': '.tar.gz',
+ '.taz': '.tar.gz',
+ '.tz': '.tar.gz',
+ }
+
+ encodings_map = {
+ '.gz': 'gzip',
+ '.Z': 'compress',
+ }
+
+ # Before adding new types, make sure they are either registered with IANA,
+ # at http://www.isi.edu/in-notes/iana/assignments/media-types
+ # or extensions, i.e. using the x- prefix
+
+ # If you add to these, please keep them sorted!
+ types_map = {
+ '.a' : 'application/octet-stream',
+ '.ai' : 'application/postscript',
+ '.aif' : 'audio/x-aiff',
+ '.aifc' : 'audio/x-aiff',
+ '.aiff' : 'audio/x-aiff',
+ '.au' : 'audio/basic',
+ '.avi' : 'video/x-msvideo',
+ '.bat' : 'text/plain',
+ '.bcpio' : 'application/x-bcpio',
+ '.bin' : 'application/octet-stream',
+ '.bmp' : 'image/x-ms-bmp',
+ '.c' : 'text/plain',
+ # Duplicates :(
+ '.cdf' : 'application/x-cdf',
+ '.cdf' : 'application/x-netcdf',
+ '.cpio' : 'application/x-cpio',
+ '.csh' : 'application/x-csh',
+ '.css' : 'text/css',
+ '.dll' : 'application/octet-stream',
+ '.doc' : 'application/msword',
+ '.dot' : 'application/msword',
+ '.dvi' : 'application/x-dvi',
+ '.eml' : 'message/rfc822',
+ '.eps' : 'application/postscript',
+ '.etx' : 'text/x-setext',
+ '.exe' : 'application/octet-stream',
+ '.gif' : 'image/gif',
+ '.gtar' : 'application/x-gtar',
+ '.h' : 'text/plain',
+ '.hdf' : 'application/x-hdf',
+ '.htm' : 'text/html',
+ '.html' : 'text/html',
+ '.ief' : 'image/ief',
+ '.jpe' : 'image/jpeg',
+ '.jpeg' : 'image/jpeg',
+ '.jpg' : 'image/jpeg',
+ '.js' : 'application/x-javascript',
+ '.ksh' : 'text/plain',
+ '.latex' : 'application/x-latex',
+ '.m1v' : 'video/mpeg',
+ '.man' : 'application/x-troff-man',
+ '.me' : 'application/x-troff-me',
+ '.mht' : 'message/rfc822',
+ '.mhtml' : 'message/rfc822',
+ '.mif' : 'application/x-mif',
+ '.mov' : 'video/quicktime',
+ '.movie' : 'video/x-sgi-movie',
+ '.mp2' : 'audio/mpeg',
+ '.mp3' : 'audio/mpeg',
+ '.mpa' : 'video/mpeg',
+ '.mpe' : 'video/mpeg',
+ '.mpeg' : 'video/mpeg',
+ '.mpg' : 'video/mpeg',
+ '.ms' : 'application/x-troff-ms',
+ '.nc' : 'application/x-netcdf',
+ '.nws' : 'message/rfc822',
+ '.o' : 'application/octet-stream',
+ '.obj' : 'application/octet-stream',
+ '.oda' : 'application/oda',
+ '.p12' : 'application/x-pkcs12',
+ '.p7c' : 'application/pkcs7-mime',
+ '.pbm' : 'image/x-portable-bitmap',
+ '.pdf' : 'application/pdf',
+ '.pfx' : 'application/x-pkcs12',
+ '.pgm' : 'image/x-portable-graymap',
+ '.pl' : 'text/plain',
+ '.png' : 'image/png',
+ '.pnm' : 'image/x-portable-anymap',
+ '.pot' : 'application/vnd.ms-powerpoint',
+ '.ppa' : 'application/vnd.ms-powerpoint',
+ '.ppm' : 'image/x-portable-pixmap',
+ '.pps' : 'application/vnd.ms-powerpoint',
+ '.ppt' : 'application/vnd.ms-powerpoint',
+ '.ps' : 'application/postscript',
+ '.pwz' : 'application/vnd.ms-powerpoint',
+ '.py' : 'text/x-python',
+ '.pyc' : 'application/x-python-code',
+ '.pyo' : 'application/x-python-code',
+ '.qt' : 'video/quicktime',
+ '.ra' : 'audio/x-pn-realaudio',
+ '.ram' : 'application/x-pn-realaudio',
+ '.ras' : 'image/x-cmu-raster',
+ '.rdf' : 'application/xml',
+ '.rgb' : 'image/x-rgb',
+ '.roff' : 'application/x-troff',
+ '.rtx' : 'text/richtext',
+ '.sgm' : 'text/x-sgml',
+ '.sgml' : 'text/x-sgml',
+ '.sh' : 'application/x-sh',
+ '.shar' : 'application/x-shar',
+ '.snd' : 'audio/basic',
+ '.so' : 'application/octet-stream',
+ '.src' : 'application/x-wais-source',
+ '.sv4cpio': 'application/x-sv4cpio',
+ '.sv4crc' : 'application/x-sv4crc',
+ '.swf' : 'application/x-shockwave-flash',
+ '.t' : 'application/x-troff',
+ '.tar' : 'application/x-tar',
+ '.tcl' : 'application/x-tcl',
+ '.tex' : 'application/x-tex',
+ '.texi' : 'application/x-texinfo',
+ '.texinfo': 'application/x-texinfo',
+ '.tif' : 'image/tiff',
+ '.tiff' : 'image/tiff',
+ '.tr' : 'application/x-troff',
+ '.tsv' : 'text/tab-separated-values',
+ '.txt' : 'text/plain',
+ '.ustar' : 'application/x-ustar',
+ '.vcf' : 'text/x-vcard',
+ '.wav' : 'audio/x-wav',
+ '.wiz' : 'application/msword',
+ '.wsdl' : 'application/xml',
+ '.xbm' : 'image/x-xbitmap',
+ '.xlb' : 'application/vnd.ms-excel',
+ # Duplicates :(
+ '.xls' : 'application/excel',
+ '.xls' : 'application/vnd.ms-excel',
+ '.xml' : 'text/xml',
+ '.xpdl' : 'application/xml',
+ '.xpm' : 'image/x-xpixmap',
+ '.xsl' : 'application/xml',
+ '.xwd' : 'image/x-xwindowdump',
+ '.zip' : 'application/zip',
+ }
+
+ # These are non-standard types, commonly found in the wild. They will
+ # only match if strict=0 flag is given to the API methods.
+
+ # Please sort these too
+ common_types = {
+ '.jpg' : 'image/jpg',
+ '.mid' : 'audio/midi',
+ '.midi': 'audio/midi',
+ '.pct' : 'image/pict',
+ '.pic' : 'image/pict',
+ '.pict': 'image/pict',
+ '.rtf' : 'application/rtf',
+ '.xul' : 'text/xul'
+ }
+
+
+_default_mime_types()
if __name__ == '__main__':
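
Moving the default maps into _default_mime_types() means they can be rebuilt on demand. A sketch of why that is useful in the module as patched here (the '.fake' entry is invented):

    import mimetypes

    mimetypes.types_map['.fake'] = 'application/x-fake'  # mutate for a test
    mimetypes._default_mime_types()                      # rebind pristine maps
    assert '.fake' not in mimetypes.types_map
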
diff --git a/Lib/pdb.py b/Lib/pdb.py
index 1aa2eae..5b7ea99 100755
--- a/Lib/pdb.py
+++ b/Lib/pdb.py
@@ -91,6 +91,12 @@ class Pdb(bdb.Bdb, cmd.Cmd):
self.rcLines.append(line)
rcFile.close()
+ self.commands = {} # associates a command list to breakpoint numbers
+        self.commands_doprompt = {} # for each bp num, whether to prompt after running the command list
+        self.commands_silent = {} # for each bp num, whether to suppress the stack trace after running the command list
+ self.commands_defining = False # True while in the process of defining a command list
+ self.commands_bnum = None # The breakpoint number for which we are defining a list
+
def reset(self):
bdb.Bdb.reset(self)
self.forget()
@@ -137,7 +143,28 @@ class Pdb(bdb.Bdb, cmd.Cmd):
or frame.f_lineno<= 0):
return
self._wait_for_mainpyfile = 0
- self.interaction(frame, None)
+ if self.bp_commands(frame):
+ self.interaction(frame, None)
+
+    def bp_commands(self, frame):
+        """Call every command that was set for the currently active breakpoint, if there is one.
+        Returns True if the normal interaction function must be called, False otherwise."""
+ #self.currentbp is set in bdb.py in bdb.break_here if a breakpoint was hit
+ if getattr(self,"currentbp",False) and self.currentbp in self.commands:
+ currentbp = self.currentbp
+ self.currentbp = 0
+ lastcmd_back = self.lastcmd
+ self.setup(frame, None)
+ for line in self.commands[currentbp]:
+ self.onecmd(line)
+ self.lastcmd = lastcmd_back
+ if not self.commands_silent[currentbp]:
+ self.print_stack_entry(self.stack[self.curindex])
+ if self.commands_doprompt[currentbp]:
+ self.cmdloop()
+ self.forget()
+ return
+ return 1
def user_return(self, frame, return_value):
"""This function is called when a return trap is set here."""
@@ -202,12 +229,70 @@ class Pdb(bdb.Bdb, cmd.Cmd):
line = line[:marker].rstrip()
return line
+ def onecmd(self, line):
+ """Interpret the argument as though it had been typed in response
+ to the prompt.
+
+        Checks whether this line is typed at the normal prompt or in a breakpoint command list definition.
+ """
+ if not self.commands_defining:
+ return cmd.Cmd.onecmd(self, line)
+ else:
+ return self.handle_command_def(line)
+
+ def handle_command_def(self,line):
+ """ Handles one command line during command list definition. """
+ cmd, arg, line = self.parseline(line)
+ if cmd == 'silent':
+ self.commands_silent[self.commands_bnum] = True
+ return # continue to handle other cmd def in the cmd list
+ elif cmd == 'end':
+ self.cmdqueue = []
+ return 1 # end of cmd list
+ cmdlist = self.commands[self.commands_bnum]
+        if arg:
+ cmdlist.append(cmd+' '+arg)
+ else:
+ cmdlist.append(cmd)
+ # Determine if we must stop
+ try:
+ func = getattr(self, 'do_' + cmd)
+ except AttributeError:
+ func = self.default
+        if func.func_name in self.commands_resuming: # one of the resuming commands.
+ self.commands_doprompt[self.commands_bnum] = False
+ self.cmdqueue = []
+ return 1
+ return
+
# Command definitions, called by cmdloop()
# The argument is the remaining string on the command line
# Return true to exit from the command loop
do_h = cmd.Cmd.do_help
+ def do_commands(self, arg):
+ """Defines a list of commands associated to a breakpoint
+ Those commands will be executed whenever the breakpoint causes the program to stop execution."""
+ if not arg:
+ bnum = len(bdb.Breakpoint.bpbynumber)-1
+ else:
+ try:
+ bnum = int(arg)
+ except:
+ print "Usage : commands [bnum]\n ...\n end"
+ return
+ self.commands_bnum = bnum
+ self.commands[bnum] = []
+ self.commands_doprompt[bnum] = True
+ self.commands_silent[bnum] = False
+ prompt_back = self.prompt
+ self.prompt = '(com) '
+ self.commands_defining = True
+ self.cmdloop()
+ self.commands_defining = False
+ self.prompt = prompt_back
+
def do_break(self, arg, temporary = 0):
# break [ ([filename:]lineno | function) [, "condition"] ]
if not arg:
@@ -691,6 +776,9 @@ class Pdb(bdb.Bdb, cmd.Cmd):
if args[0] in self.aliases:
del self.aliases[args[0]]
+ #list of all the commands making the program resume execution.
+ commands_resuming = ['do_continue', 'do_step', 'do_next', 'do_return', 'do_quit', 'do_jump']
+
# Print a traceback starting at the top stack frame.
# The most recently entered frame is printed last;
# this is different from dbx and gdb, but consistent with
@@ -944,6 +1032,41 @@ alias ps pi self
print """unalias name
Deletes the specified alias."""
+ def help_commands(self):
+ print """commands [bpnumber]
+(com) ...
+(com) end
+(Pdb)
+
+Specify a list of commands for breakpoint number bpnumber. The
+commands themselves appear on the following lines. Type a line
+containing just 'end' to terminate the commands.
+
+To remove all commands from a breakpoint, type commands and
+follow it immediately with end; that is, give no commands.
+
+With no bpnumber argument, commands refers to the last
+breakpoint set.
+
+You can use breakpoint commands to start your program up again.
+Simply use the continue command, or step, or any other
+command that resumes execution.
+
+Specifying any command resuming execution (currently continue,
+step, next, return, jump, quit and their abbreviations) terminates
+the command list (as if that command was immediately followed by end).
+This is because any time you resume execution
+(even with a simple next or step), you may encounter
+another breakpoint--which could have its own command list, leading to
+ambiguities about which list to execute.
+
+ If you use the 'silent' command in the command list, the
+usual message about stopping at a breakpoint is not printed. This may
+be desirable for breakpoints that are to print a specific message and
+then continue. If none of the other commands print anything, you
+see no sign that the breakpoint was reached.
+"""
+
def help_pdb(self):
help()
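
A short session shows the new machinery end to end (the file name and line number are made up):

    (Pdb) break example.py:12
    Breakpoint 1 at example.py:12
    (Pdb) commands 1
    (com) silent
    (com) print 'x is', x
    (com) continue
    (com) end

'silent' suppresses the usual stop message, and 'continue' both resumes the program and terminates the list, exactly as help_commands() documents.
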
diff --git a/Lib/pkg_resources.py b/Lib/pkg_resources.py
new file mode 100644
index 0000000..db6cc90
--- /dev/null
+++ b/Lib/pkg_resources.py
@@ -0,0 +1,2377 @@
+"""Package resource API
+--------------------
+
+A resource is a logical file contained within a package, or a logical
+subdirectory thereof. The package resource API expects resource names
+to have their path parts separated with ``/``, *not* whatever the local
+path separator is. Do not use os.path operations to manipulate resource
+names being passed into the API.
+
+The package resource API is designed to work with normal filesystem packages,
+.egg files, and unpacked .egg files. It can also work in a limited way with
+.zip files and with custom PEP 302 loaders that support the ``get_data()``
+method.
+"""
+
+import sys, os, zipimport, time, re, imp, new, pkgutil # XXX
+from sets import ImmutableSet
+from os import utime, rename, unlink # capture these to bypass sandboxing
+from os import open as os_open
+
+def get_supported_platform():
+ """Return this platform's maximum compatible version.
+
+ distutils.util.get_platform() normally reports the minimum version
+ of Mac OS X that would be required to *use* extensions produced by
+ distutils. But what we want when checking compatibility is to know the
+ version of Mac OS X that we are *running*. To allow usage of packages that
+ explicitly require a newer version of Mac OS X, we must also know the
+ current version of the OS.
+
+ If this condition occurs for any other platform with a version in its
+ platform strings, this function should be extended accordingly.
+ """
+ plat = get_build_platform(); m = macosVersionString.match(plat)
+ if m is not None and sys.platform == "darwin":
+ try:
+ plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
+ except ValueError:
+ pass # not Mac OS X
+ return plat
+
+__all__ = [
+ # Basic resource access and distribution/entry point discovery
+ 'require', 'run_script', 'get_provider', 'get_distribution',
+ 'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points',
+ 'resource_string', 'resource_stream', 'resource_filename',
+ 'resource_listdir', 'resource_exists', 'resource_isdir',
+
+ # Environmental control
+ 'declare_namespace', 'working_set', 'add_activation_listener',
+ 'find_distributions', 'set_extraction_path', 'cleanup_resources',
+ 'get_default_cache',
+
+ # Primary implementation classes
+ 'Environment', 'WorkingSet', 'ResourceManager',
+ 'Distribution', 'Requirement', 'EntryPoint',
+
+ # Exceptions
+ 'ResolutionError','VersionConflict','DistributionNotFound','UnknownExtra',
+ 'ExtractionError',
+
+ # Parsing functions and string utilities
+ 'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
+ 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
+ 'safe_extra', 'to_filename',
+
+ # filesystem utilities
+ 'ensure_directory', 'normalize_path',
+
+ # Distribution "precedence" constants
+ 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
+
+ # "Provider" interfaces, implementations, and registration/lookup APIs
+ 'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
+ 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
+ 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
+ 'register_finder', 'register_namespace_handler', 'register_loader_type',
+ 'fixup_namespace_packages', 'get_importer',
+
+ # Deprecated/backward compatibility only
+ 'run_main', 'AvailableDistributions',
+]
+class ResolutionError(Exception):
+ """Abstract base for dependency resolution errors"""
+ def __repr__(self):
+ return self.__class__.__name__+repr(self.args)
+
+class VersionConflict(ResolutionError):
+ """An already-installed version conflicts with the requested version"""
+
+class DistributionNotFound(ResolutionError):
+ """A requested distribution was not found"""
+
+class UnknownExtra(ResolutionError):
+ """Distribution doesn't have an "extra feature" of the given name"""
+
+_provider_factories = {}
+PY_MAJOR = sys.version[:3]
+EGG_DIST = 3
+BINARY_DIST = 2
+SOURCE_DIST = 1
+CHECKOUT_DIST = 0
+DEVELOP_DIST = -1
+
+def register_loader_type(loader_type, provider_factory):
+ """Register `provider_factory` to make providers for `loader_type`
+
+ `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
+ and `provider_factory` is a function that, passed a *module* object,
+ returns an ``IResourceProvider`` for that module.
+ """
+ _provider_factories[loader_type] = provider_factory
+
+def get_provider(moduleOrReq):
+ """Return an IResourceProvider for the named module or requirement"""
+ if isinstance(moduleOrReq,Requirement):
+ return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
+ try:
+ module = sys.modules[moduleOrReq]
+ except KeyError:
+ __import__(moduleOrReq)
+ module = sys.modules[moduleOrReq]
+ loader = getattr(module, '__loader__', None)
+ return _find_adapter(_provider_factories, loader)(module)
+
+def _macosx_vers(_cache=[]):
+ if not _cache:
+ info = os.popen('/usr/bin/sw_vers').read().splitlines()
+ for line in info:
+ key, value = line.split(None, 1)
+ if key == 'ProductVersion:':
+ _cache.append(value.strip().split("."))
+ break
+ else:
+ raise ValueError, "What?!"
+ return _cache[0]
+
+def _macosx_arch(machine):
+ return {'PowerPC':'ppc', 'Power_Macintosh':'ppc'}.get(machine,machine)
+
+def get_build_platform():
+ """Return this platform's string for platform-specific distributions
+
+ XXX Currently this is the same as ``distutils.util.get_platform()``, but it
+ needs some hacks for Linux and Mac OS X.
+ """
+ from distutils.util import get_platform
+ plat = get_platform()
+ if sys.platform == "darwin" and not plat.startswith('macosx-'):
+ try:
+ version = _macosx_vers()
+ machine = os.uname()[4].replace(" ", "_")
+ return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
+ _macosx_arch(machine))
+ except ValueError:
+ # if someone is running a non-Mac darwin system, this will fall
+ # through to the default implementation
+ pass
+ return plat
+
+macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
+darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
+get_platform = get_build_platform # XXX backward compat
+
+def compatible_platforms(provided,required):
+ """Can code for the `provided` platform run on the `required` platform?
+
+ Returns true if either platform is ``None``, or the platforms are equal.
+
+ XXX Needs compatibility checks for Linux and other unixy OSes.
+ """
+ if provided is None or required is None or provided==required:
+ return True # easy case
+
+ # Mac OS X special cases
+ reqMac = macosVersionString.match(required)
+ if reqMac:
+ provMac = macosVersionString.match(provided)
+
+ # is this a Mac package?
+ if not provMac:
+ # this is backwards compatibility for packages built before
+ # setuptools 0.6. All packages built after this point will
+ # use the new macosx designation.
+ provDarwin = darwinVersionString.match(provided)
+ if provDarwin:
+ dversion = int(provDarwin.group(1))
+ macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
+ if dversion == 7 and macosversion >= "10.3" or \
+ dversion == 8 and macosversion >= "10.4":
+
+ #import warnings
+ #warnings.warn("Mac eggs should be rebuilt to "
+ # "use the macosx designation instead of darwin.",
+ # category=DeprecationWarning)
+ return True
+ return False # egg isn't macosx or legacy darwin
+
+ # are they the same major version and machine type?
+ if provMac.group(1) != reqMac.group(1) or \
+ provMac.group(3) != reqMac.group(3):
+ return False
+
+
+
+ # is the required OS major update >= the provided one?
+ if int(provMac.group(2)) > int(reqMac.group(2)):
+ return False
+
+ return True
+
+ # XXX Linux and other platforms' special cases should go here
+ return False
+
+
+def run_script(dist_spec, script_name):
+ """Locate distribution `dist_spec` and run its `script_name` script"""
+ ns = sys._getframe(1).f_globals
+ name = ns['__name__']
+ ns.clear()
+ ns['__name__'] = name
+ require(dist_spec)[0].run_script(script_name, ns)
+
+run_main = run_script # backward compatibility
+
+def get_distribution(dist):
+ """Return a current distribution object for a Requirement or string"""
+ if isinstance(dist,basestring): dist = Requirement.parse(dist)
+ if isinstance(dist,Requirement): dist = get_provider(dist)
+ if not isinstance(dist,Distribution):
+ raise TypeError("Expected string, Requirement, or Distribution", dist)
+ return dist
+
+def load_entry_point(dist, group, name):
+ """Return `name` entry point of `group` for `dist` or raise ImportError"""
+ return get_distribution(dist).load_entry_point(group, name)
+
+def get_entry_map(dist, group=None):
+ """Return the entry point map for `group`, or the full entry map"""
+ return get_distribution(dist).get_entry_map(group)
+
+def get_entry_info(dist, group, name):
+ """Return the EntryPoint object for `group`+`name`, or ``None``"""
+ return get_distribution(dist).get_entry_info(group, name)
+
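+# A quick sketch of the convenience API above (the project and entry point
+# names here are hypothetical):
+#
+#     dist = get_distribution("FooBar>=1.2")   # accepts a requirement string
+#     main = load_entry_point(dist, "console_scripts", "foobar")
+#     main()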
+
+try:
+ from pkgutil import get_importer
+except ImportError:
+ import _pkgutil as pkgutil
+ get_importer = pkgutil.get_importer
+else:
+ import pkgutil
+
+
+class IMetadataProvider:
+
+ def has_metadata(name):
+ """Does the package's distribution contain the named metadata?"""
+
+ def get_metadata(name):
+ """The named metadata resource as a string"""
+
+ def get_metadata_lines(name):
+ """Yield named metadata resource as list of non-blank non-comment lines
+
+ Leading and trailing whitespace is stripped from each line, and lines
+ with ``#`` as the first non-blank character are omitted."""
+
+ def metadata_isdir(name):
+ """Is the named metadata a directory? (like ``os.path.isdir()``)"""
+
+ def metadata_listdir(name):
+ """List of metadata names in the directory (like ``os.listdir()``)"""
+
+ def run_script(script_name, namespace):
+ """Execute the named script in the supplied namespace dictionary"""
+
+
+class IResourceProvider(IMetadataProvider):
+ """An object that provides access to package resources"""
+
+ def get_resource_filename(manager, resource_name):
+ """Return a true filesystem path for `resource_name`
+
+ `manager` must be an ``IResourceManager``"""
+
+ def get_resource_stream(manager, resource_name):
+ """Return a readable file-like object for `resource_name`
+
+ `manager` must be an ``IResourceManager``"""
+
+ def get_resource_string(manager, resource_name):
+ """Return a string containing the contents of `resource_name`
+
+ `manager` must be an ``IResourceManager``"""
+
+ def has_resource(resource_name):
+ """Does the package contain the named resource?"""
+
+ def resource_isdir(resource_name):
+ """Is the named resource a directory? (like ``os.path.isdir()``)"""
+
+ def resource_listdir(resource_name):
+ """List of resource names in the directory (like ``os.listdir()``)"""
+
+
+class WorkingSet(object):
+ """A collection of active distributions on sys.path (or a similar list)"""
+
+ def __init__(self, entries=None):
+ """Create working set from list of path entries (default=sys.path)"""
+ self.entries = []
+ self.entry_keys = {}
+ self.by_key = {}
+ self.callbacks = []
+
+ if entries is None:
+ entries = sys.path
+
+ for entry in entries:
+ self.add_entry(entry)
+
+
+ def add_entry(self, entry):
+ """Add a path item to ``.entries``, finding any distributions on it
+
+        ``find_distributions(entry, True)`` is used to find distributions
+ corresponding to the path entry, and they are added. `entry` is
+ always appended to ``.entries``, even if it is already present.
+ (This is because ``sys.path`` can contain the same value more than
+ once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
+ equal ``sys.path``.)
+ """
+ self.entry_keys.setdefault(entry, [])
+ self.entries.append(entry)
+ for dist in find_distributions(entry, True):
+ self.add(dist, entry, False)
+
+
+ def __contains__(self,dist):
+ """True if `dist` is the active distribution for its project"""
+ return self.by_key.get(dist.key) == dist
+
+
+
+
+
+ def find(self, req):
+ """Find a distribution matching requirement `req`
+
+ If there is an active distribution for the requested project, this
+ returns it as long as it meets the version requirement specified by
+ `req`. But, if there is an active distribution for the project and it
+ does *not* meet the `req` requirement, ``VersionConflict`` is raised.
+ If there is no active distribution for the requested project, ``None``
+ is returned.
+ """
+ dist = self.by_key.get(req.key)
+ if dist is not None and dist not in req:
+ raise VersionConflict(dist,req) # XXX add more info
+ else:
+ return dist
+
+ def iter_entry_points(self, group, name=None):
+ """Yield entry point objects from `group` matching `name`
+
+ If `name` is None, yields all entry points in `group` from all
+ distributions in the working set, otherwise only ones matching
+ both `group` and `name` are yielded (in distribution order).
+ """
+ for dist in self:
+ entries = dist.get_entry_map(group)
+ if name is None:
+ for ep in entries.values():
+ yield ep
+ elif name in entries:
+ yield entries[name]
+
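+    # Sketch of typical plugin lookup (the group name 'myapp.plugins' is
+    # hypothetical; 'console_scripts' is the standard scripts group):
+    #
+    #     for ep in working_set.iter_entry_points('myapp.plugins'):
+    #         plugin = ep.load()    # import and return the advertised object
+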
+ def run_script(self, requires, script_name):
+ """Locate distribution for `requires` and run `script_name` script"""
+ ns = sys._getframe(1).f_globals
+ name = ns['__name__']
+ ns.clear()
+ ns['__name__'] = name
+ self.require(requires)[0].run_script(script_name, ns)
+
+
+
+ def __iter__(self):
+ """Yield distributions for non-duplicate projects in the working set
+
+ The yield order is the order in which the items' path entries were
+ added to the working set.
+ """
+ seen = {}
+ for item in self.entries:
+ for key in self.entry_keys[item]:
+ if key not in seen:
+ seen[key]=1
+ yield self.by_key[key]
+
+ def add(self, dist, entry=None, insert=True):
+ """Add `dist` to working set, associated with `entry`
+
+ If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
+ On exit from this routine, `entry` is added to the end of the working
+ set's ``.entries`` (if it wasn't already present).
+
+ `dist` is only added to the working set if it's for a project that
+ doesn't already have a distribution in the set. If it's added, any
+ callbacks registered with the ``subscribe()`` method will be called.
+ """
+ if insert:
+ dist.insert_on(self.entries, entry)
+
+ if entry is None:
+ entry = dist.location
+ keys = self.entry_keys.setdefault(entry,[])
+
+ if dist.key in self.by_key:
+ return # ignore hidden distros
+
+ self.by_key[dist.key] = dist
+ if dist.key not in keys:
+ keys.append(dist.key)
+
+ self._added_new(dist)
+
+
+ def resolve(self, requirements, env=None, installer=None):
+ """List all distributions needed to (recursively) meet `requirements`
+
+ `requirements` must be a sequence of ``Requirement`` objects. `env`,
+ if supplied, should be an ``Environment`` instance. If
+ not supplied, it defaults to all distributions available within any
+ entry or distribution in the working set. `installer`, if supplied,
+ will be invoked with each requirement that cannot be met by an
+ already-installed distribution; it should return a ``Distribution`` or
+ ``None``.
+ """
+
+ requirements = list(requirements)[::-1] # set up the stack
+ processed = {} # set of processed requirements
+ best = {} # key -> dist
+ to_activate = []
+
+ while requirements:
+ req = requirements.pop(0) # process dependencies breadth-first
+ if req in processed:
+ # Ignore cyclic or redundant dependencies
+ continue
+ dist = best.get(req.key)
+ if dist is None:
+ # Find the best distribution and add it to the map
+ dist = self.by_key.get(req.key)
+ if dist is None:
+ if env is None:
+ env = Environment(self.entries)
+ dist = best[req.key] = env.best_match(req, self, installer)
+ if dist is None:
+ raise DistributionNotFound(req) # XXX put more info here
+ to_activate.append(dist)
+ if dist not in req:
+ # Oops, the "best" so far conflicts with a dependency
+ raise VersionConflict(dist,req) # XXX put more info here
+ requirements.extend(dist.requires(req.extras)[::-1])
+ processed[req] = True
+
+ return to_activate # return list of distros to activate
+
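+    # Sketch of direct use (hypothetical project name); this mirrors what
+    # ``require()`` does below:
+    #
+    #     needed = working_set.resolve(parse_requirements("FooBar>=1.0"))
+    #     for dist in needed:
+    #         working_set.add(dist)    # activate the resolved distributions
+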
+ def find_plugins(self,
+ plugin_env, full_env=None, installer=None, fallback=True
+ ):
+ """Find all activatable distributions in `plugin_env`
+
+ Example usage::
+
+ distributions, errors = working_set.find_plugins(
+ Environment(plugin_dirlist)
+ )
+ map(working_set.add, distributions) # add plugins+libs to sys.path
+ print "Couldn't load", errors # display errors
+
+ The `plugin_env` should be an ``Environment`` instance that contains
+ only distributions that are in the project's "plugin directory" or
+        directories.  The `full_env`, if supplied, should be an ``Environment``
+        that contains all currently-available distributions.  If `full_env` is
+        not supplied, one is created automatically from the ``WorkingSet`` this
+ method is called on, which will typically mean that every directory on
+ ``sys.path`` will be scanned for distributions.
+
+ `installer` is a standard installer callback as used by the
+ ``resolve()`` method. The `fallback` flag indicates whether we should
+ attempt to resolve older versions of a plugin if the newest version
+ cannot be resolved.
+
+ This method returns a 2-tuple: (`distributions`, `error_info`), where
+ `distributions` is a list of the distributions found in `plugin_env`
+ that were loadable, along with any other distributions that are needed
+ to resolve their dependencies. `error_info` is a dictionary mapping
+ unloadable plugin distributions to an exception instance describing the
+ error that occurred. Usually this will be a ``DistributionNotFound`` or
+ ``VersionConflict`` instance.
+ """
+
+ plugin_projects = list(plugin_env)
+ plugin_projects.sort() # scan project names in alphabetic order
+
+ error_info = {}
+ distributions = {}
+
+ if full_env is None:
+ env = Environment(self.entries)
+ env += plugin_env
+ else:
+ env = full_env + plugin_env
+
+ shadow_set = self.__class__([])
+ map(shadow_set.add, self) # put all our entries in shadow_set
+
+ for project_name in plugin_projects:
+
+ for dist in plugin_env[project_name]:
+
+ req = [dist.as_requirement()]
+
+ try:
+ resolvees = shadow_set.resolve(req, env, installer)
+
+ except ResolutionError,v:
+ error_info[dist] = v # save error info
+ if fallback:
+ continue # try the next older version of project
+ else:
+ break # give up on this project, keep going
+
+ else:
+ map(shadow_set.add, resolvees)
+ distributions.update(dict.fromkeys(resolvees))
+
+ # success, no need to try any more versions of this project
+ break
+
+ distributions = list(distributions)
+ distributions.sort()
+
+ return distributions, error_info
+
+
+
+
+
+ def require(self, *requirements):
+ """Ensure that distributions matching `requirements` are activated
+
+ `requirements` must be a string or a (possibly-nested) sequence
+ thereof, specifying the distributions and versions required. The
+ return value is a sequence of the distributions that needed to be
+ activated to fulfill the requirements; all relevant distributions are
+ included, even if they were already activated in this working set.
+ """
+
+ needed = self.resolve(parse_requirements(requirements))
+
+ for dist in needed:
+ self.add(dist)
+
+ return needed
+
+
+ def subscribe(self, callback):
+ """Invoke `callback` for all distributions (including existing ones)"""
+ if callback in self.callbacks:
+ return
+ self.callbacks.append(callback)
+ for dist in self:
+ callback(dist)
+
+
+ def _added_new(self, dist):
+ for callback in self.callbacks:
+ callback(dist)
+
+
+class Environment(object):
+ """Searchable snapshot of distributions on a search path"""
+
+ def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR):
+ """Snapshot distributions available on a search path
+
+ Any distributions found on `search_path` are added to the environment.
+ `search_path` should be a sequence of ``sys.path`` items. If not
+ supplied, ``sys.path`` is used.
+
+ `platform` is an optional string specifying the name of the platform
+ that platform-specific distributions must be compatible with. If
+ unspecified, it defaults to the current platform. `python` is an
+ optional string naming the desired version of Python (e.g. ``'2.4'``);
+ it defaults to the current version.
+
+ You may explicitly set `platform` (and/or `python`) to ``None`` if you
+ wish to map *all* distributions, not just those compatible with the
+ running platform or Python version.
+ """
+ self._distmap = {}
+ self._cache = {}
+ self.platform = platform
+ self.python = python
+ self.scan(search_path)
+
+ def can_add(self, dist):
+ """Is distribution `dist` acceptable for this environment?
+
+ The distribution must match the platform and python version
+ requirements specified when this environment was created, or False
+ is returned.
+ """
+ return (self.python is None or dist.py_version is None
+ or dist.py_version==self.python) \
+ and compatible_platforms(dist.platform,self.platform)
+
+ def remove(self, dist):
+ """Remove `dist` from the environment"""
+ self._distmap[dist.key].remove(dist)
+
+ def scan(self, search_path=None):
+ """Scan `search_path` for distributions usable in this environment
+
+ Any distributions found are added to the environment.
+ `search_path` should be a sequence of ``sys.path`` items. If not
+ supplied, ``sys.path`` is used. Only distributions conforming to
+ the platform/python version defined at initialization are added.
+ """
+ if search_path is None:
+ search_path = sys.path
+
+ for item in search_path:
+ for dist in find_distributions(item):
+ self.add(dist)
+
+ def __getitem__(self,project_name):
+ """Return a newest-to-oldest list of distributions for `project_name`
+ """
+ try:
+ return self._cache[project_name]
+ except KeyError:
+ project_name = project_name.lower()
+ if project_name not in self._distmap:
+ return []
+
+ if project_name not in self._cache:
+ dists = self._cache[project_name] = self._distmap[project_name]
+ _sort_dists(dists)
+
+ return self._cache[project_name]
+
+ def add(self,dist):
+ """Add `dist` if we ``can_add()`` it and it isn't already added"""
+ if self.can_add(dist) and dist.has_version():
+ dists = self._distmap.setdefault(dist.key,[])
+ if dist not in dists:
+ dists.append(dist)
+ if dist.key in self._cache:
+ _sort_dists(self._cache[dist.key])
+
+
+ def best_match(self, req, working_set, installer=None):
+ """Find distribution best matching `req` and usable on `working_set`
+
+ This calls the ``find(req)`` method of the `working_set` to see if a
+ suitable distribution is already active. (This may raise
+ ``VersionConflict`` if an unsuitable version of the project is already
+ active in the specified `working_set`.) If a suitable distribution
+ isn't active, this method returns the newest distribution in the
+ environment that meets the ``Requirement`` in `req`. If no suitable
+ distribution is found, and `installer` is supplied, then the result of
+ calling the environment's ``obtain(req, installer)`` method will be
+ returned.
+ """
+ dist = working_set.find(req)
+ if dist is not None:
+ return dist
+ for dist in self[req.key]:
+ if dist in req:
+ return dist
+ return self.obtain(req, installer) # try and download/install
+
+ def obtain(self, requirement, installer=None):
+ """Obtain a distribution matching `requirement` (e.g. via download)
+
+        In the base ``Environment`` class, this routine just returns
+        ``installer(requirement)``, unless `installer` is None, in which case
+        None is returned instead.  This method is a hook that allows subclasses
+        to attempt other ways of obtaining a distribution before falling back
+        to the `installer` argument."""
+ if installer is not None:
+ return installer(requirement)
+
+ def __iter__(self):
+ """Yield the unique project names of the available distributions"""
+ for key in self._distmap.keys():
+ if self[key]: yield key
+
+
+
+
+ def __iadd__(self, other):
+ """In-place addition of a distribution or environment"""
+ if isinstance(other,Distribution):
+ self.add(other)
+ elif isinstance(other,Environment):
+ for project in other:
+ for dist in other[project]:
+ self.add(dist)
+ else:
+ raise TypeError("Can't add %r to environment" % (other,))
+ return self
+
+ def __add__(self, other):
+ """Add an environment or distribution to an environment"""
+ new = self.__class__([], platform=None, python=None)
+ for env in self, other:
+ new += env
+ return new
+
+
+AvailableDistributions = Environment # XXX backward compatibility
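+
+# Sketch of a typical snapshot-and-query (the directory name is hypothetical):
+#
+#     env = Environment(['/path/to/plugins'])   # scan just that directory
+#     for project in env:                       # unique project names
+#         newest = env[project][0]              # lists are newest-to-oldest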
+
+
+class ExtractionError(RuntimeError):
+ """An error occurred extracting a resource
+
+ The following attributes are available from instances of this exception:
+
+ manager
+ The resource manager that raised this exception
+
+ cache_path
+ The base directory for resource extraction
+
+ original_error
+ The exception instance that caused extraction to fail
+ """
+
+
+
+
+class ResourceManager:
+ """Manage resource extraction and packages"""
+ extraction_path = None
+
+ def __init__(self):
+ self.cached_files = {}
+
+ def resource_exists(self, package_or_requirement, resource_name):
+ """Does the named resource exist?"""
+ return get_provider(package_or_requirement).has_resource(resource_name)
+
+ def resource_isdir(self, package_or_requirement, resource_name):
+ """Is the named resource an existing directory?"""
+ return get_provider(package_or_requirement).resource_isdir(
+ resource_name
+ )
+
+ def resource_filename(self, package_or_requirement, resource_name):
+ """Return a true filesystem path for specified resource"""
+ return get_provider(package_or_requirement).get_resource_filename(
+ self, resource_name
+ )
+
+ def resource_stream(self, package_or_requirement, resource_name):
+ """Return a readable file-like object for specified resource"""
+ return get_provider(package_or_requirement).get_resource_stream(
+ self, resource_name
+ )
+
+ def resource_string(self, package_or_requirement, resource_name):
+ """Return specified resource as a string"""
+ return get_provider(package_or_requirement).get_resource_string(
+ self, resource_name
+ )
+
+ def resource_listdir(self, package_or_requirement, resource_name):
+ """List the contents of the named resource directory"""
+ return get_provider(package_or_requirement).resource_listdir(
+ resource_name
+ )
+
+ def extraction_error(self):
+ """Give an error message for problems extracting file(s)"""
+
+ old_exc = sys.exc_info()[1]
+ cache_path = self.extraction_path or get_default_cache()
+
+ err = ExtractionError("""Can't extract file(s) to egg cache
+
+The following error occurred while trying to extract file(s) to the Python egg
+cache:
+
+ %s
+
+The Python egg cache directory is currently set to:
+
+ %s
+
+Perhaps your account does not have write access to this directory? You can
+change the cache directory by setting the PYTHON_EGG_CACHE environment
+variable to point to an accessible directory.
+""" % (old_exc, cache_path)
+ )
+ err.manager = self
+ err.cache_path = cache_path
+ err.original_error = old_exc
+ raise err
+
+ def get_cache_path(self, archive_name, names=()):
+ """Return absolute location in cache for `archive_name` and `names`
+
+ The parent directory of the resulting path will be created if it does
+ not already exist. `archive_name` should be the base filename of the
+ enclosing egg (which may not be the name of the enclosing zipfile!),
+ including its ".egg" extension. `names`, if provided, should be a
+ sequence of path name parts "under" the egg's extraction location.
+
+ This method should only be called by resource providers that need to
+ obtain an extraction location, and only for names they intend to
+ extract, as it tracks the generated names for possible cleanup later.
+ """
+ extract_path = self.extraction_path or get_default_cache()
+ target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
+ try:
+ ensure_directory(target_path)
+ except:
+ self.extraction_error()
+
+ self.cached_files[target_path] = 1
+ return target_path
+
+
+ def postprocess(self, tempname, filename):
+ """Perform any platform-specific postprocessing of `tempname`
+
+ This is where Mac header rewrites should be done; other platforms don't
+ have anything special they should do.
+
+ Resource providers should call this method ONLY after successfully
+ extracting a compressed resource. They must NOT call it on resources
+ that are already in the filesystem.
+
+ `tempname` is the current (temporary) name of the file, and `filename`
+ is the name it will be renamed to by the caller after this routine
+ returns.
+ """
+ # XXX
+
+
+ def set_extraction_path(self, path):
+ """Set the base path where resources will be extracted to, if needed.
+
+ If you do not call this routine before any extractions take place, the
+ path defaults to the return value of ``get_default_cache()``. (Which
+ is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
+ platform-specific fallbacks. See that routine's documentation for more
+ details.)
+
+ Resources are extracted to subdirectories of this path based upon
+ information given by the ``IResourceProvider``. You may set this to a
+ temporary directory, but then you must call ``cleanup_resources()`` to
+ delete the extracted files when done. There is no guarantee that
+ ``cleanup_resources()`` will be able to remove all extracted files.
+
+ (Note: you may not change the extraction path for a given resource
+ manager once resources have been extracted, unless you first call
+ ``cleanup_resources()``.)
+ """
+ if self.cached_files:
+ raise ValueError(
+ "Can't change extraction path, files already extracted"
+ )
+
+ self.extraction_path = path
+
+ def cleanup_resources(self, force=False):
+ """
+ Delete all extracted resource files and directories, returning a list
+ of the file and directory names that could not be successfully removed.
+ This function does not have any concurrency protection, so it should
+ generally only be called when the extraction path is a temporary
+ directory exclusive to a single process. This method is not
+ automatically called; you must call it explicitly or register it as an
+ ``atexit`` function if you wish to ensure cleanup of a temporary
+ directory used for extractions.
+ """
+ # XXX
+
+
+
+def get_default_cache():
+ """Determine the default cache location
+
+ This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
+ Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
+ "Application Data" directory. On all other systems, it's "~/.python-eggs".
+ """
+ try:
+ return os.environ['PYTHON_EGG_CACHE']
+ except KeyError:
+ pass
+
+ if os.name!='nt':
+ return os.path.expanduser('~/.python-eggs')
+
+ app_data = 'Application Data' # XXX this may be locale-specific!
+ app_homes = [
+ (('APPDATA',), None), # best option, should be locale-safe
+ (('USERPROFILE',), app_data),
+ (('HOMEDRIVE','HOMEPATH'), app_data),
+ (('HOMEPATH',), app_data),
+ (('HOME',), None),
+ (('WINDIR',), app_data), # 95/98/ME
+ ]
+
+ for keys, subdir in app_homes:
+ dirname = ''
+ for key in keys:
+ if key in os.environ:
+ dirname = os.path.join(os.environ[key])
+ else:
+ break
+ else:
+ if subdir:
+ dirname = os.path.join(dirname,subdir)
+ return os.path.join(dirname, 'Python-Eggs')
+ else:
+ raise RuntimeError(
+ "Please set the PYTHON_EGG_CACHE enviroment variable"
+ )
+
+def safe_name(name):
+ """Convert an arbitrary string to a standard distribution name
+
+ Any runs of non-alphanumeric/. characters are replaced with a single '-'.
+ """
+ return re.sub('[^A-Za-z0-9.]+', '-', name)
+
+
+def safe_version(version):
+ """Convert an arbitrary string to a standard version string
+
+ Spaces become dots, and all other non-alphanumeric characters become
+ dashes, with runs of multiple dashes condensed to a single dash.
+ """
+ version = version.replace(' ','.')
+ return re.sub('[^A-Za-z0-9.]+', '-', version)
+
+
+def safe_extra(extra):
+ """Convert an arbitrary string to a standard 'extra' name
+
+    Any runs of non-alphanumeric characters (dots excepted) are replaced with
+    a single '_', and the result is always lowercased.
+ """
+ return re.sub('[^A-Za-z0-9.]+', '_', extra).lower()
+
+
+def to_filename(name):
+ """Convert a project or version name to its filename-escaped form
+
+ Any '-' characters are currently replaced with '_'.
+ """
+ return name.replace('-','_')
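+
+# Illustrative transformations (a sketch):
+#
+#     safe_name("My Project")       # -> 'My-Project'
+#     safe_version("1.0 beta 2")    # -> '1.0.beta.2'
+#     safe_extra("PDF Support")     # -> 'pdf_support'
+#     to_filename("My-Project")     # -> 'My_Project'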
+
+
+
+
+
+
+
+
+class NullProvider:
+ """Try to implement resources and metadata for arbitrary PEP 302 loaders"""
+
+ egg_name = None
+ egg_info = None
+ loader = None
+
+ def __init__(self, module):
+ self.loader = getattr(module, '__loader__', None)
+ self.module_path = os.path.dirname(getattr(module, '__file__', ''))
+
+ def get_resource_filename(self, manager, resource_name):
+ return self._fn(self.module_path, resource_name)
+
+ def get_resource_stream(self, manager, resource_name):
+ return StringIO(self.get_resource_string(manager, resource_name))
+
+ def get_resource_string(self, manager, resource_name):
+ return self._get(self._fn(self.module_path, resource_name))
+
+ def has_resource(self, resource_name):
+ return self._has(self._fn(self.module_path, resource_name))
+
+ def has_metadata(self, name):
+ return self.egg_info and self._has(self._fn(self.egg_info,name))
+
+ def get_metadata(self, name):
+ if not self.egg_info:
+ return ""
+ return self._get(self._fn(self.egg_info,name))
+
+ def get_metadata_lines(self, name):
+ return yield_lines(self.get_metadata(name))
+
+ def resource_isdir(self,resource_name):
+ return self._isdir(self._fn(self.module_path, resource_name))
+
+ def metadata_isdir(self,name):
+ return self.egg_info and self._isdir(self._fn(self.egg_info,name))
+
+
+ def resource_listdir(self,resource_name):
+ return self._listdir(self._fn(self.module_path,resource_name))
+
+ def metadata_listdir(self,name):
+ if self.egg_info:
+ return self._listdir(self._fn(self.egg_info,name))
+ return []
+
+ def run_script(self,script_name,namespace):
+ script = 'scripts/'+script_name
+ if not self.has_metadata(script):
+ raise ResolutionError("No script named %r" % script_name)
+ script_text = self.get_metadata(script).replace('\r\n','\n')
+ script_text = script_text.replace('\r','\n')
+ script_filename = self._fn(self.egg_info,script)
+ namespace['__file__'] = script_filename
+ if os.path.exists(script_filename):
+ execfile(script_filename, namespace, namespace)
+ else:
+ from linecache import cache
+ cache[script_filename] = (
+ len(script_text), 0, script_text.split('\n'), script_filename
+ )
+ script_code = compile(script_text,script_filename,'exec')
+ exec script_code in namespace, namespace
+
+ def _has(self, path):
+ raise NotImplementedError(
+ "Can't perform this operation for unregistered loader type"
+ )
+
+ def _isdir(self, path):
+ raise NotImplementedError(
+ "Can't perform this operation for unregistered loader type"
+ )
+
+ def _listdir(self, path):
+ raise NotImplementedError(
+ "Can't perform this operation for unregistered loader type"
+ )
+
+ def _fn(self, base, resource_name):
+ return os.path.join(base, *resource_name.split('/'))
+
+ def _get(self, path):
+ if hasattr(self.loader, 'get_data'):
+ return self.loader.get_data(path)
+ raise NotImplementedError(
+ "Can't perform this operation for loaders without 'get_data()'"
+ )
+
+register_loader_type(object, NullProvider)
+
+
+class EggProvider(NullProvider):
+ """Provider based on a virtual filesystem"""
+
+ def __init__(self,module):
+ NullProvider.__init__(self,module)
+ self._setup_prefix()
+
+ def _setup_prefix(self):
+ # we assume here that our metadata may be nested inside a "basket"
+ # of multiple eggs; that's why we use module_path instead of .archive
+ path = self.module_path
+ old = None
+ while path!=old:
+ if path.lower().endswith('.egg'):
+ self.egg_name = os.path.basename(path)
+ self.egg_info = os.path.join(path, 'EGG-INFO')
+ self.egg_root = path
+ break
+ old = path
+ path, base = os.path.split(path)
+
+
+
+
+
+
+
+
+class DefaultProvider(EggProvider):
+ """Provides access to package resources in the filesystem"""
+
+ def _has(self, path):
+ return os.path.exists(path)
+
+ def _isdir(self,path):
+ return os.path.isdir(path)
+
+ def _listdir(self,path):
+ return os.listdir(path)
+
+ def get_resource_stream(self, manager, resource_name):
+ return open(self._fn(self.module_path, resource_name), 'rb')
+
+ def _get(self, path):
+ stream = open(path, 'rb')
+ try:
+ return stream.read()
+ finally:
+ stream.close()
+
+register_loader_type(type(None), DefaultProvider)
+
+
+class EmptyProvider(NullProvider):
+ """Provider that returns nothing for all requests"""
+
+ _isdir = _has = lambda self,path: False
+ _get = lambda self,path: ''
+ _listdir = lambda self,path: []
+ module_path = None
+
+ def __init__(self):
+ pass
+
+empty_provider = EmptyProvider()
+
+
+
+
+class ZipProvider(EggProvider):
+ """Resource support for zips and eggs"""
+
+ eagers = None
+
+ def __init__(self, module):
+ EggProvider.__init__(self,module)
+ self.zipinfo = zipimport._zip_directory_cache[self.loader.archive]
+ self.zip_pre = self.loader.archive+os.sep
+
+ def _zipinfo_name(self, fspath):
+ # Convert a virtual filename (full path to file) into a zipfile subpath
+ # usable with the zipimport directory cache for our target archive
+ if fspath.startswith(self.zip_pre):
+ return fspath[len(self.zip_pre):]
+ raise AssertionError(
+ "%s is not a subpath of %s" % (fspath,self.zip_pre)
+ )
+
+ def _parts(self,zip_path):
+ # Convert a zipfile subpath into an egg-relative path part list
+ fspath = self.zip_pre+zip_path # pseudo-fs path
+ if fspath.startswith(self.egg_root+os.sep):
+ return fspath[len(self.egg_root)+1:].split(os.sep)
+ raise AssertionError(
+ "%s is not a subpath of %s" % (fspath,self.egg_root)
+ )
+
+ def get_resource_filename(self, manager, resource_name):
+ if not self.egg_name:
+ raise NotImplementedError(
+ "resource_filename() only supported for .egg, not .zip"
+ )
+ # no need to lock for extraction, since we use temp names
+ zip_path = self._resource_to_zip(resource_name)
+ eagers = self._get_eager_resources()
+ if '/'.join(self._parts(zip_path)) in eagers:
+ for name in eagers:
+ self._extract_resource(manager, self._eager_to_zip(name))
+ return self._extract_resource(manager, zip_path)
+
+ def _extract_resource(self, manager, zip_path):
+
+ if zip_path in self._index():
+ for name in self._index()[zip_path]:
+ last = self._extract_resource(
+ manager, os.path.join(zip_path, name)
+ )
+ return os.path.dirname(last) # return the extracted directory name
+
+ zip_stat = self.zipinfo[zip_path]
+ t,d,size = zip_stat[5], zip_stat[6], zip_stat[3]
+ date_time = (
+ (d>>9)+1980, (d>>5)&0xF, d&0x1F, # ymd
+ (t&0xFFFF)>>11, (t>>5)&0x3F, (t&0x1F) * 2, 0, 0, -1 # hms, etc.
+ )
+ timestamp = time.mktime(date_time)
+
+ try:
+ real_path = manager.get_cache_path(
+ self.egg_name, self._parts(zip_path)
+ )
+
+ if os.path.isfile(real_path):
+ stat = os.stat(real_path)
+ if stat.st_size==size and stat.st_mtime==timestamp:
+ # size and stamp match, don't bother extracting
+ return real_path
+
+ outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
+ os.write(outf, self.loader.get_data(zip_path))
+ os.close(outf)
+ utime(tmpnam, (timestamp,timestamp))
+ manager.postprocess(tmpnam, real_path)
+
+ try:
+ rename(tmpnam, real_path)
+
+ except os.error:
+ if os.path.isfile(real_path):
+ stat = os.stat(real_path)
+
+ if stat.st_size==size and stat.st_mtime==timestamp:
+ # size and stamp match, somebody did it just ahead of
+ # us, so we're done
+ return real_path
+ elif os.name=='nt': # Windows, del old file and retry
+ unlink(real_path)
+ rename(tmpnam, real_path)
+ return real_path
+ raise
+
+ except os.error:
+ manager.extraction_error() # report a user-friendly error
+
+ return real_path
+
+ def _get_eager_resources(self):
+ if self.eagers is None:
+ eagers = []
+ for name in ('native_libs.txt', 'eager_resources.txt'):
+ if self.has_metadata(name):
+ eagers.extend(self.get_metadata_lines(name))
+ self.eagers = eagers
+ return self.eagers
+
+ def _index(self):
+ try:
+ return self._dirindex
+ except AttributeError:
+ ind = {}
+ for path in self.zipinfo:
+ parts = path.split(os.sep)
+ while parts:
+ parent = os.sep.join(parts[:-1])
+ if parent in ind:
+ ind[parent].append(parts[-1])
+ break
+ else:
+ ind[parent] = [parts.pop()]
+ self._dirindex = ind
+ return ind
+
+ def _has(self, fspath):
+ zip_path = self._zipinfo_name(fspath)
+ return zip_path in self.zipinfo or zip_path in self._index()
+
+ def _isdir(self,fspath):
+ return self._zipinfo_name(fspath) in self._index()
+
+ def _listdir(self,fspath):
+ return list(self._index().get(self._zipinfo_name(fspath), ()))
+
+ def _eager_to_zip(self,resource_name):
+ return self._zipinfo_name(self._fn(self.egg_root,resource_name))
+
+ def _resource_to_zip(self,resource_name):
+ return self._zipinfo_name(self._fn(self.module_path,resource_name))
+
+register_loader_type(zipimport.zipimporter, ZipProvider)
+
+
+class FileMetadata(EmptyProvider):
+ """Metadata handler for standalone PKG-INFO files
+
+ Usage::
+
+ metadata = FileMetadata("/path/to/PKG-INFO")
+
+ This provider rejects all data and metadata requests except for PKG-INFO,
+ which is treated as existing, and will be the contents of the file at
+ the provided location.
+ """
+
+ def __init__(self,path):
+ self.path = path
+
+ def has_metadata(self,name):
+ return name=='PKG-INFO'
+
+ def get_metadata(self,name):
+ if name=='PKG-INFO':
+ return open(self.path,'rU').read()
+ raise KeyError("No metadata except PKG-INFO is available")
+
+ def get_metadata_lines(self,name):
+ return yield_lines(self.get_metadata(name))
+
+
+class PathMetadata(DefaultProvider):
+ """Metadata provider for egg directories
+
+ Usage::
+
+ # Development eggs:
+
+ egg_info = "/path/to/PackageName.egg-info"
+ base_dir = os.path.dirname(egg_info)
+ metadata = PathMetadata(base_dir, egg_info)
+ dist_name = os.path.splitext(os.path.basename(egg_info))[0]
+        dist = Distribution(base_dir,project_name=dist_name,metadata=metadata)
+
+ # Unpacked egg directories:
+
+ egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
+ metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
+ dist = Distribution.from_filename(egg_path, metadata=metadata)
+ """
+ def __init__(self, path, egg_info):
+ self.module_path = path
+ self.egg_info = egg_info
+
+
+class EggMetadata(ZipProvider):
+ """Metadata provider for .egg files"""
+
+ def __init__(self, importer):
+ """Create a metadata provider from a zipimporter"""
+
+ self.zipinfo = zipimport._zip_directory_cache[importer.archive]
+ self.zip_pre = importer.archive+os.sep
+ self.loader = importer
+ if importer.prefix:
+ self.module_path = os.path.join(importer.archive, importer.prefix)
+ else:
+ self.module_path = importer.archive
+ self._setup_prefix()
+
+
+
+_distribution_finders = {}
+
+def register_finder(importer_type, distribution_finder):
+ """Register `distribution_finder` to find distributions in sys.path items
+
+ `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
+ handler), and `distribution_finder` is a callable that, passed a path
+ item and the importer instance, yields ``Distribution`` instances found on
+ that path item. See ``pkg_resources.find_on_path`` for an example."""
+ _distribution_finders[importer_type] = distribution_finder
+
+
+def find_distributions(path_item, only=False):
+ """Yield distributions accessible via `path_item`"""
+ importer = get_importer(path_item)
+ finder = _find_adapter(_distribution_finders, importer)
+ return finder(importer, path_item, only)
+
+def find_in_zip(importer, path_item, only=False):
+ metadata = EggMetadata(importer)
+ if metadata.has_metadata('PKG-INFO'):
+ yield Distribution.from_filename(path_item, metadata=metadata)
+ if only:
+ return # don't yield nested distros
+ for subitem in metadata.resource_listdir('/'):
+ if subitem.endswith('.egg'):
+ subpath = os.path.join(path_item, subitem)
+ for dist in find_in_zip(zipimport.zipimporter(subpath), subpath):
+ yield dist
+
+register_finder(zipimport.zipimporter, find_in_zip)
+
+def StringIO(*args, **kw):
+ """Thunk to load the real StringIO on demand"""
+ global StringIO
+ try:
+ from cStringIO import StringIO
+ except ImportError:
+ from StringIO import StringIO
+ return StringIO(*args,**kw)
+
+def find_nothing(importer, path_item, only=False):
+ return ()
+register_finder(object,find_nothing)
+
+def find_on_path(importer, path_item, only=False):
+ """Yield distributions accessible on a sys.path directory"""
+ path_item = _normalize_cached(path_item)
+
+ if os.path.isdir(path_item):
+ if path_item.lower().endswith('.egg'):
+ # unpacked egg
+ yield Distribution.from_filename(
+ path_item, metadata=PathMetadata(
+ path_item, os.path.join(path_item,'EGG-INFO')
+ )
+ )
+ else:
+ # scan for .egg and .egg-info in directory
+ for entry in os.listdir(path_item):
+ lower = entry.lower()
+ if lower.endswith('.egg-info'):
+ fullpath = os.path.join(path_item, entry)
+ if os.path.isdir(fullpath):
+ # egg-info directory, allow getting metadata
+ metadata = PathMetadata(path_item, fullpath)
+ else:
+ metadata = FileMetadata(fullpath)
+ yield Distribution.from_location(
+ path_item,entry,metadata,precedence=DEVELOP_DIST
+ )
+ elif not only and lower.endswith('.egg'):
+ for dist in find_distributions(os.path.join(path_item, entry)):
+ yield dist
+ elif not only and lower.endswith('.egg-link'):
+ for line in file(os.path.join(path_item, entry)):
+ if not line.strip(): continue
+ for item in find_distributions(line.rstrip()):
+ yield item
+
+register_finder(pkgutil.ImpImporter, find_on_path)
+
+_namespace_handlers = {}
+_namespace_packages = {}
+
+def register_namespace_handler(importer_type, namespace_handler):
+ """Register `namespace_handler` to declare namespace packages
+
+ `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
+ handler), and `namespace_handler` is a callable like this::
+
+ def namespace_handler(importer,path_entry,moduleName,module):
+ # return a path_entry to use for child packages
+
+ Namespace handlers are only called if the importer object has already
+ agreed that it can handle the relevant path item, and they should only
+ return a subpath if the module __path__ does not already contain an
+ equivalent subpath. For an example namespace handler, see
+ ``pkg_resources.file_ns_handler``.
+ """
+ _namespace_handlers[importer_type] = namespace_handler
+
+def _handle_ns(packageName, path_item):
+ """Ensure that named package includes a subpath of path_item (if needed)"""
+ importer = get_importer(path_item)
+ if importer is None:
+ return None
+ loader = importer.find_module(packageName)
+ if loader is None:
+ return None
+ module = sys.modules.get(packageName)
+ if module is None:
+ module = sys.modules[packageName] = new.module(packageName)
+ module.__path__ = []; _set_parent_ns(packageName)
+ elif not hasattr(module,'__path__'):
+ raise TypeError("Not a package:", packageName)
+ handler = _find_adapter(_namespace_handlers, importer)
+ subpath = handler(importer,path_item,packageName,module)
+ if subpath is not None:
+ path = module.__path__; path.append(subpath)
+ loader.load_module(packageName); module.__path__ = path
+ return subpath
+
+def declare_namespace(packageName):
+ """Declare that package 'packageName' is a namespace package"""
+
+ imp.acquire_lock()
+ try:
+ if packageName in _namespace_packages:
+ return
+
+ path, parent = sys.path, None
+ if '.' in packageName:
+ parent = '.'.join(packageName.split('.')[:-1])
+ declare_namespace(parent)
+ __import__(parent)
+ try:
+ path = sys.modules[parent].__path__
+ except AttributeError:
+ raise TypeError("Not a package:", parent)
+
+ # Track what packages are namespaces, so when new path items are added,
+ # they can be updated
+ _namespace_packages.setdefault(parent,[]).append(packageName)
+ _namespace_packages.setdefault(packageName,[])
+
+ for path_item in path:
+ # Ensure all the parent's path items are reflected in the child,
+ # if they apply
+ _handle_ns(packageName, path_item)
+
+ finally:
+ imp.release_lock()
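+
+# The usual way this is invoked is a one-line __init__.py in each portion of
+# a namespace package (the standard setuptools boilerplate):
+#
+#     __import__('pkg_resources').declare_namespace(__name__)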
+
+def fixup_namespace_packages(path_item, parent=None):
+ """Ensure that previously-declared namespace packages include path_item"""
+ imp.acquire_lock()
+ try:
+ for package in _namespace_packages.get(parent,()):
+ subpath = _handle_ns(package, path_item)
+ if subpath: fixup_namespace_packages(subpath,package)
+ finally:
+ imp.release_lock()
+
+def file_ns_handler(importer, path_item, packageName, module):
+ """Compute an ns-package subpath for a filesystem or zipfile importer"""
+
+ subpath = os.path.join(path_item, packageName.split('.')[-1])
+ normalized = _normalize_cached(subpath)
+ for item in module.__path__:
+ if _normalize_cached(item)==normalized:
+ break
+ else:
+ # Only return the path if it's not already there
+ return subpath
+
+register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
+register_namespace_handler(zipimport.zipimporter, file_ns_handler)
+
+
+def null_ns_handler(importer, path_item, packageName, module):
+ return None
+
+register_namespace_handler(object,null_ns_handler)
+
+
+def normalize_path(filename):
+ """Normalize a file/dir name for comparison purposes"""
+ return os.path.normcase(os.path.realpath(filename))
+
+def _normalize_cached(filename,_cache={}):
+ try:
+ return _cache[filename]
+ except KeyError:
+ _cache[filename] = result = normalize_path(filename)
+ return result
+
+def _set_parent_ns(packageName):
+ parts = packageName.split('.')
+ name = parts.pop()
+ if parts:
+ parent = '.'.join(parts)
+ setattr(sys.modules[parent], name, sys.modules[packageName])
+
+
+def yield_lines(strs):
+ """Yield non-empty/non-comment lines of a ``basestring`` or sequence"""
+ if isinstance(strs,basestring):
+ for s in strs.splitlines():
+ s = s.strip()
+ if s and not s.startswith('#'): # skip blank lines/comments
+ yield s
+ else:
+ for ss in strs:
+ for s in yield_lines(ss):
+ yield s
+
+LINE_END = re.compile(r"\s*(#.*)?$").match # whitespace and comment
+CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match # line continuation
+DISTRO = re.compile(r"\s*((\w|[-.])+)").match # Distribution or extra
+VERSION = re.compile(r"\s*(<=?|>=?|==|!=)\s*((\w|[-.])+)").match # ver. info
+COMMA = re.compile(r"\s*,").match # comma between items
+OBRACKET = re.compile(r"\s*\[").match
+CBRACKET = re.compile(r"\s*\]").match
+MODULE = re.compile(r"\w+(\.\w+)*$").match
+EGG_NAME = re.compile(
+ r"(?P<name>[^-]+)"
+ r"( -(?P<ver>[^-]+) (-py(?P<pyver>[^-]+) (-(?P<plat>.+))? )? )?",
+ re.VERBOSE | re.IGNORECASE
+).match
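+
+# For instance (a sketch; the filename is hypothetical):
+#
+#     m = EGG_NAME("FooBar-1.2-py2.4-win32")
+#     m.group('name','ver','pyver','plat')  # ('FooBar', '1.2', '2.4', 'win32')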
+
+component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
+replace = {'pre':'c', 'preview':'c','-':'final-','rc':'c'}.get
+
+def _parse_version_parts(s):
+ for part in component_re.split(s):
+ part = replace(part,part)
+ if not part or part=='.':
+ continue
+ if part[:1] in '0123456789':
+ yield part.zfill(8) # pad for numeric comparison
+ else:
+ yield '*'+part
+
+ yield '*final' # ensure that alpha/beta/candidate are before final
+
+def parse_version(s):
+ """Convert a version string to a chronologically-sortable key
+
+ This is a rough cross between distutils' StrictVersion and LooseVersion;
+ if you give it versions that would work with StrictVersion, then it behaves
+ the same; otherwise it acts like a slightly-smarter LooseVersion. It is
+ *possible* to create pathological version coding schemes that will fool
+ this parser, but they should be very rare in practice.
+
+ The returned value will be a tuple of strings. Numeric portions of the
+ version are padded to 8 digits so they will compare numerically, but
+ without relying on how numbers compare relative to strings. Dots are
+ dropped, but dashes are retained. Trailing zeros between alpha segments
+ or dashes are suppressed, so that e.g. "2.4.0" is considered the same as
+ "2.4". Alphanumeric parts are lower-cased.
+
+    The algorithm assumes that strings like "-" and any alpha string that
+    alphabetically follows "final" represent a "patch level".  So, "2.4-1"
+    is assumed to be a branch or patch of "2.4", and therefore "2.4.1" is
+    considered newer than "2.4-1", which in turn is newer than "2.4".
+
+ Strings like "a", "b", "c", "alpha", "beta", "candidate" and so on (that
+ come before "final" alphabetically) are assumed to be pre-release versions,
+ so that the version "2.4" is considered newer than "2.4a1".
+
+ Finally, to handle miscellaneous cases, the strings "pre", "preview", and
+ "rc" are treated as if they were "c", i.e. as though they were release
+ candidates, and therefore are not as new as a version string that does not
+ contain them.
+ """
+ parts = []
+ for part in _parse_version_parts(s.lower()):
+ if part.startswith('*'):
+ if part<'*final': # remove '-' before a prerelease tag
+ while parts and parts[-1]=='*final-': parts.pop()
+ # remove trailing zeros from each series of numeric parts
+ while parts and parts[-1]=='00000000':
+ parts.pop()
+ parts.append(part)
+ return tuple(parts)
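+
+# A few orderings implied by the rules above (a sketch):
+#
+#     parse_version("2.4a1")  < parse_version("2.4")      # pre-release
+#     parse_version("2.4")    < parse_version("2.4-1")    # patch level
+#     parse_version("2.4-1")  < parse_version("2.4.1")
+#     parse_version("2.4.0") == parse_version("2.4")      # trailing zero dropped
+#     parse_version("2.4rc1") == parse_version("2.4c1")   # 'rc' treated as 'c'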
+
+class EntryPoint(object):
+ """Object representing an advertised importable object"""
+
+ def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
+ if not MODULE(module_name):
+ raise ValueError("Invalid module name", module_name)
+ self.name = name
+ self.module_name = module_name
+ self.attrs = tuple(attrs)
+ self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
+ self.dist = dist
+
+ def __str__(self):
+ s = "%s = %s" % (self.name, self.module_name)
+ if self.attrs:
+ s += ':' + '.'.join(self.attrs)
+ if self.extras:
+ s += ' [%s]' % ','.join(self.extras)
+ return s
+
+ def __repr__(self):
+ return "EntryPoint.parse(%r)" % str(self)
+
+ def load(self, require=True, env=None, installer=None):
+ if require: self.require(env, installer)
+ entry = __import__(self.module_name, globals(),globals(), ['__name__'])
+ for attr in self.attrs:
+ try:
+ entry = getattr(entry,attr)
+ except AttributeError:
+ raise ImportError("%r has no %r attribute" % (entry,attr))
+ return entry
+
+ def require(self, env=None, installer=None):
+ if self.extras and not self.dist:
+ raise UnknownExtra("Can't require() without a distribution", self)
+ map(working_set.add,
+ working_set.resolve(self.dist.requires(self.extras),env,installer))
+
+
+
+ #@classmethod
+ def parse(cls, src, dist=None):
+ """Parse a single entry point from string `src`
+
+ Entry point syntax follows the form::
+
+ name = some.module:some.attr [extra1,extra2]
+
+ The entry name and module name are required, but the ``:attrs`` and
+        ``[extras]`` parts are optional.
+ """
+ try:
+ attrs = extras = ()
+ name,value = src.split('=',1)
+ if '[' in value:
+ value,extras = value.split('[',1)
+ req = Requirement.parse("x["+extras)
+ if req.specs: raise ValueError
+ extras = req.extras
+ if ':' in value:
+ value,attrs = value.split(':',1)
+ if not MODULE(attrs.rstrip()):
+ raise ValueError
+ attrs = attrs.rstrip().split('.')
+ except ValueError:
+ raise ValueError(
+ "EntryPoint must be in 'name=module:attrs [extras]' format",
+ src
+ )
+ else:
+ return cls(name.strip(), value.lstrip(), attrs, extras, dist)
+
+ parse = classmethod(parse)
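+
+    # A sketch of round-tripping a (hypothetical) entry point specification:
+    #
+    #     ep = EntryPoint.parse("foo = my.module:MyClass.factory [pdf]")
+    #     ep.name, ep.module_name, ep.attrs, ep.extras
+    #     # -> ('foo', 'my.module', ('MyClass', 'factory'), ('pdf',))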
+
+
+
+
+
+
+
+
+ #@classmethod
+ def parse_group(cls, group, lines, dist=None):
+ """Parse an entry point group"""
+ if not MODULE(group):
+ raise ValueError("Invalid group name", group)
+ this = {}
+ for line in yield_lines(lines):
+ ep = cls.parse(line, dist)
+ if ep.name in this:
+ raise ValueError("Duplicate entry point", group, ep.name)
+ this[ep.name]=ep
+ return this
+
+ parse_group = classmethod(parse_group)
+
+ #@classmethod
+ def parse_map(cls, data, dist=None):
+ """Parse a map of entry point groups"""
+ if isinstance(data,dict):
+ data = data.items()
+ else:
+ data = split_sections(data)
+ maps = {}
+ for group, lines in data:
+ if group is None:
+ if not lines:
+ continue
+ raise ValueError("Entry points must be listed in groups")
+ group = group.strip()
+ if group in maps:
+ raise ValueError("Duplicate group name", group)
+ maps[group] = cls.parse_group(group, lines, dist)
+ return maps
+
+ parse_map = classmethod(parse_map)
+
+
+
+
+
+
+class Distribution(object):
+ """Wrap an actual or potential sys.path entry w/metadata"""
+ def __init__(self,
+ location=None, metadata=None, project_name=None, version=None,
+ py_version=PY_MAJOR, platform=None, precedence = EGG_DIST
+ ):
+ self.project_name = safe_name(project_name or 'Unknown')
+ if version is not None:
+ self._version = safe_version(version)
+ self.py_version = py_version
+ self.platform = platform
+ self.location = location
+ self.precedence = precedence
+ self._provider = metadata or empty_provider
+
+ #@classmethod
+ def from_location(cls,location,basename,metadata=None,**kw):
+ project_name, version, py_version, platform = [None]*4
+ basename, ext = os.path.splitext(basename)
+ if ext.lower() in (".egg",".egg-info"):
+ match = EGG_NAME(basename)
+ if match:
+ project_name, version, py_version, platform = match.group(
+ 'name','ver','pyver','plat'
+ )
+ return cls(
+ location, metadata, project_name=project_name, version=version,
+ py_version=py_version, platform=platform, **kw
+ )
+ from_location = classmethod(from_location)
+
+ hashcmp = property(
+ lambda self: (
+ getattr(self,'parsed_version',()), self.precedence, self.key,
+ -len(self.location or ''), self.location, self.py_version,
+ self.platform
+ )
+ )
+ def __cmp__(self, other): return cmp(self.hashcmp, other)
+ def __hash__(self): return hash(self.hashcmp)
+
+ # These properties have to be lazy so that we don't have to load any
+ # metadata until/unless it's actually needed. (i.e., some distributions
+ # may not know their name or version without loading PKG-INFO)
+
+ #@property
+ def key(self):
+ try:
+ return self._key
+ except AttributeError:
+ self._key = key = self.project_name.lower()
+ return key
+ key = property(key)
+
+ #@property
+ def parsed_version(self):
+ try:
+ return self._parsed_version
+ except AttributeError:
+ self._parsed_version = pv = parse_version(self.version)
+ return pv
+
+ parsed_version = property(parsed_version)
+
+ #@property
+ def version(self):
+ try:
+ return self._version
+ except AttributeError:
+ for line in self._get_metadata('PKG-INFO'):
+ if line.lower().startswith('version:'):
+ self._version = safe_version(line.split(':',1)[1].strip())
+ return self._version
+ else:
+ raise ValueError(
+ "Missing 'Version:' header and/or PKG-INFO file", self
+ )
+ version = property(version)
+
+
+
+
+ #@property
+ def _dep_map(self):
+ try:
+ return self.__dep_map
+ except AttributeError:
+ dm = self.__dep_map = {None: []}
+ for name in 'requires.txt', 'depends.txt':
+ for extra,reqs in split_sections(self._get_metadata(name)):
+ if extra: extra = safe_extra(extra)
+ dm.setdefault(extra,[]).extend(parse_requirements(reqs))
+ return dm
+ _dep_map = property(_dep_map)
+
+ def requires(self,extras=()):
+ """List of Requirements needed for this distro if `extras` are used"""
+ dm = self._dep_map
+ deps = []
+ deps.extend(dm.get(None,()))
+ for ext in extras:
+ try:
+ deps.extend(dm[safe_extra(ext)])
+ except KeyError:
+ raise UnknownExtra(
+ "%s has no such extra feature %r" % (self, ext)
+ )
+ return deps
+
+ def _get_metadata(self,name):
+ if self.has_metadata(name):
+ for line in self.get_metadata_lines(name):
+ yield line
+
+ def activate(self,path=None):
+ """Ensure distribution is importable on `path` (default=sys.path)"""
+ if path is None: path = sys.path
+ self.insert_on(path)
+ if path is sys.path:
+ fixup_namespace_packages(self.location)
+ for pkg in self._get_metadata('namespace_packages.txt'):
+ if pkg in sys.modules: declare_namespace(pkg)
+
+ def egg_name(self):
+ """Return what this distribution's standard .egg filename should be"""
+ filename = "%s-%s-py%s" % (
+ to_filename(self.project_name), to_filename(self.version),
+ self.py_version or PY_MAJOR
+ )
+
+ if self.platform:
+ filename += '-'+self.platform
+ return filename
+
+ def __repr__(self):
+ if self.location:
+ return "%s (%s)" % (self,self.location)
+ else:
+ return str(self)
+
+ def __str__(self):
+ try: version = getattr(self,'version',None)
+ except ValueError: version = None
+ version = version or "[unknown version]"
+ return "%s %s" % (self.project_name,version)
+
+ def __getattr__(self,attr):
+ """Delegate all unrecognized public attributes to .metadata provider"""
+ if attr.startswith('_'):
+ raise AttributeError,attr
+ return getattr(self._provider, attr)
+
+ #@classmethod
+ def from_filename(cls,filename,metadata=None, **kw):
+ return cls.from_location(
+ _normalize_cached(filename), os.path.basename(filename), metadata,
+ **kw
+ )
+ from_filename = classmethod(from_filename)
+
+ def as_requirement(self):
+ """Return a ``Requirement`` that matches this distribution exactly"""
+ return Requirement.parse('%s==%s' % (self.project_name, self.version))
+
+ def load_entry_point(self, group, name):
+ """Return the `name` entry point of `group` or raise ImportError"""
+ ep = self.get_entry_info(group,name)
+ if ep is None:
+ raise ImportError("Entry point %r not found" % ((group,name),))
+ return ep.load()
+
+ def get_entry_map(self, group=None):
+ """Return the entry point map for `group`, or the full entry map"""
+ try:
+ ep_map = self._ep_map
+ except AttributeError:
+ ep_map = self._ep_map = EntryPoint.parse_map(
+ self._get_metadata('entry_points.txt'), self
+ )
+ if group is not None:
+ return ep_map.get(group,{})
+ return ep_map
+
+ def get_entry_info(self, group, name):
+ """Return the EntryPoint object for `group`+`name`, or ``None``"""
+ return self.get_entry_map(group).get(name)
+
+ def insert_on(self, path, loc = None):
+ """Insert self.location in path before its nearest parent directory"""
+ loc = loc or self.location
+ if not loc: return
+ if path is sys.path:
+ self.check_version_conflict()
+ best, pos = 0, -1
+ for p,item in enumerate(path):
+ item = _normalize_cached(item)
+            if loc.startswith(item) and len(item)>best and loc!=item:
+ best, pos = len(item), p
+ if pos==-1:
+ if loc not in path: path.append(loc)
+ elif loc not in path[:pos+1]:
+ while loc in path: path.remove(loc)
+ path.insert(pos,loc)
+
+
+ def check_version_conflict(self):
+ if self.key=='setuptools':
+ return # ignore the inevitable setuptools self-conflicts :(
+
+ nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
+ loc = normalize_path(self.location)
+ for modname in self._get_metadata('top_level.txt'):
+ if (modname not in sys.modules or modname in nsp
+ or modname in _namespace_packages
+ ):
+ continue
+
+ fn = getattr(sys.modules[modname], '__file__', None)
+ if fn and normalize_path(fn).startswith(loc):
+ continue
+ issue_warning(
+ "Module %s was already imported from %s, but %s is being added"
+ " to sys.path" % (modname, fn, self.location),
+ )
+
+ def has_version(self):
+ try:
+ self.version
+ except ValueError:
+ issue_warning("Unbuilt egg for "+repr(self))
+ return False
+ return True
+
+ def clone(self,**kw):
+ """Copy this distribution, substituting in any changed keyword args"""
+ for attr in (
+ 'project_name', 'version', 'py_version', 'platform', 'location',
+ 'precedence'
+ ):
+ kw.setdefault(attr, getattr(self,attr,None))
+ kw.setdefault('metadata', self._provider)
+ return self.__class__(**kw)
+
+
+
+
+ #@property
+ def extras(self):
+ return [dep for dep in self._dep_map if dep]
+ extras = property(extras)
+
+
+def issue_warning(*args,**kw):
+ level = 1
+ g = globals()
+ try:
+ # find the first stack frame that is *not* code in
+ # the pkg_resources module, to use for the warning
+ while sys._getframe(level).f_globals is g:
+ level += 1
+ except ValueError:
+ pass
+ from warnings import warn
+ warn(stacklevel = level+1, *args, **kw)
+
+
+def parse_requirements(strs):
+ """Yield ``Requirement`` objects for each specification in `strs`
+
+ `strs` must be an instance of ``basestring``, or a (possibly-nested)
+ iterable thereof.
+ """
+ # create a steppable iterator, so we can handle \-continuations
+ lines = iter(yield_lines(strs))
+
+ def scan_list(ITEM,TERMINATOR,line,p,groups,item_name):
+
+ items = []
+
+ while not TERMINATOR(line,p):
+ if CONTINUE(line,p):
+ try:
+ line = lines.next(); p = 0
+ except StopIteration:
+ raise ValueError(
+ "\\ must not appear on the last nonblank line"
+ )
+
+ match = ITEM(line,p)
+ if not match:
+ raise ValueError("Expected "+item_name+" in",line,"at",line[p:])
+
+ items.append(match.group(*groups))
+ p = match.end()
+
+ match = COMMA(line,p)
+ if match:
+ p = match.end() # skip the comma
+ elif not TERMINATOR(line,p):
+ raise ValueError(
+ "Expected ',' or end-of-list in",line,"at",line[p:]
+ )
+
+ match = TERMINATOR(line,p)
+ if match: p = match.end() # skip the terminator, if any
+ return line, p, items
+
+ for line in lines:
+ match = DISTRO(line)
+ if not match:
+ raise ValueError("Missing distribution spec", line)
+ project_name = match.group(1)
+ p = match.end()
+ extras = []
+
+ match = OBRACKET(line,p)
+ if match:
+ p = match.end()
+ line, p, extras = scan_list(
+ DISTRO, CBRACKET, line, p, (1,), "'extra' name"
+ )
+
+ line, p, specs = scan_list(VERSION,LINE_END,line,p,(1,2),"version spec")
+ specs = [(op,safe_version(val)) for op,val in specs]
+ yield Requirement(project_name, specs, extras)
+
+
+def _sort_dists(dists):
+ tmp = [(dist.hashcmp,dist) for dist in dists]
+ tmp.sort()
+    dists[::-1] = [d for hc,d in tmp]   # reversed, so best/newest comes first
+
+
+class Requirement:
+ def __init__(self, project_name, specs, extras):
+ """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
+ self.unsafe_name, project_name = project_name, safe_name(project_name)
+ self.project_name, self.key = project_name, project_name.lower()
+ index = [(parse_version(v),state_machine[op],op,v) for op,v in specs]
+ index.sort()
+ self.specs = [(op,ver) for parsed,trans,op,ver in index]
+ self.index, self.extras = index, tuple(map(safe_extra,extras))
+ self.hashCmp = (
+ self.key, tuple([(op,parsed) for parsed,trans,op,ver in index]),
+ ImmutableSet(self.extras)
+ )
+ self.__hash = hash(self.hashCmp)
+
+ def __str__(self):
+ specs = ','.join([''.join(s) for s in self.specs])
+ extras = ','.join(self.extras)
+ if extras: extras = '[%s]' % extras
+ return '%s%s%s' % (self.project_name, extras, specs)
+
+ def __eq__(self,other):
+ return isinstance(other,Requirement) and self.hashCmp==other.hashCmp
+
+ def __contains__(self,item):
+ if isinstance(item,Distribution):
+ if item.key != self.key: return False
+ if self.index: item = item.parsed_version # only get if we need it
+ elif isinstance(item,basestring):
+ item = parse_version(item)
+ last = None
+ for parsed,trans,op,ver in self.index:
+ action = trans[cmp(item,parsed)]
+ if action=='F': return False
+ elif action=='T': return True
+ elif action=='+': last = True
+ elif action=='-' or last is None: last = False
+ if last is None: last = True # no rules encountered
+ return last
+
+
+ def __hash__(self):
+ return self.__hash
+
+ def __repr__(self): return "Requirement.parse(%r)" % str(self)
+
+ #@staticmethod
+ def parse(s):
+ reqs = list(parse_requirements(s))
+ if reqs:
+ if len(reqs)==1:
+ return reqs[0]
+ raise ValueError("Expected only one requirement", s)
+ raise ValueError("No requirements found", s)
+
+ parse = staticmethod(parse)
+
+state_machine = {
+ # =><
+ '<' : '--T',
+ '<=': 'T-T',
+ '>' : 'F+F',
+ '>=': 'T+F',
+ '==': 'T..',
+ '!=': 'F++',
+}
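
Each value above is a three-character action string indexed by cmp(candidate, spec): index 0 is the '=' column, 1 is '>', and -1 (string wrap-around) is '<', matching the "=><" key. 'T'/'F' accept or reject immediately in Requirement.__contains__ above, while '+'/'-' only set a tentative result that later specs may override. A standalone sketch of the lookup:

    # The table above, restated for a self-contained demo.
    state_machine = {
        '<' : '--T', '<=': 'T-T', '>' : 'F+F',
        '>=': 'T+F', '==': 'T..', '!=': 'F++',
    }

    def action_for(candidate, op, spec):
        # cmp() returns -1/0/1; used as a string index this picks the
        # '<' column (last char), '=' (first) or '>' (second).
        return state_machine[op][cmp(candidate, spec)]

    print action_for((1, 2), '>=', (1, 2))   # T: equal, accept outright
    print action_for((1, 5), '>=', (1, 2))   # +: tentative accept
    print action_for((1, 0), '>=', (1, 2))   # F: reject outright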
+
+
+def _get_mro(cls):
+ """Get an mro for a type or classic class"""
+ if not isinstance(cls,type):
+ class cls(cls,object): pass
+ return cls.__mro__[1:]
+ return cls.__mro__
+
+def _find_adapter(registry, ob):
+ """Return an adapter factory for `ob` from `registry`"""
+ for t in _get_mro(getattr(ob, '__class__', type(ob))):
+ if t in registry:
+ return registry[t]
+
+
+def ensure_directory(path):
+ """Ensure that the parent directory of `path` exists"""
+ dirname = os.path.dirname(path)
+ if not os.path.isdir(dirname):
+ os.makedirs(dirname)
+
+def split_sections(s):
+ """Split a string or iterable thereof into (section,content) pairs
+
+ Each ``section`` is a stripped version of the section header ("[section]")
+ and each ``content`` is a list of stripped lines excluding blank lines and
+ comment-only lines. If there are any such lines before the first section
+ header, they're returned in a first ``section`` of ``None``.
+ """
+ section = None
+ content = []
+ for line in yield_lines(s):
+ if line.startswith("["):
+ if line.endswith("]"):
+ if section or content:
+ yield section, content
+ section = line[1:-1].strip()
+ content = []
+ else:
+ raise ValueError("Invalid section heading", line)
+ else:
+ content.append(line)
+
+ # wrap up last segment
+ yield section, content
+
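A short usage sketch for split_sections(), again assuming pkg_resources is importable:

    from pkg_resources import split_sections

    text = """
    default-item
    [section one]
    a = 1
    # comments and blank lines are skipped
    b = 2
    """
    for section, lines in split_sections(text):
        print section, lines
    # None ['default-item']
    # section one ['a = 1', 'b = 2']
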
+def _mkstemp(*args,**kw):
+ from tempfile import mkstemp
+ old_open = os.open
+ try:
+ os.open = os_open # temporarily bypass sandboxing
+ return mkstemp(*args,**kw)
+ finally:
+ os.open = old_open # and then put it back
+
+
+# Set up global resource manager
+_manager = ResourceManager()
+def _initialize(g):
+ for name in dir(_manager):
+ if not name.startswith('_'):
+ g[name] = getattr(_manager, name)
+_initialize(globals())
+
+# Prepare the master working set and make the ``require()`` API available
+working_set = WorkingSet()
+try:
+ # Does the main program list any requirements?
+ from __main__ import __requires__
+except ImportError:
+ pass # No: just use the default working set based on sys.path
+else:
+ # Yes: ensure the requirements are met, by prefixing sys.path if necessary
+ try:
+ working_set.require(__requires__)
+ except VersionConflict: # try it without defaults already on sys.path
+ working_set = WorkingSet([]) # by starting with an empty path
+ for dist in working_set.resolve(
+ parse_requirements(__requires__), Environment()
+ ):
+ working_set.add(dist)
+ for entry in sys.path: # add any missing entries from sys.path
+ if entry not in working_set.entries:
+ working_set.add_entry(entry)
+ sys.path[:] = working_set.entries # then copy back to sys.path
+
+require = working_set.require
+iter_entry_points = working_set.iter_entry_points
+add_activation_listener = working_set.subscribe
+run_script = working_set.run_script
+run_main = run_script # backward compatibility
+# Activate all distributions already on sys.path, and ensure that
+# all distributions added to the working set in the future (e.g. by
+# calling ``require()``) will get activated as well.
+add_activation_listener(lambda dist: dist.activate())
+working_set.entries=[]; map(working_set.add_entry,sys.path) # match order
diff --git a/Lib/pkgutil.py b/Lib/pkgutil.py
index fbd708a..7316892 100644
--- a/Lib/pkgutil.py
+++ b/Lib/pkgutil.py
@@ -1,7 +1,432 @@
"""Utilities to support packages."""
+# NOTE: This module must remain compatible with Python 2.3, as it is shared
+# by setuptools for distribution with Python 2.3 and up.
+
import os
import sys
+import imp
+import os.path
+from types import ModuleType
+
+__all__ = [
+ 'get_importer', 'iter_importers', 'get_loader', 'find_loader',
+ 'walk_packages', 'iter_modules',
+ 'ImpImporter', 'ImpLoader', 'read_code', 'extend_path',
+]
+
+def read_code(stream):
+ # This helper is needed in order for the PEP 302 emulation to
+ # correctly handle compiled files
+ import marshal
+
+ magic = stream.read(4)
+ if magic != imp.get_magic():
+ return None
+
+ stream.read(4) # Skip timestamp
+ return marshal.load(stream)
+
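A hedged sketch of read_code() in action: a .pyc file is a 4-byte magic number, a 4-byte timestamp, then a marshalled code object, which is what this helper returns (file names below are illustrative):

    import py_compile, pkgutil

    open('example.py', 'w').write('ANSWER = 42\n')
    py_compile.compile('example.py')       # writes example.pyc alongside
    f = open('example.pyc', 'rb')
    code = pkgutil.read_code(f)            # None on a magic-number mismatch
    f.close()
    if code is not None:
        ns = {}
        exec code in ns
        print ns['ANSWER']                 # 42
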
+
+def simplegeneric(func):
+ """Make a trivial single-dispatch generic function"""
+ registry = {}
+ def wrapper(*args,**kw):
+ ob = args[0]
+ try:
+ cls = ob.__class__
+ except AttributeError:
+ cls = type(ob)
+ try:
+ mro = cls.__mro__
+ except AttributeError:
+ try:
+ class cls(cls,object): pass
+ mro = cls.__mro__[1:]
+ except TypeError:
+ mro = object, # must be an ExtensionClass or some such :(
+ for t in mro:
+ if t in registry:
+ return registry[t](*args,**kw)
+ else:
+ return func(*args,**kw)
+ try:
+ wrapper.__name__ = func.__name__
+ except (TypeError,AttributeError):
+ pass # Python 2.3 doesn't allow functions to be renamed
+
+ def register(typ, func=None):
+ if func is None:
+ return lambda f: register(typ, f)
+ registry[typ] = func
+ return func
+
+ wrapper.__dict__ = func.__dict__
+ wrapper.__doc__ = func.__doc__
+ wrapper.register = register
+ return wrapper
+
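A usage sketch for simplegeneric(), written in the same 2.3-friendly wrap-and-register style this module uses below (the helper is module-level but not in __all__):

    from pkgutil import simplegeneric

    def describe(ob):
        return 'object: %r' % (ob,)
    describe = simplegeneric(describe)

    def describe_list(ob):
        return 'list of %d items' % len(ob)
    describe_list = describe.register(list, describe_list)

    print describe(42)           # object: 42
    print describe([1, 2, 3])    # list of 3 items
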
+
+def walk_packages(path=None, prefix='', onerror=None):
+ """Yield submodule names+loaders recursively, for path or sys.path"""
+
+ def seen(p,m={}):
+ if p in m: return True
+ m[p] = True
+
+ for importer, name, ispkg in iter_modules(path, prefix):
+ yield importer, name, ispkg
+
+ if ispkg:
+ try:
+ __import__(name)
+ except ImportError:
+ if onerror is not None:
+ onerror()
+ else:
+ path = getattr(sys.modules[name], '__path__', None) or []
+
+ # don't traverse path items we've seen before
+ path = [p for p in path if not seen(p)]
+
+ for item in walk_packages(path, name+'.'):
+ yield item
+
+
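For example, walking one package recursively (ctypes is just a convenient package from this tree; any importable package works):

    import pkgutil, ctypes

    for importer, name, ispkg in pkgutil.walk_packages(ctypes.__path__,
                                                       prefix='ctypes.'):
        if ispkg:
            print name, '(package)'
        else:
            print name
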
+def iter_modules(path=None, prefix=''):
+ """Yield submodule names+loaders for path or sys.path"""
+ if path is None:
+ importers = iter_importers()
+ else:
+ importers = map(get_importer, path)
+
+ yielded = {}
+ for i in importers:
+ for name, ispkg in iter_importer_modules(i, prefix):
+ if name not in yielded:
+ yielded[name] = 1
+ yield i, name, ispkg
+
+
+#@simplegeneric
+def iter_importer_modules(importer, prefix=''):
+ if not hasattr(importer,'iter_modules'):
+ return []
+ return importer.iter_modules(prefix)
+
+iter_importer_modules = simplegeneric(iter_importer_modules)
+
+
+class ImpImporter:
+ """PEP 302 Importer that wraps Python's "classic" import algorithm
+
+ ImpImporter(dirname) produces a PEP 302 importer that searches that
+ directory. ImpImporter(None) produces a PEP 302 importer that searches
+ the current sys.path, plus any modules that are frozen or built-in.
+
+ Note that ImpImporter does not currently support being used by placement
+ on sys.meta_path.
+ """
+
+ def __init__(self, path=None):
+ self.path = path
+
+ def find_module(self, fullname, path=None):
+ # Note: we ignore 'path' argument since it is only used via meta_path
+ subname = fullname.split(".")[-1]
+ if subname != fullname and self.path is None:
+ return None
+ if self.path is None:
+ path = None
+ else:
+ path = [os.path.realpath(self.path)]
+ try:
+ file, filename, etc = imp.find_module(subname, path)
+ except ImportError:
+ return None
+ return ImpLoader(fullname, file, filename, etc)
+
+ def iter_modules(self, prefix=''):
+ if self.path is None or not os.path.isdir(self.path):
+ return
+
+ yielded = {}
+ import inspect
+
+ filenames = os.listdir(self.path)
+ filenames.sort() # handle packages before same-named modules
+
+ for fn in filenames:
+ modname = inspect.getmodulename(fn)
+ if modname=='__init__' or modname in yielded:
+ continue
+
+ path = os.path.join(self.path, fn)
+ ispkg = False
+
+ if not modname and os.path.isdir(path) and '.' not in fn:
+ modname = fn
+ for fn in os.listdir(path):
+ subname = inspect.getmodulename(fn)
+ if subname=='__init__':
+ ispkg = True
+ break
+ else:
+ continue # not a package
+
+ if modname and '.' not in modname:
+ yielded[modname] = 1
+ yield prefix + modname, ispkg
+
+
+class ImpLoader:
+ """PEP 302 Loader that wraps Python's "classic" import algorithm
+ """
+ code = source = None
+
+ def __init__(self, fullname, file, filename, etc):
+ self.file = file
+ self.filename = filename
+ self.fullname = fullname
+ self.etc = etc
+
+ def load_module(self, fullname):
+ self._reopen()
+ try:
+ mod = imp.load_module(fullname, self.file, self.filename, self.etc)
+ finally:
+ if self.file:
+ self.file.close()
+ # Note: we don't set __loader__ because we want the module to look
+ # normal; i.e. this is just a wrapper for standard import machinery
+ return mod
+
+ def get_data(self, pathname):
+ return open(pathname, "rb").read()
+
+ def _reopen(self):
+ if self.file and self.file.closed:
+ mod_type = self.etc[2]
+ if mod_type==imp.PY_SOURCE:
+ self.file = open(self.filename, 'rU')
+ elif mod_type in (imp.PY_COMPILED, imp.C_EXTENSION):
+ self.file = open(self.filename, 'rb')
+
+ def _fix_name(self, fullname):
+ if fullname is None:
+ fullname = self.fullname
+ elif fullname != self.fullname:
+ raise ImportError("Loader for module %s cannot handle "
+ "module %s" % (self.fullname, fullname))
+ return fullname
+
+ def is_package(self, fullname):
+ fullname = self._fix_name(fullname)
+ return self.etc[2]==imp.PKG_DIRECTORY
+
+ def get_code(self, fullname=None):
+ fullname = self._fix_name(fullname)
+ if self.code is None:
+ mod_type = self.etc[2]
+ if mod_type==imp.PY_SOURCE:
+ source = self.get_source(fullname)
+ self.code = compile(source, self.filename, 'exec')
+ elif mod_type==imp.PY_COMPILED:
+ self._reopen()
+ try:
+ self.code = read_code(self.file)
+ finally:
+ self.file.close()
+ elif mod_type==imp.PKG_DIRECTORY:
+ self.code = self._get_delegate().get_code()
+ return self.code
+
+ def get_source(self, fullname=None):
+ fullname = self._fix_name(fullname)
+ if self.source is None:
+ mod_type = self.etc[2]
+ if mod_type==imp.PY_SOURCE:
+ self._reopen()
+ try:
+ self.source = self.file.read()
+ finally:
+ self.file.close()
+ elif mod_type==imp.PY_COMPILED:
+ if os.path.exists(self.filename[:-1]):
+ f = open(self.filename[:-1], 'rU')
+ self.source = f.read()
+ f.close()
+ elif mod_type==imp.PKG_DIRECTORY:
+ self.source = self._get_delegate().get_source()
+ return self.source
+
+
+ def _get_delegate(self):
+ return ImpImporter(self.filename).find_module('__init__')
+
+ def get_filename(self, fullname=None):
+ fullname = self._fix_name(fullname)
+ mod_type = self.etc[2]
+ if self.etc[2]==imp.PKG_DIRECTORY:
+ return self._get_delegate().get_filename()
+ elif self.etc[2] in (imp.PY_SOURCE, imp.PY_COMPILED, imp.C_EXTENSION):
+ return self.filename
+ return None
+
+
+try:
+ import zipimport
+ from zipimport import zipimporter
+
+ def iter_zipimport_modules(importer, prefix=''):
+ dirlist = zipimport._zip_directory_cache[importer.archive].keys()
+ dirlist.sort()
+ _prefix = importer.prefix
+ plen = len(_prefix)
+ yielded = {}
+ import inspect
+ for fn in dirlist:
+ if not fn.startswith(_prefix):
+ continue
+
+ fn = fn[plen:].split(os.sep)
+
+ if len(fn)==2 and fn[1].startswith('__init__.py'):
+ if fn[0] not in yielded:
+ yielded[fn[0]] = 1
+ yield fn[0], True
+
+ if len(fn)!=1:
+ continue
+
+ modname = inspect.getmodulename(fn[0])
+ if modname=='__init__':
+ continue
+
+ if modname and '.' not in modname and modname not in yielded:
+ yielded[modname] = 1
+ yield prefix + modname, False
+
+ iter_importer_modules.register(zipimporter, iter_zipimport_modules)
+
+except ImportError:
+ pass
+
+
+def get_importer(path_item):
+ """Retrieve a PEP 302 importer for the given path item
+
+ The returned importer is cached in sys.path_importer_cache
+ if it was newly created by a path hook.
+
+ If there is no importer, a wrapper around the basic import
+ machinery is returned. This wrapper is never inserted into
+ the importer cache (None is inserted instead).
+
+ The cache (or part of it) can be cleared manually if a
+ rescan of sys.path_hooks is necessary.
+ """
+ try:
+ importer = sys.path_importer_cache[path_item]
+ except KeyError:
+ for path_hook in sys.path_hooks:
+ try:
+ importer = path_hook(path_item)
+ break
+ except ImportError:
+ pass
+ else:
+ importer = None
+ sys.path_importer_cache.setdefault(path_item,importer)
+
+ if importer is None:
+ try:
+ importer = ImpImporter(path_item)
+ except ImportError:
+ pass
+ return importer
+
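A small usage sketch; note that for a plain directory the wrapper itself is not cached, only the None placeholder in sys.path_importer_cache:

    import sys, pkgutil

    importer = pkgutil.get_importer(sys.path[0])
    print importer                       # an ImpImporter wrapper here, or a
                                         # zipimporter for a zip file entry
    loader = importer.find_module('os')
    print loader                         # None unless 'os' lives in that entry
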
+
+def iter_importers(fullname=""):
+ """Yield PEP 302 importers for the given module name
+
+ If fullname contains a '.', the importers will be for the package
+ containing fullname, otherwise they will be importers for sys.meta_path,
+ sys.path, and Python's "classic" import machinery, in that order. If
+ the named module is in a package, that package is imported as a side
+ effect of invoking this function.
+
+ Non PEP 302 mechanisms (e.g. the Windows registry) used by the
+ standard import machinery to find files in alternative locations
+ are partially supported, but are searched AFTER sys.path. Normally,
+ these locations are searched BEFORE sys.path, preventing sys.path
+ entries from shadowing them.
+
+ For this to cause a visible difference in behaviour, there must
+ be a module or package name that is accessible via both sys.path
+ and one of the non PEP 302 file system mechanisms. In this case,
+ the emulation will find the former version, while the builtin
+ import mechanism will find the latter.
+
+ Items of the following types can be affected by this discrepancy:
+ imp.C_EXTENSION, imp.PY_SOURCE, imp.PY_COMPILED, imp.PKG_DIRECTORY
+ """
+ if fullname.startswith('.'):
+ raise ImportError("Relative module names not supported")
+ if '.' in fullname:
+ # Get the containing package's __path__
+ pkg = '.'.join(fullname.split('.')[:-1])
+ if pkg not in sys.modules:
+ __import__(pkg)
+ path = getattr(sys.modules[pkg],'__path__',None) or []
+ else:
+ for importer in sys.meta_path:
+ yield importer
+ path = sys.path
+ for item in path:
+ yield get_importer(item)
+ if '.' not in fullname:
+ yield ImpImporter()
+
+def get_loader(module_or_name):
+ """Get a PEP 302 "loader" object for module_or_name
+
+ If the module or package is accessible via the normal import
+ mechanism, a wrapper around the relevant part of that machinery
+ is returned. Returns None if the module cannot be found or imported.
+ If the named module is not already imported, its containing package
+ (if any) is imported, in order to establish the package __path__.
+
+ This function uses iter_importers(), and is thus subject to the same
+ limitations regarding platform-specific special import locations such
+ as the Windows registry.
+ """
+ if module_or_name in sys.modules:
+ module_or_name = sys.modules[module_or_name]
+ if isinstance(module_or_name, ModuleType):
+ module = module_or_name
+ loader = getattr(module,'__loader__',None)
+ if loader is not None:
+ return loader
+ fullname = module.__name__
+ else:
+ fullname = module_or_name
+ return find_loader(fullname)
+
+def find_loader(fullname):
+ """Find a PEP 302 "loader" object for fullname
+
+ If fullname contains dots, the containing package is imported as needed
+ to establish that package's __path__.
+ Returns None if the module cannot be found or imported. This function uses
+ iter_importers(), and is thus subject to the same limitations regarding
+ platform-specific special import locations such as the Windows registry.
+ """
+ for importer in iter_importers(fullname):
+ loader = importer.find_module(fullname)
+ if loader is not None:
+ return loader
+
+ return None
+
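A usage sketch for the two lookup helpers (assuming the classic filesystem import path; xml.sax is just a stdlib example):

    import pkgutil

    loader = pkgutil.get_loader('xml.sax')
    if loader is not None:
        print loader.is_package('xml.sax')     # True
        print loader.get_filename('xml.sax')   # path to xml/sax/__init__.py
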
def extend_path(path, name):
"""Extend a package's path.
diff --git a/Lib/plat-mac/applesingle.py b/Lib/plat-mac/applesingle.py
index b035d9e..76bdb06 100644
--- a/Lib/plat-mac/applesingle.py
+++ b/Lib/plat-mac/applesingle.py
@@ -25,7 +25,7 @@ class Error(ValueError):
pass
# File header format: magic, version, unused, number of entries
-AS_HEADER_FORMAT=">ll16sh"
+AS_HEADER_FORMAT=">LL16sh"
AS_HEADER_LENGTH=26
# The flag words for AppleSingle
AS_MAGIC=0x00051600
diff --git a/Lib/platform.py b/Lib/platform.py
index 62fdaf4..288bc95 100755
--- a/Lib/platform.py
+++ b/Lib/platform.py
@@ -607,7 +607,8 @@ def mac_ver(release='',versioninfo=('','',''),machine=''):
versioninfo = (version,stage,nonrel)
if sysa:
machine = {0x1: '68k',
- 0x2: 'PowerPC'}.get(sysa,'')
+ 0x2: 'PowerPC',
+ 0xa: 'i386'}.get(sysa,'')
return release,versioninfo,machine
def _java_getprop(name,default):
diff --git a/Lib/popen2.py b/Lib/popen2.py
index 54543be..67ebd26 100644
--- a/Lib/popen2.py
+++ b/Lib/popen2.py
@@ -20,7 +20,13 @@ _active = []
def _cleanup():
for inst in _active[:]:
- inst.poll()
+ if inst.poll(_deadstate=sys.maxint) >= 0:
+ try:
+ _active.remove(inst)
+ except ValueError:
+ # This can happen if two threads create a new Popen instance.
+ # It's harmless that it was already removed, so ignore.
+ pass
class Popen3:
"""Class representing a child process. Normally instances are created
@@ -39,6 +45,7 @@ class Popen3:
specified, it specifies the size of the I/O buffers to/from the child
process."""
_cleanup()
+ self.cmd = cmd
p2cread, p2cwrite = os.pipe()
c2pread, c2pwrite = os.pipe()
if capturestderr:
@@ -60,7 +67,13 @@ class Popen3:
self.childerr = os.fdopen(errout, 'r', bufsize)
else:
self.childerr = None
- _active.append(self)
+
+ def __del__(self):
+ # In case the child hasn't been waited on, check if it's done.
+ self.poll(_deadstate=sys.maxint)
+ if self.sts < 0:
+ # Child is still running, keep us alive until we can wait on it.
+ _active.append(self)
def _run_child(self, cmd):
if isinstance(cmd, basestring):
@@ -75,26 +88,28 @@ class Popen3:
finally:
os._exit(1)
- def poll(self):
+ def poll(self, _deadstate=None):
"""Return the exit status of the child process if it has finished,
or -1 if it hasn't finished yet."""
if self.sts < 0:
try:
pid, sts = os.waitpid(self.pid, os.WNOHANG)
+ # pid will be 0 if self.pid hasn't terminated
if pid == self.pid:
self.sts = sts
- _active.remove(self)
except os.error:
- pass
+ if _deadstate is not None:
+ self.sts = _deadstate
return self.sts
def wait(self):
"""Wait for and return the exit status of the child process."""
if self.sts < 0:
pid, sts = os.waitpid(self.pid, 0)
- if pid == self.pid:
- self.sts = sts
- _active.remove(self)
+ # This used to be a test, but it is believed to be
+ # always true, so I changed it to an assertion - mvl
+ assert pid == self.pid
+ self.sts = sts
return self.sts
@@ -103,6 +118,7 @@ class Popen4(Popen3):
def __init__(self, cmd, bufsize=-1):
_cleanup()
+ self.cmd = cmd
p2cread, p2cwrite = os.pipe()
c2pread, c2pwrite = os.pipe()
self.pid = os.fork()
@@ -116,7 +132,6 @@ class Popen4(Popen3):
self.tochild = os.fdopen(p2cwrite, 'w', bufsize)
os.close(c2pwrite)
self.fromchild = os.fdopen(c2pread, 'r', bufsize)
- _active.append(self)
if sys.platform[:3] == "win" or sys.platform == "os2emx":
@@ -186,6 +201,9 @@ else:
__all__.extend(["Popen3", "Popen4"])
def _test():
+ # When the test runs, there shouldn't be any open pipes
+ _cleanup()
+ assert not _active, "Active pipes when test starts " + repr([c.cmd for c in _active])
cmd = "cat"
teststr = "ab cd\n"
if os.name == "nt":
@@ -216,6 +234,7 @@ def _test():
raise ValueError("unexpected %r on stderr" % (got,))
for inst in _active[:]:
inst.wait()
+ _cleanup()
if _active:
raise ValueError("_active not empty")
print "All OK"
diff --git a/Lib/pstats.py b/Lib/pstats.py
index 930cc6d..c3a8828 100644
--- a/Lib/pstats.py
+++ b/Lib/pstats.py
@@ -32,6 +32,7 @@
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+import sys
import os
import time
import marshal
@@ -58,18 +59,31 @@ class Stats:
printed.
The sort_stats() method now processes some additional options (i.e., in
- addition to the old -1, 0, 1, or 2). It takes an arbitrary number of quoted
- strings to select the sort order. For example sort_stats('time', 'name')
- sorts on the major key of "internal function time", and on the minor
- key of 'the name of the function'. Look at the two tables in sort_stats()
- and get_sort_arg_defs(self) for more examples.
+ addition to the old -1, 0, 1, or 2). It takes an arbitrary number of
+ quoted strings to select the sort order. For example sort_stats('time',
+ 'name') sorts on the major key of 'internal function time', and on the
+ minor key of 'the name of the function'. Look at the two tables in
+ sort_stats() and get_sort_arg_defs(self) for more examples.
- All methods now return "self", so you can string together commands like:
+ All methods return self, so you can string together commands like:
Stats('foo', 'goo').strip_dirs().sort_stats('calls').\
print_stats(5).print_callers(5)
"""
- def __init__(self, *args):
+ def __init__(self, *args, **kwds):
+ # I can't figure out how to explicitly specify a stream keyword arg
+ # with *args:
+ # def __init__(self, *args, stream=sys.stdout): ...
+ # so I use **kwds and squawk if something unexpected is passed in.
+ self.stream = sys.stdout
+ if "stream" in kwds:
+ self.stream = kwds["stream"]
+ del kwds["stream"]
+ if kwds:
+ keys = kwds.keys()
+ keys.sort()
+ extras = ", ".join(["%s=%s" % (k, kwds[k]) for k in keys])
+ raise ValueError, "unrecognized keyword args: %s" % extras
if not len(args):
arg = None
else:
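With the new stream keyword the whole report can be redirected without touching sys.stdout; a sketch (the profile file name is illustrative):

    import pstats

    out = open('profile_report.txt', 'w')
    stats = pstats.Stats('myscript.prof', stream=out)
    stats.strip_dirs().sort_stats('time').print_stats(10)
    out.close()
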
@@ -96,9 +110,9 @@ class Stats:
trouble = 0
finally:
if trouble:
- print "Invalid timing data",
- if self.files: print self.files[-1],
- print
+ print >> self.stream, "Invalid timing data",
+ if self.files: print >> self.stream, self.files[-1],
+ print >> self.stream
def load_stats(self, arg):
if not arg: self.stats = {}
@@ -320,7 +334,7 @@ class Stats:
if not list:
return 0, list
- print msg
+ print >> self.stream, msg
if count < len(self.stats):
width = 0
for func in list:
@@ -330,24 +344,24 @@ class Stats:
def print_stats(self, *amount):
for filename in self.files:
- print filename
- if self.files: print
+ print >> self.stream, filename
+ if self.files: print >> self.stream
indent = ' ' * 8
for func in self.top_level:
- print indent, func_get_function_name(func)
+ print >> self.stream, indent, func_get_function_name(func)
- print indent, self.total_calls, "function calls",
+ print >> self.stream, indent, self.total_calls, "function calls",
if self.total_calls != self.prim_calls:
- print "(%d primitive calls)" % self.prim_calls,
- print "in %.3f CPU seconds" % self.total_tt
- print
+ print >> self.stream, "(%d primitive calls)" % self.prim_calls,
+ print >> self.stream, "in %.3f CPU seconds" % self.total_tt
+ print >> self.stream
width, list = self.get_print_list(amount)
if list:
self.print_title()
for func in list:
self.print_line(func)
- print
- print
+ print >> self.stream
+ print >> self.stream
return self
def print_callees(self, *amount):
@@ -361,8 +375,8 @@ class Stats:
self.print_call_line(width, func, self.all_callees[func])
else:
self.print_call_line(width, func, {})
- print
- print
+ print >> self.stream
+ print >> self.stream
return self
def print_callers(self, *amount):
@@ -372,12 +386,12 @@ class Stats:
for func in list:
cc, nc, tt, ct, callers = self.stats[func]
self.print_call_line(width, func, callers, "<-")
- print
- print
+ print >> self.stream
+ print >> self.stream
return self
def print_call_heading(self, name_size, column_title):
- print "Function ".ljust(name_size) + column_title
+ print >> self.stream, "Function ".ljust(name_size) + column_title
# print sub-header only if we have new-style callers
subheader = False
for cc, nc, tt, ct, callers in self.stats.itervalues():
@@ -386,12 +400,12 @@ class Stats:
subheader = isinstance(value, tuple)
break
if subheader:
- print " "*name_size + " ncalls tottime cumtime"
+ print >> self.stream, " "*name_size + " ncalls tottime cumtime"
def print_call_line(self, name_size, source, call_dict, arrow="->"):
- print func_std_string(source).ljust(name_size) + arrow,
+ print >> self.stream, func_std_string(source).ljust(name_size) + arrow,
if not call_dict:
- print
+ print >> self.stream
return
clist = call_dict.keys()
clist.sort()
@@ -411,30 +425,30 @@ class Stats:
else:
substats = '%s(%r) %s' % (name, value, f8(self.stats[func][3]))
left_width = name_size + 3
- print indent*left_width + substats
+ print >> self.stream, indent*left_width + substats
indent = " "
def print_title(self):
- print ' ncalls tottime percall cumtime percall', \
- 'filename:lineno(function)'
+ print >> self.stream, ' ncalls tottime percall cumtime percall',
+ print >> self.stream, 'filename:lineno(function)'
def print_line(self, func): # hack : should print percentages
cc, nc, tt, ct, callers = self.stats[func]
c = str(nc)
if nc != cc:
c = c + '/' + str(cc)
- print c.rjust(9),
- print f8(tt),
+ print >> self.stream, c.rjust(9),
+ print >> self.stream, f8(tt),
if nc == 0:
- print ' '*8,
+ print >> self.stream, ' '*8,
else:
- print f8(tt/nc),
- print f8(ct),
+ print >> self.stream, f8(tt/nc),
+ print >> self.stream, f8(ct),
if cc == 0:
- print ' '*8,
+ print >> self.stream, ' '*8,
else:
- print f8(ct/cc),
- print func_std_string(func)
+ print >> self.stream, f8(ct/cc),
+ print >> self.stream, func_std_string(func)
class TupleComp:
"""This class provides a generic function for comparing any two tuples.
@@ -549,7 +563,7 @@ if __name__ == '__main__':
try:
frac = float(term)
if frac > 1 or frac < 0:
- print "Fraction argument mus be in [0, 1]"
+ print >> self.stream, "Fraction argument must be in [0, 1]"
continue
processed.append(frac)
continue
@@ -559,93 +573,93 @@ if __name__ == '__main__':
if self.stats:
getattr(self.stats, fn)(*processed)
else:
- print "No statistics object is loaded."
+ print >> self.stream, "No statistics object is loaded."
return 0
def generic_help(self):
- print "Arguments may be:"
- print "* An integer maximum number of entries to print."
- print "* A decimal fractional number between 0 and 1, controlling"
- print " what fraction of selected entries to print."
- print "* A regular expression; only entries with function names"
- print " that match it are printed."
+ print >> self.stream, "Arguments may be:"
+ print >> self.stream, "* An integer maximum number of entries to print."
+ print >> self.stream, "* A decimal fractional number between 0 and 1, controlling"
+ print >> self.stream, " what fraction of selected entries to print."
+ print >> self.stream, "* A regular expression; only entries with function names"
+ print >> self.stream, " that match it are printed."
def do_add(self, line):
self.stats.add(line)
return 0
def help_add(self):
- print "Add profile info from given file to current statistics object."
+ print >> self.stream, "Add profile info from given file to current statistics object."
def do_callees(self, line):
return self.generic('print_callees', line)
def help_callees(self):
- print "Print callees statistics from the current stat object."
+ print >> self.stream, "Print callees statistics from the current stat object."
self.generic_help()
def do_callers(self, line):
return self.generic('print_callers', line)
def help_callers(self):
- print "Print callers statistics from the current stat object."
+ print >> self.stream, "Print callers statistics from the current stat object."
self.generic_help()
def do_EOF(self, line):
- print ""
+ print >> self.stream, ""
return 1
def help_EOF(self):
- print "Leave the profile brower."
+ print >> self.stream, "Leave the profile brower."
def do_quit(self, line):
return 1
def help_quit(self):
- print "Leave the profile brower."
+ print >> self.stream, "Leave the profile brower."
def do_read(self, line):
if line:
try:
self.stats = Stats(line)
except IOError, args:
- print args[1]
+ print >> self.stream, args[1]
return
self.prompt = line + "% "
elif len(self.prompt) > 2:
line = self.prompt[-2:]
else:
- print "No statistics object is current -- cannot reload."
+ print >> self.stream, "No statistics object is current -- cannot reload."
return 0
def help_read(self):
- print "Read in profile data from a specified file."
+ print >> self.stream, "Read in profile data from a specified file."
def do_reverse(self, line):
self.stats.reverse_order()
return 0
def help_reverse(self):
- print "Reverse the sort order of the profiling report."
+ print >> self.stream, "Reverse the sort order of the profiling report."
def do_sort(self, line):
abbrevs = self.stats.get_sort_arg_defs()
if line and not filter(lambda x,a=abbrevs: x not in a,line.split()):
self.stats.sort_stats(*line.split())
else:
- print "Valid sort keys (unique prefixes are accepted):"
+ print >> self.stream, "Valid sort keys (unique prefixes are accepted):"
for (key, value) in Stats.sort_arg_dict_default.iteritems():
- print "%s -- %s" % (key, value[1])
+ print >> self.stream, "%s -- %s" % (key, value[1])
return 0
def help_sort(self):
- print "Sort profile data according to specified keys."
- print "(Typing `sort' without arguments lists valid keys.)"
+ print >> self.stream, "Sort profile data according to specified keys."
+ print >> self.stream, "(Typing `sort' without arguments lists valid keys.)"
def complete_sort(self, text, *args):
return [a for a in Stats.sort_arg_dict_default if a.startswith(text)]
def do_stats(self, line):
return self.generic('print_stats', line)
def help_stats(self):
- print "Print statistics from the current stat object."
+ print >> self.stream, "Print statistics from the current stat object."
self.generic_help()
def do_strip(self, line):
self.stats.strip_dirs()
return 0
def help_strip(self):
- print "Strip leading path information from filenames in the report."
+ print >> self.stream, "Strip leading path information from filenames in the report."
def postcmd(self, stop, line):
if stop:
@@ -653,14 +667,14 @@ if __name__ == '__main__':
return None
import sys
- print "Welcome to the profile statistics browser."
+ print >> self.stream, "Welcome to the profile statistics browser."
if len(sys.argv) > 1:
initprofile = sys.argv[1]
else:
initprofile = None
try:
ProfileBrowser(initprofile).cmdloop()
- print "Goodbye."
+ print >> self.stream, "Goodbye."
except KeyboardInterrupt:
pass
diff --git a/Lib/pydoc.py b/Lib/pydoc.py
index b6afc7f..cf38630 100755
--- a/Lib/pydoc.py
+++ b/Lib/pydoc.py
@@ -52,10 +52,16 @@ Richard Chamberlain, for the first implementation of textdoc.
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
-import sys, imp, os, re, types, inspect, __builtin__
+import sys, imp, os, re, types, inspect, __builtin__, pkgutil
from repr import Repr
from string import expandtabs, find, join, lower, split, strip, rfind, rstrip
-from collections import deque
+try:
+ from collections import deque
+except ImportError:
+ # Python 2.3 compatibility
+ class deque(list):
+ def popleft(self):
+ return self.pop(0)
# --------------------------------------------------------- common routines
@@ -182,6 +188,23 @@ def ispackage(path):
return True
return False
+def source_synopsis(file):
+ line = file.readline()
+ while line[:1] == '#' or not strip(line):
+ line = file.readline()
+ if not line: break
+ line = strip(line)
+ if line[:4] == 'r"""': line = line[1:]
+ if line[:3] == '"""':
+ line = line[3:]
+ if line[-1:] == '\\': line = line[:-1]
+ while not strip(line):
+ line = file.readline()
+ if not line: break
+ result = strip(split(line, '"""')[0])
+ else: result = None
+ return result
+
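A quick sketch of the newly factored-out helper, which reads a synopsis from any file-like object:

    import StringIO
    from pydoc import source_synopsis

    src = StringIO.StringIO('# a comment\n"""One-line summary.\n\nDetails."""\n')
    print source_synopsis(src)    # One-line summary.
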
def synopsis(filename, cache={}):
"""Get the one-line summary out of a module file."""
mtime = os.stat(filename).st_mtime
@@ -196,24 +219,11 @@ def synopsis(filename, cache={}):
if info and 'b' in info[2]: # binary modules have to be imported
try: module = imp.load_module('__temp__', file, filename, info[1:])
except: return None
- result = split(module.__doc__ or '', '\n')[0]
+ result = ((module.__doc__ or '').splitlines() or [''])[0]
del sys.modules['__temp__']
else: # text modules can be directly examined
- line = file.readline()
- while line[:1] == '#' or not strip(line):
- line = file.readline()
- if not line: break
- line = strip(line)
- if line[:4] == 'r"""': line = line[1:]
- if line[:3] == '"""':
- line = line[3:]
- if line[-1:] == '\\': line = line[:-1]
- while not strip(line):
- line = file.readline()
- if not line: break
- result = strip(split(line, '"""')[0])
- else: result = None
- file.close()
+ result = source_synopsis(file)
+ file.close()
cache[filename] = (mtime, result)
return result
@@ -643,16 +653,8 @@ class HTMLDoc(Doc):
if hasattr(object, '__path__'):
modpkgs = []
- modnames = []
- for file in os.listdir(object.__path__[0]):
- path = os.path.join(object.__path__[0], file)
- modname = inspect.getmodulename(file)
- if modname != '__init__':
- if modname and modname not in modnames:
- modpkgs.append((modname, name, 0, 0))
- modnames.append(modname)
- elif ispackage(path):
- modpkgs.append((file, name, 1, 0))
+ for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
+ modpkgs.append((modname, name, ispkg, 0))
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
result = result + self.bigsection(
@@ -796,7 +798,10 @@ class HTMLDoc(Doc):
tag += ':<br>\n'
# Sort attrs by name.
- attrs.sort(key=lambda t: t[0])
+ try:
+ attrs.sort(key=lambda t: t[0])
+ except TypeError:
+ attrs.sort(lambda t1, t2: cmp(t1[0], t2[0])) # 2.3 compat
# Pump out the attrs, segregated by kind.
attrs = spill('Methods %s' % tag, attrs,
@@ -914,25 +919,9 @@ class HTMLDoc(Doc):
"""Generate an HTML index for a directory of modules."""
modpkgs = []
if shadowed is None: shadowed = {}
- seen = {}
- files = os.listdir(dir)
-
- def found(name, ispackage,
- modpkgs=modpkgs, shadowed=shadowed, seen=seen):
- if name not in seen:
- modpkgs.append((name, '', ispackage, name in shadowed))
- seen[name] = 1
- shadowed[name] = 1
-
- # Package spam/__init__.py takes precedence over module spam.py.
- for file in files:
- path = os.path.join(dir, file)
- if ispackage(path): found(file, 1)
- for file in files:
- path = os.path.join(dir, file)
- if os.path.isfile(path):
- modname = inspect.getmodulename(file)
- if modname: found(modname, 0)
+ for importer, name, ispkg in pkgutil.iter_modules([dir]):
+ modpkgs.append((name, '', ispkg, name in shadowed))
+ shadowed[name] = 1
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
@@ -1059,14 +1048,12 @@ class TextDoc(Doc):
if hasattr(object, '__path__'):
modpkgs = []
- for file in os.listdir(object.__path__[0]):
- path = os.path.join(object.__path__[0], file)
- modname = inspect.getmodulename(file)
- if modname != '__init__':
- if modname and modname not in modpkgs:
- modpkgs.append(modname)
- elif ispackage(path):
- modpkgs.append(file + ' (package)')
+ for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
+ if ispkg:
+ modpkgs.append(modname + ' (package)')
+ else:
+ modpkgs.append(modname)
+
modpkgs.sort()
result = result + self.section(
'PACKAGE CONTENTS', join(modpkgs, '\n'))
@@ -1490,20 +1477,9 @@ def writedoc(thing, forceload=0):
def writedocs(dir, pkgpath='', done=None):
"""Write out HTML documentation for all modules in a directory tree."""
if done is None: done = {}
- for file in os.listdir(dir):
- path = os.path.join(dir, file)
- if ispackage(path):
- writedocs(path, pkgpath + file + '.', done)
- elif os.path.isfile(path):
- modname = inspect.getmodulename(path)
- if modname:
- if modname == '__init__':
- modname = pkgpath[:-1] # remove trailing period
- else:
- modname = pkgpath + modname
- if modname not in done:
- done[modname] = 1
- writedoc(modname)
+ for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
+ writedoc(modname)
+ return
def raw_input(prompt):
sys.stdout.write(prompt)
@@ -1835,30 +1811,9 @@ class Scanner:
self.state.append((child, self.children(child)))
return child
-class ModuleScanner(Scanner):
- """An interruptible scanner that searches module synopses."""
- def __init__(self):
- roots = map(lambda dir: (dir, ''), pathdirs())
- Scanner.__init__(self, roots, self.submodules, self.isnewpackage)
- self.inodes = map(lambda (dir, pkg): os.stat(dir).st_ino, roots)
-
- def submodules(self, (dir, package)):
- children = []
- for file in os.listdir(dir):
- path = os.path.join(dir, file)
- if ispackage(path):
- children.append((path, package + (package and '.') + file))
- else:
- children.append((path, package))
- children.sort() # so that spam.py comes before spam.pyc or spam.pyo
- return children
- def isnewpackage(self, (dir, package)):
- inode = os.path.exists(dir) and os.stat(dir).st_ino
- if not (os.path.islink(dir) and inode in self.inodes):
- self.inodes.append(inode) # detect circular symbolic links
- return ispackage(dir)
- return False
+class ModuleScanner:
+ """An interruptible scanner that searches module synopses."""
def run(self, callback, key=None, completer=None):
if key: key = lower(key)
@@ -1875,22 +1830,31 @@ class ModuleScanner(Scanner):
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(None, modname, desc)
- while not self.quit:
- node = self.next()
- if not node: break
- path, package = node
- modname = inspect.getmodulename(path)
- if os.path.isfile(path) and modname:
- modname = package + (package and '.') + modname
- if not modname in seen:
- seen[modname] = 1 # if we see spam.py, skip spam.pyc
- if key is None:
- callback(path, modname, '')
+ for importer, modname, ispkg in pkgutil.walk_packages():
+ if self.quit:
+ break
+ if key is None:
+ callback(None, modname, '')
+ else:
+ loader = importer.find_module(modname)
+ if hasattr(loader,'get_source'):
+ import StringIO
+ desc = source_synopsis(
+ StringIO.StringIO(loader.get_source(modname))
+ ) or ''
+ if hasattr(loader,'get_filename'):
+ path = loader.get_filename(modname)
else:
- desc = synopsis(path) or ''
- if find(lower(modname + ' - ' + desc), key) >= 0:
- callback(path, modname, desc)
- if completer: completer()
+ path = None
+ else:
+ module = loader.load_module(modname)
+ desc = ((module.__doc__ or '').splitlines() or [''])[0]
+ path = getattr(module,'__file__',None)
+ if find(lower(modname + ' - ' + desc), key) >= 0:
+ callback(path, modname, desc)
+
+ if completer:
+ completer()
def apropos(key):
"""Print all the one-line module summaries that contain a substring."""
@@ -1955,7 +1919,7 @@ def serve(port, callback=None, completer=None):
'Built-in Modules', '#ffffff', '#ee77aa', contents)]
seen = {}
- for dir in pathdirs():
+ for dir in sys.path:
indices.append(html.index(dir, seen))
contents = heading + join(indices) + '''<p align=right>
<font color="#909090" face="helvetica, arial"><strong>
diff --git a/Lib/random.py b/Lib/random.py
index b4ad2b3..465f477 100644
--- a/Lib/random.py
+++ b/Lib/random.py
@@ -285,6 +285,15 @@ class Random(_random.Random):
large population: sample(xrange(10000000), 60)
"""
+ # XXX Although the documentation says `population` is "a sequence",
+ # XXX attempts are made to cater to any iterable with a __len__
+ # XXX method. This has had mixed success. Examples from both
+ # XXX sides: sets work fine, and should become officially supported;
+ # XXX dicts are much harder, and have failed in various subtle
+ # XXX ways across attempts. Support for mapping types should probably
+ # XXX be dropped (and users should pass mapping.keys() or .values()
+ # XXX explicitly).
+
# Sampling without replacement entails tracking either potential
# selections (the pool) in a list or previous selections in a set.
@@ -304,7 +313,9 @@ class Random(_random.Random):
setsize = 21 # size of a small set minus size of an empty list
if k > 5:
setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
- if n <= setsize: # is an n-length list smaller than a k-length set
+ if n <= setsize or hasattr(population, "keys"):
+ # An n-length list is smaller than a k-length set, or this is a
+ # mapping type so the other algorithm wouldn't work.
pool = list(population)
for i in xrange(k): # invariant: non-selected at [0,n-i)
j = _int(random() * (n-i))
@@ -312,17 +323,18 @@ class Random(_random.Random):
pool[j] = pool[n-i-1] # move non-selected item into vacancy
else:
try:
- n > 0 and (population[0], population[n//2], population[n-1])
- except (TypeError, KeyError): # handle non-sequence iterables
- population = tuple(population)
- selected = set()
- selected_add = selected.add
- for i in xrange(k):
- j = _int(random() * n)
- while j in selected:
+ selected = set()
+ selected_add = selected.add
+ for i in xrange(k):
j = _int(random() * n)
- selected_add(j)
- result[i] = population[j]
+ while j in selected:
+ j = _int(random() * n)
+ selected_add(j)
+ result[i] = population[j]
+ except (TypeError, KeyError): # handle (at least) sets
+ if isinstance(population, list):
+ raise
+ return self.sample(tuple(population), k)
return result
## -------------------- real-valued distributions -------------------
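The practical effect of the new except branch: sets can now be sampled directly, since the retry converts them to a tuple (a sketch; which elements come back depends on the seed and hashing):

    import random

    random.seed(1234)
    pop = set(xrange(100))       # not indexable, triggers the TypeError path
    print random.sample(pop, 5)  # five distinct elements from pop
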
diff --git a/Lib/reconvert.py b/Lib/reconvert.py
deleted file mode 100755
index 64bab5b..0000000
--- a/Lib/reconvert.py
+++ /dev/null
@@ -1,192 +0,0 @@
-#! /usr/bin/env python
-
-r"""Convert old ("regex") regular expressions to new syntax ("re").
-
-When imported as a module, there are two functions, with their own
-strings:
-
- convert(s, syntax=None) -- convert a regex regular expression to re syntax
-
- quote(s) -- return a quoted string literal
-
-When used as a script, read a Python string literal (or any other
-expression evaluating to a string) from stdin, and write the
-translated expression to stdout as a string literal. Unless stdout is
-a tty, no trailing \n is written to stdout. This is done so that it
-can be used with Emacs C-U M-| (shell-command-on-region with argument
-which filters the region through the shell command).
-
-No attempt has been made at coding for performance.
-
-Translation table...
-
- \( ( (unless RE_NO_BK_PARENS set)
- \) ) (unless RE_NO_BK_PARENS set)
- \| | (unless RE_NO_BK_VBAR set)
- \< \b (not quite the same, but alla...)
- \> \b (not quite the same, but alla...)
- \` \A
- \' \Z
-
-Not translated...
-
- .
- ^
- $
- *
- + (unless RE_BK_PLUS_QM set, then to \+)
- ? (unless RE_BK_PLUS_QM set, then to \?)
- \
- \b
- \B
- \w
- \W
- \1 ... \9
-
-Special cases...
-
- Non-printable characters are always replaced by their 3-digit
- escape code (except \t, \n, \r, which use mnemonic escapes)
-
- Newline is turned into | when RE_NEWLINE_OR is set
-
-XXX To be done...
-
- [...] (different treatment of backslashed items?)
- [^...] (different treatment of backslashed items?)
- ^ $ * + ? (in some error contexts these are probably treated differently)
- \vDD \DD (in the regex docs but only works when RE_ANSI_HEX set)
-
-"""
-
-
-import warnings
-warnings.filterwarnings("ignore", ".* regex .*", DeprecationWarning, __name__,
- append=1)
-
-import regex
-from regex_syntax import * # RE_*
-
-__all__ = ["convert","quote"]
-
-# Default translation table
-mastertable = {
- r'\<': r'\b',
- r'\>': r'\b',
- r'\`': r'\A',
- r'\'': r'\Z',
- r'\(': '(',
- r'\)': ')',
- r'\|': '|',
- '(': r'\(',
- ')': r'\)',
- '|': r'\|',
- '\t': r'\t',
- '\n': r'\n',
- '\r': r'\r',
-}
-
-
-def convert(s, syntax=None):
- """Convert a regex regular expression to re syntax.
-
- The first argument is the regular expression, as a string object,
- just like it would be passed to regex.compile(). (I.e., pass the
- actual string object -- string quotes must already have been
- removed and the standard escape processing has already been done,
- e.g. by eval().)
-
- The optional second argument is the regex syntax variant to be
- used. This is an integer mask as passed to regex.set_syntax();
- the flag bits are defined in regex_syntax. When not specified, or
- when None is given, the current regex syntax mask (as retrieved by
- regex.get_syntax()) is used -- which is 0 by default.
-
- The return value is a regular expression, as a string object that
- could be passed to re.compile(). (I.e., no string quotes have
- been added -- use quote() below, or repr().)
-
- The conversion is not always guaranteed to be correct. More
- syntactical analysis should be performed to detect borderline
- cases and decide what to do with them. For example, 'x*?' is not
- translated correctly.
-
- """
- table = mastertable.copy()
- if syntax is None:
- syntax = regex.get_syntax()
- if syntax & RE_NO_BK_PARENS:
- del table[r'\('], table[r'\)']
- del table['('], table[')']
- if syntax & RE_NO_BK_VBAR:
- del table[r'\|']
- del table['|']
- if syntax & RE_BK_PLUS_QM:
- table['+'] = r'\+'
- table['?'] = r'\?'
- table[r'\+'] = '+'
- table[r'\?'] = '?'
- if syntax & RE_NEWLINE_OR:
- table['\n'] = '|'
- res = ""
-
- i = 0
- end = len(s)
- while i < end:
- c = s[i]
- i = i+1
- if c == '\\':
- c = s[i]
- i = i+1
- key = '\\' + c
- key = table.get(key, key)
- res = res + key
- else:
- c = table.get(c, c)
- res = res + c
- return res
-
-
-def quote(s, quote=None):
- """Convert a string object to a quoted string literal.
-
- This is similar to repr() but will return a "raw" string (r'...'
- or r"...") when the string contains backslashes, instead of
- doubling all backslashes. The resulting string does *not* always
- evaluate to the same string as the original; however it will do
- just the right thing when passed into re.compile().
-
- The optional second argument forces the string quote; it must be
- a single character which is a valid Python string quote.
-
- """
- if quote is None:
- q = "'"
- altq = "'"
- if q in s and altq not in s:
- q = altq
- else:
- assert quote in ('"', "'", '"""', "'''")
- q = quote
- res = q
- for c in s:
- if c == q: c = '\\' + c
- elif c < ' ' or c > '~': c = "\\%03o" % ord(c)
- res = res + c
- res = res + q
- if '\\' in res:
- res = 'r' + res
- return res
-
-
-def main():
- """Main program -- called when run as a script."""
- import sys
- s = eval(sys.stdin.read())
- sys.stdout.write(quote(convert(s)))
- if sys.stdout.isatty():
- sys.stdout.write("\n")
-
-
-if __name__ == '__main__':
- main()
diff --git a/Lib/regex_syntax.py b/Lib/regex_syntax.py
deleted file mode 100644
index b0a0dbf..0000000
--- a/Lib/regex_syntax.py
+++ /dev/null
@@ -1,53 +0,0 @@
-"""Constants for selecting regexp syntaxes for the obsolete regex module.
-
-This module is only for backward compatibility. "regex" has now
-been replaced by the new regular expression module, "re".
-
-These bits are passed to regex.set_syntax() to choose among
-alternative regexp syntaxes.
-"""
-
-# 1 means plain parentheses serve as grouping, and backslash
-# parentheses are needed for literal searching.
-# 0 means backslash-parentheses are grouping, and plain parentheses
-# are for literal searching.
-RE_NO_BK_PARENS = 1
-
-# 1 means plain | serves as the "or"-operator, and \| is a literal.
-# 0 means \| serves as the "or"-operator, and | is a literal.
-RE_NO_BK_VBAR = 2
-
-# 0 means plain + or ? serves as an operator, and \+, \? are literals.
-# 1 means \+, \? are operators and plain +, ? are literals.
-RE_BK_PLUS_QM = 4
-
-# 1 means | binds tighter than ^ or $.
-# 0 means the contrary.
-RE_TIGHT_VBAR = 8
-
-# 1 means treat \n as an _OR operator
-# 0 means treat it as a normal character
-RE_NEWLINE_OR = 16
-
-# 0 means that a special characters (such as *, ^, and $) always have
-# their special meaning regardless of the surrounding context.
-# 1 means that special characters may act as normal characters in some
-# contexts. Specifically, this applies to:
-# ^ - only special at the beginning, or after ( or |
-# $ - only special at the end, or before ) or |
-# *, +, ? - only special when not after the beginning, (, or |
-RE_CONTEXT_INDEP_OPS = 32
-
-# ANSI sequences (\n etc) and \xhh
-RE_ANSI_HEX = 64
-
-# No GNU extensions
-RE_NO_GNU_EXTENSIONS = 128
-
-# Now define combinations of bits for the standard possibilities.
-RE_SYNTAX_AWK = (RE_NO_BK_PARENS | RE_NO_BK_VBAR | RE_CONTEXT_INDEP_OPS)
-RE_SYNTAX_EGREP = (RE_SYNTAX_AWK | RE_NEWLINE_OR)
-RE_SYNTAX_GREP = (RE_BK_PLUS_QM | RE_NEWLINE_OR)
-RE_SYNTAX_EMACS = 0
-
-# (Python's obsolete "regexp" module used a syntax similar to awk.)
diff --git a/Lib/regsub.py b/Lib/regsub.py
deleted file mode 100644
index 0fc10a5..0000000
--- a/Lib/regsub.py
+++ /dev/null
@@ -1,198 +0,0 @@
-"""Regexp-based split and replace using the obsolete regex module.
-
-This module is only for backward compatibility. These operations
-are now provided by the new regular expression module, "re".
-
-sub(pat, repl, str): replace first occurrence of pattern in string
-gsub(pat, repl, str): replace all occurrences of pattern in string
-split(str, pat, maxsplit): split string using pattern as delimiter
-splitx(str, pat, maxsplit): split string using pattern as delimiter plus
- return delimiters
-"""
-
-import warnings
-warnings.warn("the regsub module is deprecated; please use re.sub()",
- DeprecationWarning)
-
-# Ignore further deprecation warnings about this module
-warnings.filterwarnings("ignore", "", DeprecationWarning, __name__)
-
-import regex
-
-__all__ = ["sub","gsub","split","splitx","capwords"]
-
-# Replace first occurrence of pattern pat in string str by replacement
-# repl. If the pattern isn't found, the string is returned unchanged.
-# The replacement may contain references \digit to subpatterns and
-# escaped backslashes. The pattern may be a string or an already
-# compiled pattern.
-
-def sub(pat, repl, str):
- prog = compile(pat)
- if prog.search(str) >= 0:
- regs = prog.regs
- a, b = regs[0]
- str = str[:a] + expand(repl, regs, str) + str[b:]
- return str
-
-
-# Replace all (non-overlapping) occurrences of pattern pat in string
-# str by replacement repl. The same rules as for sub() apply.
-# Empty matches for the pattern are replaced only when not adjacent to
-# a previous match, so e.g. gsub('', '-', 'abc') returns '-a-b-c-'.
-
-def gsub(pat, repl, str):
- prog = compile(pat)
- new = ''
- start = 0
- first = 1
- while prog.search(str, start) >= 0:
- regs = prog.regs
- a, b = regs[0]
- if a == b == start and not first:
- if start >= len(str) or prog.search(str, start+1) < 0:
- break
- regs = prog.regs
- a, b = regs[0]
- new = new + str[start:a] + expand(repl, regs, str)
- start = b
- first = 0
- new = new + str[start:]
- return new
-
-
-# Split string str in fields separated by delimiters matching pattern
-# pat. Only non-empty matches for the pattern are considered, so e.g.
-# split('abc', '') returns ['abc'].
-# The optional 3rd argument sets the number of splits that are performed.
-
-def split(str, pat, maxsplit = 0):
- return intsplit(str, pat, maxsplit, 0)
-
-# Split string str in fields separated by delimiters matching pattern
-# pat. Only non-empty matches for the pattern are considered, so e.g.
-# split('abc', '') returns ['abc']. The delimiters are also included
-# in the list.
-# The optional 3rd argument sets the number of splits that are performed.
-
-
-def splitx(str, pat, maxsplit = 0):
- return intsplit(str, pat, maxsplit, 1)
-
-# Internal function used to implement split() and splitx().
-
-def intsplit(str, pat, maxsplit, retain):
- prog = compile(pat)
- res = []
- start = next = 0
- splitcount = 0
- while prog.search(str, next) >= 0:
- regs = prog.regs
- a, b = regs[0]
- if a == b:
- next = next + 1
- if next >= len(str):
- break
- else:
- res.append(str[start:a])
- if retain:
- res.append(str[a:b])
- start = next = b
- splitcount = splitcount + 1
- if (maxsplit and (splitcount >= maxsplit)):
- break
- res.append(str[start:])
- return res
-
-
-# Capitalize words split using a pattern
-
-def capwords(str, pat='[^a-zA-Z0-9_]+'):
- words = splitx(str, pat)
- for i in range(0, len(words), 2):
- words[i] = words[i].capitalize()
- return "".join(words)
-
-
-# Internal subroutines:
-# compile(pat): compile a pattern, caching already compiled patterns
-# expand(repl, regs, str): expand \digit escapes in replacement string
-
-
-# Manage a cache of compiled regular expressions.
-#
-# If the pattern is a string a compiled version of it is returned. If
-# the pattern has been used before we return an already compiled
-# version from the cache; otherwise we compile it now and save the
-# compiled version in the cache, along with the syntax it was compiled
-# with. Instead of a string, a compiled regular expression can also
-# be passed.
-
-cache = {}
-
-def compile(pat):
- if type(pat) != type(''):
- return pat # Assume it is a compiled regex
- key = (pat, regex.get_syntax())
- if key in cache:
- prog = cache[key] # Get it from the cache
- else:
- prog = cache[key] = regex.compile(pat)
- return prog
-
-
-def clear_cache():
- global cache
- cache = {}
-
-
-# Expand \digit in the replacement.
-# Each occurrence of \digit is replaced by the substring of str
-# indicated by regs[digit]. To include a literal \ in the
-# replacement, double it; other \ escapes are left unchanged (i.e.
-# the \ and the following character are both copied).
-
-def expand(repl, regs, str):
- if '\\' not in repl:
- return repl
- new = ''
- i = 0
- ord0 = ord('0')
- while i < len(repl):
- c = repl[i]; i = i+1
- if c != '\\' or i >= len(repl):
- new = new + c
- else:
- c = repl[i]; i = i+1
- if '0' <= c <= '9':
- a, b = regs[ord(c)-ord0]
- new = new + str[a:b]
- elif c == '\\':
- new = new + c
- else:
- new = new + '\\' + c
- return new
-
-
-# Test program, reads sequences "pat repl str" from stdin.
-# Optional argument specifies pattern used to split lines.
-
-def test():
- import sys
- if sys.argv[1:]:
- delpat = sys.argv[1]
- else:
- delpat = '[ \t\n]+'
- while 1:
- if sys.stdin.isatty(): sys.stderr.write('--> ')
- line = sys.stdin.readline()
- if not line: break
- if line[-1] == '\n': line = line[:-1]
- fields = split(line, delpat)
- if len(fields) != 3:
- print 'Sorry, not three fields'
- print 'split:', repr(fields)
- continue
- [pat, repl, str] = split(line, delpat)
- print 'sub :', repr(sub(pat, repl, str))
- print 'gsub:', repr(gsub(pat, repl, str))
diff --git a/Lib/rexec.py b/Lib/rexec.py
index ed01d24..10e4bc0 100644
--- a/Lib/rexec.py
+++ b/Lib/rexec.py
@@ -136,7 +136,7 @@ class RExec(ihooks._Verbose):
ok_builtin_modules = ('audioop', 'array', 'binascii',
'cmath', 'errno', 'imageop',
'marshal', 'math', 'md5', 'operator',
- 'parser', 'regex', 'select',
+ 'parser', 'select',
'sha', '_sre', 'strop', 'struct', 'time',
'_weakref')
diff --git a/Lib/runpy.py b/Lib/runpy.py
index afb0098..8290dfe 100755
--- a/Lib/runpy.py
+++ b/Lib/runpy.py
@@ -11,349 +11,15 @@ importers when locating support scripts as well as when importing modules.
import sys
import imp
+try:
+ from imp import get_loader
+except ImportError:
+ from pkgutil import get_loader
__all__ = [
"run_module",
]
-try:
- _get_loader = imp.get_loader
-except AttributeError:
- # get_loader() is not provided by the imp module, so emulate it
- # as best we can using the PEP 302 import machinery exposed since
- # Python 2.3. The emulation isn't perfect, but the differences
- # in the way names are shadowed shouldn't matter in practice.
- import os.path
- import marshal # Handle compiled Python files
-
- # This helper is needed in order for the PEP 302 emulation to
- # correctly handle compiled files
- def _read_compiled_file(compiled_file):
- magic = compiled_file.read(4)
- if magic != imp.get_magic():
- return None
- try:
- compiled_file.read(4) # Skip timestamp
- return marshal.load(compiled_file)
- except Exception:
- return None
-
- class _AbsoluteImporter(object):
- """PEP 302 importer wrapper for top level import machinery"""
- def find_module(self, mod_name, path=None):
- if path is not None:
- return None
- try:
- file, filename, mod_info = imp.find_module(mod_name)
- except ImportError:
- return None
- suffix, mode, mod_type = mod_info
- if mod_type == imp.PY_SOURCE:
- loader = _SourceFileLoader(mod_name, file,
- filename, mod_info)
- elif mod_type == imp.PY_COMPILED:
- loader = _CompiledFileLoader(mod_name, file,
- filename, mod_info)
- elif mod_type == imp.PKG_DIRECTORY:
- loader = _PackageDirLoader(mod_name, file,
- filename, mod_info)
- elif mod_type == imp.C_EXTENSION:
- loader = _FileSystemLoader(mod_name, file,
- filename, mod_info)
- else:
- loader = _BasicLoader(mod_name, file,
- filename, mod_info)
- return loader
-
-
- class _FileSystemImporter(object):
- """PEP 302 importer wrapper for filesystem based imports"""
- def __init__(self, path_item=None):
- if path_item is not None:
- if path_item != '' and not os.path.isdir(path_item):
- raise ImportError("%s is not a directory" % path_item)
- self.path_dir = path_item
- else:
- raise ImportError("Filesystem importer requires "
- "a directory name")
-
- def find_module(self, mod_name, path=None):
- if path is not None:
- return None
- path_dir = self.path_dir
- if path_dir == '':
- path_dir = os.getcwd()
- sub_name = mod_name.rsplit(".", 1)[-1]
- try:
- file, filename, mod_info = imp.find_module(sub_name,
- [path_dir])
- except ImportError:
- return None
- if not filename.startswith(path_dir):
- return None
- suffix, mode, mod_type = mod_info
- if mod_type == imp.PY_SOURCE:
- loader = _SourceFileLoader(mod_name, file,
- filename, mod_info)
- elif mod_type == imp.PY_COMPILED:
- loader = _CompiledFileLoader(mod_name, file,
- filename, mod_info)
- elif mod_type == imp.PKG_DIRECTORY:
- loader = _PackageDirLoader(mod_name, file,
- filename, mod_info)
- elif mod_type == imp.C_EXTENSION:
- loader = _FileSystemLoader(mod_name, file,
- filename, mod_info)
- else:
- loader = _BasicLoader(mod_name, file,
- filename, mod_info)
- return loader
-
-
- class _BasicLoader(object):
- """PEP 302 loader wrapper for top level import machinery"""
- def __init__(self, mod_name, file, filename, mod_info):
- self.mod_name = mod_name
- self.file = file
- self.filename = filename
- self.mod_info = mod_info
-
- def _fix_name(self, mod_name):
- if mod_name is None:
- mod_name = self.mod_name
- elif mod_name != self.mod_name:
- raise ImportError("Loader for module %s cannot handle "
- "module %s" % (self.mod_name, mod_name))
- return mod_name
-
- def load_module(self, mod_name=None):
- mod_name = self._fix_name(mod_name)
- mod = imp.load_module(mod_name, self.file,
- self.filename, self.mod_info)
- mod.__loader__ = self # for introspection
- return mod
-
- def get_code(self, mod_name=None):
- return None
-
- def get_source(self, mod_name=None):
- return None
-
- def is_package(self, mod_name=None):
- return False
-
- def close(self):
- if self.file:
- self.file.close()
-
- def __del__(self):
- self.close()
-
-
- class _FileSystemLoader(_BasicLoader):
- """PEP 302 loader wrapper for filesystem based imports"""
- def get_code(self, mod_name=None):
- mod_name = self._fix_name(mod_name)
- return self._get_code(mod_name)
-
- def get_data(self, pathname):
- return open(pathname, "rb").read()
-
- def get_filename(self, mod_name=None):
- mod_name = self._fix_name(mod_name)
- return self._get_filename(mod_name)
-
- def get_source(self, mod_name=None):
- mod_name = self._fix_name(mod_name)
- return self._get_source(mod_name)
-
- def is_package(self, mod_name=None):
- mod_name = self._fix_name(mod_name)
- return self._is_package(mod_name)
-
- def _get_code(self, mod_name):
- return None
-
- def _get_filename(self, mod_name):
- return self.filename
-
- def _get_source(self, mod_name):
- return None
-
- def _is_package(self, mod_name):
- return False
-
- class _PackageDirLoader(_FileSystemLoader):
- """PEP 302 loader wrapper for PKG_DIRECTORY directories"""
- def _is_package(self, mod_name):
- return True
-
-
- class _SourceFileLoader(_FileSystemLoader):
- """PEP 302 loader wrapper for PY_SOURCE modules"""
- def _get_code(self, mod_name):
- return compile(self._get_source(mod_name),
- self.filename, 'exec')
-
- def _get_source(self, mod_name):
- f = self.file
- f.seek(0)
- return f.read()
-
-
- class _CompiledFileLoader(_FileSystemLoader):
- """PEP 302 loader wrapper for PY_COMPILED modules"""
- def _get_code(self, mod_name):
- f = self.file
- f.seek(0)
- return _read_compiled_file(f)
-
-
- def _get_importer(path_item):
- """Retrieve a PEP 302 importer for the given path item
-
- The returned importer is cached in sys.path_importer_cache
- if it was newly created by a path hook.
-
- If there is no importer, a wrapper around the basic import
- machinery is returned. This wrapper is never inserted into
- the importer cache (None is inserted instead).
-
- The cache (or part of it) can be cleared manually if a
- rescan of sys.path_hooks is necessary.
- """
- try:
- importer = sys.path_importer_cache[path_item]
- except KeyError:
- for path_hook in sys.path_hooks:
- try:
- importer = path_hook(path_item)
- break
- except ImportError:
- pass
- else:
- importer = None
- sys.path_importer_cache[path_item] = importer
- if importer is None:
- try:
- importer = _FileSystemImporter(path_item)
- except ImportError:
- pass
- return importer
-
-
- def _get_path_loader(mod_name, path=None):
- """Retrieve a PEP 302 loader using a path importer"""
- if path is None:
- path = sys.path
- absolute_loader = _AbsoluteImporter().find_module(mod_name)
- if isinstance(absolute_loader, _FileSystemLoader):
- # Found in filesystem, so scan path hooks
- # before accepting this one as the right one
- loader = None
- else:
- # Not found in filesystem, so use top-level loader
- loader = absolute_loader
- else:
- loader = absolute_loader = None
- if loader is None:
- for path_item in path:
- importer = _get_importer(path_item)
- if importer is not None:
- loader = importer.find_module(mod_name)
- if loader is not None:
- # Found a loader for our module
- break
- else:
- # No path hook found, so accept the top level loader
- loader = absolute_loader
- return loader
-
- def _get_package(pkg_name):
- """Retrieve a named package"""
- pkg = __import__(pkg_name)
- sub_pkg_names = pkg_name.split(".")
- for sub_pkg in sub_pkg_names[1:]:
- pkg = getattr(pkg, sub_pkg)
- return pkg
-
- def _get_loader(mod_name, path=None):
- """Retrieve a PEP 302 loader for the given module or package
-
- If the module or package is accessible via the normal import
- mechanism, a wrapper around the relevant part of that machinery
- is returned.
-
- Non PEP 302 mechanisms (e.g. the Windows registry) used by the
- standard import machinery to find files in alternative locations
- are partially supported, but are searched AFTER sys.path. Normally,
- these locations are searched BEFORE sys.path, preventing sys.path
- entries from shadowing them.
- For this to cause a visible difference in behaviour, there must
- be a module or package name that is accessible via both sys.path
- and one of the non PEP 302 file system mechanisms. In this case,
- the emulation will find the former version, while the builtin
- import mechanism will find the latter.
- Items of the following types can be affected by this discrepancy:
- imp.C_EXTENSION
- imp.PY_SOURCE
- imp.PY_COMPILED
- imp.PKG_DIRECTORY
- """
- try:
- loader = sys.modules[mod_name].__loader__
- except (KeyError, AttributeError):
- loader = None
- if loader is None:
- imp.acquire_lock()
- try:
- # Module not in sys.modules, or uses an unhooked loader
- parts = mod_name.rsplit(".", 1)
- if len(parts) == 2:
- # Sub package, so use parent package's path
- pkg_name, sub_name = parts
- if pkg_name and pkg_name[0] != '.':
- if path is not None:
- raise ImportError("Path argument must be None "
- "for a dotted module name")
- pkg = _get_package(pkg_name)
- try:
- path = pkg.__path__
- except AttributeError:
- raise ImportError(pkg_name +
- " is not a package")
- else:
- raise ImportError("Relative import syntax is not "
- "supported by _get_loader()")
- else:
- # Top level module, so stick with default path
- sub_name = mod_name
-
- for importer in sys.meta_path:
- loader = importer.find_module(mod_name, path)
- if loader is not None:
- # Found a metahook to handle the module
- break
- else:
- # Handling via the standard path mechanism
- loader = _get_path_loader(mod_name, path)
- finally:
- imp.release_lock()
- return loader
-
-
-# This helper is needed due to a missing component in the PEP 302
-# loader protocol (specifically, "get_filename" is non-standard)
-def _get_filename(loader, mod_name):
- try:
- get_filename = loader.get_filename
- except AttributeError:
- return None
- else:
- return get_filename(mod_name)
-
-# ------------------------------------------------------------
-# Done with the import machinery emulation, on with the code!
def _run_code(code, run_globals, init_globals,
mod_name, mod_fname, mod_loader):
@@ -379,21 +45,17 @@ def _run_module_code(code, init_globals=None,
restore_module = mod_name in sys.modules
if restore_module:
saved_module = sys.modules[mod_name]
- imp.acquire_lock()
+ sys.argv[0] = mod_fname
+ sys.modules[mod_name] = temp_module
try:
- sys.argv[0] = mod_fname
- sys.modules[mod_name] = temp_module
- try:
- _run_code(code, mod_globals, init_globals,
- mod_name, mod_fname, mod_loader)
- finally:
- sys.argv[0] = saved_argv0
- if restore_module:
- sys.modules[mod_name] = saved_module
- else:
- del sys.modules[mod_name]
+ _run_code(code, mod_globals, init_globals,
+ mod_name, mod_fname, mod_loader)
finally:
- imp.release_lock()
+ sys.argv[0] = saved_argv0
+ if restore_module:
+ sys.modules[mod_name] = saved_module
+ else:
+ del sys.modules[mod_name]
# Copy the globals of the temporary module, as they
# may be cleared when the temporary module goes away
return mod_globals.copy()
@@ -403,13 +65,24 @@ def _run_module_code(code, init_globals=None,
mod_name, mod_fname, mod_loader)
+# This helper is needed due to a missing component in the PEP 302
+# loader protocol (specifically, "get_filename" is non-standard)
+def _get_filename(loader, mod_name):
+ try:
+ get_filename = loader.get_filename
+ except AttributeError:
+ return None
+ else:
+ return get_filename(mod_name)
+
+
def run_module(mod_name, init_globals=None,
run_name=None, alter_sys=False):
"""Execute a module's code without importing it
Returns the resulting top level namespace dictionary
"""
- loader = _get_loader(mod_name)
+ loader = get_loader(mod_name)
if loader is None:
raise ImportError("No module named " + mod_name)
code = loader.get_code(mod_name)
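
The public entry point is unchanged by this hunk; run_module() simply resolves its loader through get_loader() now. A minimal usage sketch (the module chosen is arbitrary):

    import runpy

    # Execute the stdlib 'platform' module's code without importing it;
    # the resulting top-level namespace comes back as a dictionary.
    ns = runpy.run_module('platform')
    print ns['python_version']()
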
diff --git a/Lib/setuptools.egg-info/PKG-INFO b/Lib/setuptools.egg-info/PKG-INFO
new file mode 100644
index 0000000..ff5c1a1
--- /dev/null
+++ b/Lib/setuptools.egg-info/PKG-INFO
@@ -0,0 +1,89 @@
+Metadata-Version: 1.0
+Name: setuptools
+Version: 0.7a1dev-r45536
+Summary: Download, build, install, upgrade, and uninstall Python packages -- easily!
+Home-page: http://peak.telecommunity.com/DevCenter/setuptools
+Author: Phillip J. Eby
+Author-email: peak@eby-sarna.com
+License: PSF or ZPL
+Description: ``setuptools`` is a collection of enhancements to the Python ``distutils``
+ (for Python 2.3.5 and up on most platforms; 64-bit platforms require a minimum
+ of Python 2.4) that allow you to more easily build and distribute Python
+ packages, especially ones that have dependencies on other packages.
+
+ Packages built and distributed using ``setuptools`` look to the user like
+ ordinary Python packages based on the ``distutils``. Your users don't need to
+ install or even know about setuptools in order to use them, and you don't
+ have to include the entire setuptools package in your distributions. By
+ including just a single `bootstrap module`_ (an 8K .py file), your package will
+ automatically download and install ``setuptools`` if the user is building your
+ package from source and doesn't have a suitable version already installed.
+
+ .. _bootstrap module: http://peak.telecommunity.com/dist/ez_setup.py
+
+ Feature Highlights:
+
+ * Automatically find/download/install/upgrade dependencies at build time using
+ the `EasyInstall tool <http://peak.telecommunity.com/DevCenter/EasyInstall>`_,
+ which supports downloading via HTTP, FTP, Subversion, and SourceForge, and
+ automatically scans web pages linked from PyPI to find download links. (It's
+ the closest thing to CPAN currently available for Python.)
+
+ * Create `Python Eggs <http://peak.telecommunity.com/DevCenter/PythonEggs>`_ -
+ a single-file importable distribution format
+
+ * Include data files inside your package directories, where your code can
+ actually use them. (Python 2.4 distutils also supports this feature, but
+ setuptools provides the feature for Python 2.3 packages also, and supports
+ accessing data files in zipped packages too.)
+
+ * Automatically include all packages in your source tree, without listing them
+ individually in setup.py
+
+ * Automatically include all relevant files in your source distributions,
+ without needing to create a ``MANIFEST.in`` file, and without having to force
+ regeneration of the ``MANIFEST`` file when your source tree changes.
+
+ * Automatically generate wrapper scripts or Windows (console and GUI) .exe
+ files for any number of "main" functions in your project. (Note: this is not
+ a py2exe replacement; the .exe files rely on the local Python installation.)
+
+ * Transparent Pyrex support, so that your setup.py can list ``.pyx`` files and
+ still work even when the end-user doesn't have Pyrex installed (as long as
+ you include the Pyrex-generated C in your source distribution)
+
+ * Command aliases - create project-specific, per-user, or site-wide shortcut
+ names for commonly used commands and options
+
+ * PyPI upload support - upload your source distributions and eggs to PyPI
+
+ * Deploy your project in "development mode", such that it's available on
+ ``sys.path``, yet can still be edited directly from its source checkout.
+
+ * Easily extend the distutils with new commands or ``setup()`` arguments, and
+ distribute/reuse your extensions for multiple projects, without copying code.
+
+ * Create extensible applications and frameworks that automatically discover
+ extensions, using simple "entry points" declared in a project's setup script.
+
+ In addition to the PyPI downloads, the development version of ``setuptools``
+ is available from the `Python SVN sandbox`_, and in-development versions of the
+ `0.6 branch`_ are available as well.
+
+ .. _0.6 branch: http://svn.python.org/projects/sandbox/branches/setuptools-0.6/#egg=setuptools-dev06
+
+ .. _Python SVN sandbox: http://svn.python.org/projects/sandbox/trunk/setuptools/#egg=setuptools-dev
+
+
+Keywords: CPAN PyPI distutils eggs package management
+Platform: UNKNOWN
+Classifier: Development Status :: 3 - Alpha
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Python Software Foundation License
+Classifier: License :: OSI Approved :: Zope Public License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: System :: Archiving :: Packaging
+Classifier: Topic :: System :: Systems Administration
+Classifier: Topic :: Utilities
diff --git a/Lib/setuptools.egg-info/entry_points.txt b/Lib/setuptools.egg-info/entry_points.txt
new file mode 100755
index 0000000..0afe2cb
--- /dev/null
+++ b/Lib/setuptools.egg-info/entry_points.txt
@@ -0,0 +1,51 @@
+[distutils.setup_keywords]
+dependency_links = setuptools.dist:assert_string_list
+entry_points = setuptools.dist:check_entry_points
+extras_require = setuptools.dist:check_extras
+package_data = setuptools.dist:check_package_data
+install_requires = setuptools.dist:check_requirements
+include_package_data = setuptools.dist:assert_bool
+exclude_package_data = setuptools.dist:check_package_data
+namespace_packages = setuptools.dist:check_nsp
+test_suite = setuptools.dist:check_test_suite
+eager_resources = setuptools.dist:assert_string_list
+zip_safe = setuptools.dist:assert_bool
+test_loader = setuptools.dist:check_importable
+tests_require = setuptools.dist:check_requirements
+
+[setuptools.file_finders]
+svn_cvs = setuptools.command.sdist:_default_revctrl
+
+[egg_info.writers]
+dependency_links.txt = setuptools.command.egg_info:overwrite_arg
+requires.txt = setuptools.command.egg_info:write_requirements
+PKG-INFO = setuptools.command.egg_info:write_pkg_info
+eager_resources.txt = setuptools.command.egg_info:overwrite_arg
+top_level.txt = setuptools.command.egg_info:write_toplevel_names
+namespace_packages.txt = setuptools.command.egg_info:overwrite_arg
+entry_points.txt = setuptools.command.egg_info:write_entries
+depends.txt = setuptools.command.egg_info:warn_depends_obsolete
+
+[console_scripts]
+easy_install = setuptools.command.easy_install:main
+easy_install-2.5 = setuptools.command.easy_install:main
+
+[distutils.commands]
+bdist_rpm = setuptools.command.bdist_rpm:bdist_rpm
+rotate = setuptools.command.rotate:rotate
+develop = setuptools.command.develop:develop
+setopt = setuptools.command.setopt:setopt
+build_py = setuptools.command.build_py:build_py
+saveopts = setuptools.command.saveopts:saveopts
+egg_info = setuptools.command.egg_info:egg_info
+install_egg_info = setuptools.command.install_egg_info:install_egg_info
+alias = setuptools.command.alias:alias
+easy_install = setuptools.command.easy_install:easy_install
+install_scripts = setuptools.command.install_scripts:install_scripts
+bdist_egg = setuptools.command.bdist_egg:bdist_egg
+install = setuptools.command.install:install
+test = setuptools.command.test:test
+install_lib = setuptools.command.install_lib:install_lib
+build_ext = setuptools.command.build_ext:build_ext
+sdist = setuptools.command.sdist:sdist
+
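
For context, these INI-style sections are read back at run time through pkg_resources; a sketch of enumerating one group, assuming the iter_entry_points API provided by this setuptools version:

    import pkg_resources

    # List each command contributed via the [distutils.commands] section.
    for ep in pkg_resources.iter_entry_points('distutils.commands'):
        print ep.name, '=', '%s:%s' % (ep.module_name, '.'.join(ep.attrs))
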
diff --git a/Lib/setuptools.egg-info/top_level.txt b/Lib/setuptools.egg-info/top_level.txt
new file mode 100644
index 0000000..4577c6a
--- /dev/null
+++ b/Lib/setuptools.egg-info/top_level.txt
@@ -0,0 +1,3 @@
+easy_install
+pkg_resources
+setuptools
diff --git a/Lib/setuptools.egg-info/zip-safe b/Lib/setuptools.egg-info/zip-safe
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/Lib/setuptools.egg-info/zip-safe
diff --git a/Lib/setuptools/__init__.py b/Lib/setuptools/__init__.py
new file mode 100644
index 0000000..3921ce2
--- /dev/null
+++ b/Lib/setuptools/__init__.py
@@ -0,0 +1,64 @@
+"""Extensions to the 'distutils' for large or complex distributions"""
+from setuptools.extension import Extension, Library
+from setuptools.dist import Distribution, Feature, _get_unpatched
+import distutils.core, setuptools.command
+from setuptools.depends import Require
+from distutils.core import Command as _Command
+from distutils.util import convert_path
+import os.path
+
+__version__ = '0.7a1'
+__all__ = [
+ 'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
+ 'find_packages'
+]
+
+bootstrap_install_from = None
+
+def find_packages(where='.', exclude=()):
+    """Return a list of all Python packages found within directory 'where'
+
+ 'where' should be supplied as a "cross-platform" (i.e. URL-style) path; it
+ will be converted to the appropriate local path syntax. 'exclude' is a
+ sequence of package names to exclude; '*' can be used as a wildcard in the
+ names, such that 'foo.*' will exclude all subpackages of 'foo' (but not
+ 'foo' itself).
+ """
+ out = []
+ stack=[(convert_path(where), '')]
+ while stack:
+ where,prefix = stack.pop(0)
+ for name in os.listdir(where):
+ fn = os.path.join(where,name)
+ if (os.path.isdir(fn) and
+ os.path.isfile(os.path.join(fn,'__init__.py'))
+ ):
+ out.append(prefix+name); stack.append((fn,prefix+name+'.'))
+ for pat in exclude:
+ from fnmatch import fnmatchcase
+ out = [item for item in out if not fnmatchcase(item,pat)]
+ return out
+
+setup = distutils.core.setup
+
+_Command = _get_unpatched(_Command)
+
+class Command(_Command):
+ __doc__ = _Command.__doc__
+
+ command_consumes_arguments = False
+
+ def __init__(self, dist, **kw):
+ # Add support for keyword arguments
+ _Command.__init__(self,dist)
+ for k,v in kw.items():
+ setattr(self,k,v)
+
+ def reinitialize_command(self, command, reinit_subcommands=0, **kw):
+ cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
+ for k,v in kw.items():
+ setattr(cmd,k,v) # update command with keywords
+ return cmd
+
+import distutils.core
+distutils.core.Command = Command # we can't patch distutils.cmd, alas
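
A usage sketch for find_packages() as defined above (the project layout and names are hypothetical):

    from setuptools import setup, find_packages

    # '*' in an exclude pattern matches subpackages but not the package
    # itself, so both forms are needed to drop 'example.tests' entirely.
    setup(
        name='example',
        version='0.1',
        packages=find_packages(exclude=['example.tests', 'example.tests.*']),
    )
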
diff --git a/Lib/setuptools/archive_util.py b/Lib/setuptools/archive_util.py
new file mode 100755
index 0000000..dd9c684
--- /dev/null
+++ b/Lib/setuptools/archive_util.py
@@ -0,0 +1,200 @@
+"""Utilities for extracting common archive formats"""
+
+
+__all__ = [
+ "unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter",
+ "UnrecognizedFormat", "extraction_drivers", "unpack_directory",
+]
+
+import zipfile, tarfile, os, shutil
+from pkg_resources import ensure_directory
+from distutils.errors import DistutilsError
+
+class UnrecognizedFormat(DistutilsError):
+ """Couldn't recognize the archive type"""
+
+def default_filter(src,dst):
+    """The default progress/filter callback; returns 'dst' unchanged for all files"""
+    return dst
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+def unpack_archive(filename, extract_dir, progress_filter=default_filter,
+ drivers=None
+):
+ """Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat``
+
+ `progress_filter` is a function taking two arguments: a source path
+ internal to the archive ('/'-separated), and a filesystem path where it
+ will be extracted. The callback must return the desired extract path
+ (which may be the same as the one passed in), or else ``None`` to skip
+ that file or directory. The callback can thus be used to report on the
+ progress of the extraction, as well as to filter the items extracted or
+ alter their extraction paths.
+
+ `drivers`, if supplied, must be a non-empty sequence of functions with the
+ same signature as this function (minus the `drivers` argument), that raise
+ ``UnrecognizedFormat`` if they do not support extracting the designated
+ archive type. The `drivers` are tried in sequence until one is found that
+ does not raise an error, or until all are exhausted (in which case
+    ``UnrecognizedFormat`` is raised).  If you do not supply a sequence of
+    drivers, the module's ``extraction_drivers`` constant will be used, which
+    means that ``unpack_directory``, ``unpack_zipfile`` and ``unpack_tarfile``
+    will be tried, in that order.
+ """
+ for driver in drivers or extraction_drivers:
+ try:
+ driver(filename, extract_dir, progress_filter)
+ except UnrecognizedFormat:
+ continue
+ else:
+ return
+ else:
+ raise UnrecognizedFormat(
+ "Not a recognized archive type: %s" % filename
+ )
+
+
+
+
+
+
+
+def unpack_directory(filename, extract_dir, progress_filter=default_filter):
+ """"Unpack" a directory, using the same interface as for archives
+
+ Raises ``UnrecognizedFormat`` if `filename` is not a directory
+ """
+ if not os.path.isdir(filename):
+ raise UnrecognizedFormat("%s is not a directory" % (filename,))
+
+ paths = {filename:('',extract_dir)}
+ for base, dirs, files in os.walk(filename):
+ src,dst = paths[base]
+ for d in dirs:
+ paths[os.path.join(base,d)] = src+d+'/', os.path.join(dst,d)
+ for f in files:
+ name = src+f
+ target = os.path.join(dst,f)
+ target = progress_filter(src+f, target)
+ if not target:
+                continue # entry skipped by the progress_filter callback
+ ensure_directory(target)
+ f = os.path.join(base,f)
+ shutil.copyfile(f, target)
+ shutil.copystat(f, target)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+def unpack_zipfile(filename, extract_dir, progress_filter=default_filter):
+ """Unpack zip `filename` to `extract_dir`
+
+ Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined
+ by ``zipfile.is_zipfile()``). See ``unpack_archive()`` for an explanation
+ of the `progress_filter` argument.
+ """
+
+ if not zipfile.is_zipfile(filename):
+ raise UnrecognizedFormat("%s is not a zip file" % (filename,))
+
+ z = zipfile.ZipFile(filename)
+ try:
+ for info in z.infolist():
+ name = info.filename
+
+ # don't extract absolute paths or ones with .. in them
+ if name.startswith('/') or '..' in name:
+ continue
+
+ target = os.path.join(extract_dir, *name.split('/'))
+ target = progress_filter(name, target)
+ if not target:
+ continue
+ if name.endswith('/'):
+ # directory
+ ensure_directory(target)
+ else:
+ # file
+ ensure_directory(target)
+ data = z.read(info.filename)
+ f = open(target,'wb')
+ try:
+ f.write(data)
+ finally:
+ f.close()
+ del data
+ finally:
+ z.close()
+
+
+def unpack_tarfile(filename, extract_dir, progress_filter=default_filter):
+ """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
+
+ Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined
+ by ``tarfile.open()``). See ``unpack_archive()`` for an explanation
+ of the `progress_filter` argument.
+ """
+
+ try:
+ tarobj = tarfile.open(filename)
+ except tarfile.TarError:
+ raise UnrecognizedFormat(
+ "%s is not a compressed or uncompressed tar file" % (filename,)
+ )
+
+ try:
+ tarobj.chown = lambda *args: None # don't do any chowning!
+ for member in tarobj:
+ if member.isfile() or member.isdir():
+ name = member.name
+ # don't extract absolute paths or ones with .. in them
+ if not name.startswith('/') and '..' not in name:
+ dst = os.path.join(extract_dir, *name.split('/'))
+ dst = progress_filter(name, dst)
+ if dst:
+ if dst.endswith(os.sep):
+ dst = dst[:-1]
+ tarobj._extract_member(member,dst) # XXX Ugh
+ return True
+ finally:
+ tarobj.close()
+
+
+
+
+extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile
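
A sketch of the progress_filter protocol documented in unpack_archive() above: the callback may observe, rename, or skip each entry (the archive name and paths here are illustrative):

    from setuptools.archive_util import unpack_archive

    def skip_docs(src, dst):
        # Returning None skips the entry; returning a path extracts to it.
        if src.startswith('docs/'):
            return None
        print 'extracting', src
        return dst

    unpack_archive('example-0.1.zip', 'build/example', skip_docs)
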
diff --git a/Lib/setuptools/cli.exe b/Lib/setuptools/cli.exe
new file mode 100755
index 0000000..fc83339
--- /dev/null
+++ b/Lib/setuptools/cli.exe
Binary files differ
diff --git a/Lib/setuptools/command/__init__.py b/Lib/setuptools/command/__init__.py
new file mode 100644
index 0000000..bff53e7
--- /dev/null
+++ b/Lib/setuptools/command/__init__.py
@@ -0,0 +1,19 @@
+__all__ = [
+ 'alias', 'bdist_egg', 'bdist_rpm', 'build_ext', 'build_py', 'develop',
+ 'easy_install', 'egg_info', 'install', 'install_lib', 'rotate', 'saveopts',
+ 'sdist', 'setopt', 'test', 'upload', 'install_egg_info', 'install_scripts',
+]
+
+import sys
+if sys.version>='2.5':
+ # In Python 2.5 and above, distutils includes its own upload command
+ __all__.remove('upload')
+
+
+from distutils.command.bdist import bdist
+
+if 'egg' not in bdist.format_commands:
+ bdist.format_command['egg'] = ('bdist_egg', "Python .egg file")
+ bdist.format_commands.append('egg')
+
+del bdist, sys
diff --git a/Lib/setuptools/command/alias.py b/Lib/setuptools/command/alias.py
new file mode 100755
index 0000000..1df474a
--- /dev/null
+++ b/Lib/setuptools/command/alias.py
@@ -0,0 +1,79 @@
+import distutils, os
+from setuptools import Command
+from distutils.util import convert_path
+from distutils import log
+from distutils.errors import *
+from setuptools.command.setopt import edit_config, option_base, config_file
+
+def shquote(arg):
+ """Quote an argument for later parsing by shlex.split()"""
+ for c in '"', "'", "\\", "#":
+ if c in arg: return repr(arg)
+ if arg.split()<>[arg]:
+ return repr(arg)
+ return arg
+
+
+class alias(option_base):
+ """Define a shortcut that invokes one or more commands"""
+
+ description = "define a shortcut to invoke one or more commands"
+ command_consumes_arguments = True
+
+ user_options = [
+ ('remove', 'r', 'remove (unset) the alias'),
+ ] + option_base.user_options
+
+ boolean_options = option_base.boolean_options + ['remove']
+
+ def initialize_options(self):
+ option_base.initialize_options(self)
+ self.args = None
+ self.remove = None
+
+ def finalize_options(self):
+ option_base.finalize_options(self)
+ if self.remove and len(self.args)<>1:
+ raise DistutilsOptionError(
+ "Must specify exactly one argument (the alias name) when "
+ "using --remove"
+ )
+
+ def run(self):
+ aliases = self.distribution.get_option_dict('aliases')
+
+ if not self.args:
+ print "Command Aliases"
+ print "---------------"
+ for alias in aliases:
+ print "setup.py alias", format_alias(alias, aliases)
+ return
+
+ elif len(self.args)==1:
+ alias, = self.args
+ if self.remove:
+ command = None
+ elif alias in aliases:
+ print "setup.py alias", format_alias(alias, aliases)
+ return
+ else:
+ print "No alias definition found for %r" % alias
+ return
+ else:
+ alias = self.args[0]
+ command = ' '.join(map(shquote,self.args[1:]))
+
+ edit_config(self.filename, {'aliases': {alias:command}}, self.dry_run)
+
+
+def format_alias(name, aliases):
+ source, command = aliases[name]
+ if source == config_file('global'):
+ source = '--global-config '
+ elif source == config_file('user'):
+ source = '--user-config '
+ elif source == config_file('local'):
+ source = ''
+ else:
+ source = '--filename=%r' % source
+ return source+name+' '+command
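
A quick interactive sketch of shquote()'s contract: any argument that would not survive shlex.split() unquoted comes back wrapped in its repr():

    >>> shquote('simple')
    'simple'
    >>> shquote('two words')
    "'two words'"
    >>> import shlex; shlex.split(shquote('two words'))
    ['two words']
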
diff --git a/Lib/setuptools/command/bdist_egg.py b/Lib/setuptools/command/bdist_egg.py
new file mode 100644
index 0000000..617d88d
--- /dev/null
+++ b/Lib/setuptools/command/bdist_egg.py
@@ -0,0 +1,449 @@
+"""setuptools.command.bdist_egg
+
+Build .egg distributions"""
+
+# This module should be kept compatible with Python 2.3
+import sys, os, marshal
+from setuptools import Command
+from distutils.dir_util import remove_tree, mkpath
+from distutils.sysconfig import get_python_version, get_python_lib
+from distutils import log
+from pkg_resources import get_build_platform, Distribution
+from types import CodeType
+from setuptools.extension import Library
+
+def write_stub(resource, pyfile):
+ f = open(pyfile,'w')
+ f.write('\n'.join([
+ "def __bootstrap__():",
+ " global __bootstrap__, __loader__, __file__",
+ " import sys, pkg_resources, imp",
+ " __file__ = pkg_resources.resource_filename(__name__,%r)"
+ % resource,
+ " del __bootstrap__, __loader__",
+ " imp.load_dynamic(__name__,__file__)",
+ "__bootstrap__()",
+ "" # terminal \n
+ ]))
+ f.close()
+
+# stub __init__.py for packages distributed without one
+NS_PKG_STUB = '__import__("pkg_resources").declare_namespace(__name__)'
+
+
+
+
+
+
+
+
+
+
+class bdist_egg(Command):
+
+ description = "create an \"egg\" distribution"
+
+ user_options = [
+ ('bdist-dir=', 'b',
+ "temporary directory for creating the distribution"),
+ ('plat-name=', 'p',
+ "platform name to embed in generated filenames "
+ "(default: %s)" % get_build_platform()),
+ ('exclude-source-files', None,
+ "remove all .py files from the generated egg"),
+ ('keep-temp', 'k',
+ "keep the pseudo-installation tree around after " +
+ "creating the distribution archive"),
+ ('dist-dir=', 'd',
+ "directory to put final built distributions in"),
+ ('skip-build', None,
+ "skip rebuilding everything (for testing/debugging)"),
+ ]
+
+ boolean_options = [
+ 'keep-temp', 'skip-build', 'exclude-source-files'
+ ]
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ def initialize_options (self):
+ self.bdist_dir = None
+ self.plat_name = None
+ self.keep_temp = 0
+ self.dist_dir = None
+ self.skip_build = 0
+ self.egg_output = None
+ self.exclude_source_files = None
+
+
+ def finalize_options(self):
+ ei_cmd = self.get_finalized_command("egg_info")
+ self.egg_info = ei_cmd.egg_info
+
+ if self.bdist_dir is None:
+ bdist_base = self.get_finalized_command('bdist').bdist_base
+ self.bdist_dir = os.path.join(bdist_base, 'egg')
+
+ if self.plat_name is None:
+ self.plat_name = get_build_platform()
+
+ self.set_undefined_options('bdist',('dist_dir', 'dist_dir'))
+
+ if self.egg_output is None:
+
+ # Compute filename of the output egg
+ basename = Distribution(
+ None, None, ei_cmd.egg_name, ei_cmd.egg_version,
+ get_python_version(),
+ self.distribution.has_ext_modules() and self.plat_name
+ ).egg_name()
+
+ self.egg_output = os.path.join(self.dist_dir, basename+'.egg')
+
+
+
+
+
+
+
+
+ def do_install_data(self):
+ # Hack for packages that install data to install's --install-lib
+ self.get_finalized_command('install').install_lib = self.bdist_dir
+
+ site_packages = os.path.normcase(os.path.realpath(get_python_lib()))
+ old, self.distribution.data_files = self.distribution.data_files,[]
+
+ for item in old:
+ if isinstance(item,tuple) and len(item)==2:
+ if os.path.isabs(item[0]):
+ realpath = os.path.realpath(item[0])
+ normalized = os.path.normcase(realpath)
+ if normalized==site_packages or normalized.startswith(
+ site_packages+os.sep
+ ):
+ item = realpath[len(site_packages)+1:], item[1]
+ # XXX else: raise ???
+ self.distribution.data_files.append(item)
+
+ try:
+ log.info("installing package data to %s" % self.bdist_dir)
+ self.call_command('install_data', force=0, root=None)
+ finally:
+ self.distribution.data_files = old
+
+
+ def get_outputs(self):
+ return [self.egg_output]
+
+
+ def call_command(self,cmdname,**kw):
+ """Invoke reinitialized command `cmdname` with keyword args"""
+ for dirname in INSTALL_DIRECTORY_ATTRS:
+ kw.setdefault(dirname,self.bdist_dir)
+ kw.setdefault('skip_build',self.skip_build)
+ kw.setdefault('dry_run', self.dry_run)
+ cmd = self.reinitialize_command(cmdname, **kw)
+ self.run_command(cmdname)
+ return cmd
+
+
+ def run(self):
+ # Generate metadata first
+ self.run_command("egg_info")
+
+ # We run install_lib before install_data, because some data hacks
+ # pull their data path from the install_lib command.
+ log.info("installing library code to %s" % self.bdist_dir)
+ instcmd = self.get_finalized_command('install')
+ old_root = instcmd.root; instcmd.root = None
+ cmd = self.call_command('install_lib', warn_dir=0)
+ instcmd.root = old_root
+
+ all_outputs, ext_outputs = self.get_ext_outputs()
+ self.stubs = []
+ to_compile = []
+ for (p,ext_name) in enumerate(ext_outputs):
+ filename,ext = os.path.splitext(ext_name)
+ pyfile = os.path.join(self.bdist_dir, filename + '.py')
+ self.stubs.append(pyfile)
+ log.info("creating stub loader for %s" % ext_name)
+ if not self.dry_run:
+ write_stub(os.path.basename(ext_name), pyfile)
+ to_compile.append(pyfile)
+ ext_outputs[p] = ext_name.replace(os.sep,'/')
+
+ to_compile.extend(self.make_init_files())
+ if to_compile:
+ cmd.byte_compile(to_compile)
+
+ if self.distribution.data_files:
+ self.do_install_data()
+
+ # Make the EGG-INFO directory
+ archive_root = self.bdist_dir
+ egg_info = os.path.join(archive_root,'EGG-INFO')
+ self.mkpath(egg_info)
+ if self.distribution.scripts:
+ script_dir = os.path.join(egg_info, 'scripts')
+ log.info("installing scripts to %s" % script_dir)
+ self.call_command('install_scripts',install_dir=script_dir,no_ep=1)
+
+ native_libs = os.path.join(self.egg_info,"native_libs.txt")
+ if all_outputs:
+ log.info("writing %s" % native_libs)
+ if not self.dry_run:
+ libs_file = open(native_libs, 'wt')
+ libs_file.write('\n'.join(all_outputs))
+ libs_file.write('\n')
+ libs_file.close()
+ elif os.path.isfile(native_libs):
+ log.info("removing %s" % native_libs)
+ if not self.dry_run:
+ os.unlink(native_libs)
+
+ for filename in os.listdir(self.egg_info):
+ path = os.path.join(self.egg_info,filename)
+ if os.path.isfile(path):
+ self.copy_file(path,os.path.join(egg_info,filename))
+
+ write_safety_flag(
+ os.path.join(archive_root,'EGG-INFO'), self.zip_safe()
+ )
+
+ if os.path.exists(os.path.join(self.egg_info,'depends.txt')):
+ log.warn(
+ "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
+ "Use the install_requires/extras_require setup() args instead."
+ )
+
+ if self.exclude_source_files:
+ self.zap_pyfiles()
+
+ # Make the archive
+ make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
+ dry_run=self.dry_run)
+ if not self.keep_temp:
+ remove_tree(self.bdist_dir, dry_run=self.dry_run)
+
+ # Add to 'Distribution.dist_files' so that the "upload" command works
+ getattr(self.distribution,'dist_files',[]).append(
+ ('bdist_egg',get_python_version(),self.egg_output))
+
+ def zap_pyfiles(self):
+ log.info("Removing .py files from temporary directory")
+ for base,dirs,files in walk_egg(self.bdist_dir):
+ for name in files:
+ if name.endswith('.py'):
+ path = os.path.join(base,name)
+ log.debug("Deleting %s", path)
+ os.unlink(path)
+
+ def zip_safe(self):
+ safe = getattr(self.distribution,'zip_safe',None)
+ if safe is not None:
+ return safe
+ log.warn("zip_safe flag not set; analyzing archive contents...")
+ return analyze_egg(self.bdist_dir, self.stubs)
+
+ def make_init_files(self):
+ """Create missing package __init__ files"""
+ init_files = []
+ for base,dirs,files in walk_egg(self.bdist_dir):
+ if base==self.bdist_dir:
+ # don't put an __init__ in the root
+ continue
+ for name in files:
+ if name.endswith('.py'):
+ if '__init__.py' not in files:
+ pkg = base[len(self.bdist_dir)+1:].replace(os.sep,'.')
+ if self.distribution.has_contents_for(pkg):
+ log.warn("Creating missing __init__.py for %s",pkg)
+ filename = os.path.join(base,'__init__.py')
+ if not self.dry_run:
+ f = open(filename,'w'); f.write(NS_PKG_STUB)
+ f.close()
+ init_files.append(filename)
+ break
+ else:
+ # not a package, don't traverse to subdirectories
+ dirs[:] = []
+
+ return init_files
+
+ def get_ext_outputs(self):
+ """Get a list of relative paths to C extensions in the output distro"""
+
+ all_outputs = []
+ ext_outputs = []
+
+ paths = {self.bdist_dir:''}
+ for base, dirs, files in os.walk(self.bdist_dir):
+ for filename in files:
+ if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
+ all_outputs.append(paths[base]+filename)
+ for filename in dirs:
+ paths[os.path.join(base,filename)] = paths[base]+filename+'/'
+
+ if self.distribution.has_ext_modules():
+ build_cmd = self.get_finalized_command('build_ext')
+ for ext in build_cmd.extensions:
+ if isinstance(ext,Library):
+ continue
+ fullname = build_cmd.get_ext_fullname(ext.name)
+ filename = build_cmd.get_ext_filename(fullname)
+ if not os.path.basename(filename).startswith('dl-'):
+ if os.path.exists(os.path.join(self.bdist_dir,filename)):
+ ext_outputs.append(filename)
+
+ return all_outputs, ext_outputs
+
+
+NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
+
+
+
+
+
+
+
+
+
+
+
+
+def walk_egg(egg_dir):
+ """Walk an unpacked egg's contents, skipping the metadata directory"""
+ walker = os.walk(egg_dir)
+ base,dirs,files = walker.next()
+ if 'EGG-INFO' in dirs:
+ dirs.remove('EGG-INFO')
+ yield base,dirs,files
+ for bdf in walker:
+ yield bdf
+
+def analyze_egg(egg_dir, stubs):
+ # check for existing flag in EGG-INFO
+ for flag,fn in safety_flags.items():
+ if os.path.exists(os.path.join(egg_dir,'EGG-INFO',fn)):
+ return flag
+
+ safe = True
+ for base, dirs, files in walk_egg(egg_dir):
+ for name in files:
+ if name.endswith('.py') or name.endswith('.pyw'):
+ continue
+ elif name.endswith('.pyc') or name.endswith('.pyo'):
+ # always scan, even if we already know we're not safe
+ safe = scan_module(egg_dir, base, name, stubs) and safe
+ return safe
+
+def write_safety_flag(egg_dir, safe):
+ # Write or remove zip safety flag file(s)
+ for flag,fn in safety_flags.items():
+ fn = os.path.join(egg_dir, fn)
+ if os.path.exists(fn):
+ if safe is None or bool(safe)<>flag:
+ os.unlink(fn)
+ elif safe is not None and bool(safe)==flag:
+ open(fn,'w').close()
+
+safety_flags = {
+ True: 'zip-safe',
+ False: 'not-zip-safe',
+}
+
+def scan_module(egg_dir, base, name, stubs):
+ """Check whether module possibly uses unsafe-for-zipfile stuff"""
+
+ filename = os.path.join(base,name)
+ if filename[:-1] in stubs:
+ return True # Extension module
+ pkg = base[len(egg_dir)+1:].replace(os.sep,'.')
+ module = pkg+(pkg and '.' or '')+os.path.splitext(name)[0]
+ f = open(filename,'rb'); f.read(8) # skip magic & date
+ code = marshal.load(f); f.close()
+ safe = True
+ symbols = dict.fromkeys(iter_symbols(code))
+ for bad in ['__file__', '__path__']:
+ if bad in symbols:
+ log.warn("%s: module references %s", module, bad)
+ safe = False
+ if 'inspect' in symbols:
+ for bad in [
+                'getsource', 'getabsfile', 'getsourcefile', 'getfile',
+                'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
+ 'getinnerframes', 'getouterframes', 'stack', 'trace'
+ ]:
+ if bad in symbols:
+ log.warn("%s: module MAY be using inspect.%s", module, bad)
+ safe = False
+ if '__name__' in symbols and '__main__' in symbols and '.' not in module:
+ if get_python_version()>="2.4":
+ log.warn("%s: top-level module may be 'python -m' script", module)
+ safe = False
+ return safe
+
+def iter_symbols(code):
+ """Yield names and strings used by `code` and its nested code objects"""
+ for name in code.co_names: yield name
+ for const in code.co_consts:
+ if isinstance(const,basestring):
+ yield const
+ elif isinstance(const,CodeType):
+ for name in iter_symbols(const):
+ yield name
+
+# Attribute names of options for commands that might need to be convinced to
+# install to the egg build directory
+
+INSTALL_DIRECTORY_ATTRS = [
+ 'install_lib', 'install_dir', 'install_data', 'install_base'
+]
+
+def make_zipfile (zip_filename, base_dir, verbose=0, dry_run=0, compress=None):
+    """Create a zip file from all the files under 'base_dir'.  The output
+    zip file is written to 'zip_filename'.  Unlike the distutils original,
+    this version always uses the "zipfile" Python module rather than the
+    InfoZIP "zip" utility.  Returns the name of the output zip file.
+    """
+ """
+ import zipfile
+ mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
+ log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
+
+ def visit (z, dirname, names):
+ for name in names:
+ path = os.path.normpath(os.path.join(dirname, name))
+ if os.path.isfile(path):
+ p = path[len(base_dir)+1:]
+ if not dry_run:
+ z.write(path, p)
+ log.debug("adding '%s'" % p)
+
+ if compress is None:
+        compress = (sys.version>="2.4") # avoid Python 2.3 zipimport bug on 64-bit platforms
+
+ compression = [zipfile.ZIP_STORED, zipfile.ZIP_DEFLATED][bool(compress)]
+ if not dry_run:
+ z = zipfile.ZipFile(zip_filename, "w", compression=compression)
+ os.path.walk(base_dir, visit, z)
+ z.close()
+ else:
+ os.path.walk(base_dir, visit, None)
+
+ return zip_filename
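
The zip-safety scan above keys off the names and string constants referenced by a module's code objects; a self-contained sketch of the same traversal using compile() directly (the helper mirrors iter_symbols() for illustration):

    from types import CodeType

    def iter_symbols(code):
        # Names plus string constants, recursing into nested code objects.
        for name in code.co_names:
            yield name
        for const in code.co_consts:
            if isinstance(const, basestring):
                yield const
            elif isinstance(const, CodeType):
                for name in iter_symbols(const):
                    yield name

    code = compile("import inspect\nprint __file__\n", '<demo>', 'exec')
    symbols = dict.fromkeys(iter_symbols(code))
    print '__file__' in symbols  # True -> module would be flagged not zip-safe
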
diff --git a/Lib/setuptools/command/bdist_rpm.py b/Lib/setuptools/command/bdist_rpm.py
new file mode 100755
index 0000000..00e07ac
--- /dev/null
+++ b/Lib/setuptools/command/bdist_rpm.py
@@ -0,0 +1,37 @@
+# This is just a kludge so that bdist_rpm doesn't guess wrong about the
+# distribution name and version, if the egg_info command is going to alter
+# them, and another kludge to allow you to build old-style non-egg RPMs
+
+from distutils.command.bdist_rpm import bdist_rpm as _bdist_rpm
+
+class bdist_rpm(_bdist_rpm):
+
+ def initialize_options(self):
+ _bdist_rpm.initialize_options(self)
+ self.no_egg = None
+
+ def run(self):
+ self.run_command('egg_info') # ensure distro name is up-to-date
+ _bdist_rpm.run(self)
+
+ def _make_spec_file(self):
+ version = self.distribution.get_version()
+ rpmversion = version.replace('-','_')
+ spec = _bdist_rpm._make_spec_file(self)
+ line23 = '%define version '+version
+ line24 = '%define version '+rpmversion
+ spec = [
+ line.replace(
+ "Source0: %{name}-%{version}.tar",
+ "Source0: %{name}-%{unmangled_version}.tar"
+ ).replace(
+ "setup.py install ",
+ "setup.py install --single-version-externally-managed "
+ ).replace(
+ "%setup",
+ "%setup -n %{name}-%{unmangled_version}"
+ ).replace(line23,line24)
+ for line in spec
+ ]
+ spec.insert(spec.index(line24)+1, "%define unmangled_version "+version)
+ return spec
diff --git a/Lib/setuptools/command/build_ext.py b/Lib/setuptools/command/build_ext.py
new file mode 100644
index 0000000..f8551fb
--- /dev/null
+++ b/Lib/setuptools/command/build_ext.py
@@ -0,0 +1,285 @@
+from distutils.command.build_ext import build_ext as _du_build_ext
+try:
+ # Attempt to use Pyrex for building extensions, if available
+ from Pyrex.Distutils.build_ext import build_ext as _build_ext
+except ImportError:
+ _build_ext = _du_build_ext
+
+import os, sys
+from distutils.file_util import copy_file
+from setuptools.extension import Library
+from distutils.ccompiler import new_compiler
+from distutils.sysconfig import customize_compiler, get_config_var
+get_config_var("LDSHARED") # make sure _config_vars is initialized
+from distutils.sysconfig import _config_vars
+from distutils import log
+from distutils.errors import *
+
+have_rtld = False
+use_stubs = False
+libtype = 'shared'
+
+if sys.platform == "darwin":
+ use_stubs = True
+elif os.name != 'nt':
+ try:
+ from dl import RTLD_NOW
+ have_rtld = True
+ use_stubs = True
+ except ImportError:
+ pass
+
+def if_dl(s):
+ if have_rtld:
+ return s
+ return ''
+
+
+
+
+
+
+class build_ext(_build_ext):
+ def run(self):
+ """Build extensions in build directory, then copy if --inplace"""
+ old_inplace, self.inplace = self.inplace, 0
+ _build_ext.run(self)
+ self.inplace = old_inplace
+ if old_inplace:
+ self.copy_extensions_to_source()
+
+ def copy_extensions_to_source(self):
+ build_py = self.get_finalized_command('build_py')
+ for ext in self.extensions:
+ fullname = self.get_ext_fullname(ext.name)
+ filename = self.get_ext_filename(fullname)
+ modpath = fullname.split('.')
+ package = '.'.join(modpath[:-1])
+ package_dir = build_py.get_package_dir(package)
+ dest_filename = os.path.join(package_dir,os.path.basename(filename))
+ src_filename = os.path.join(self.build_lib,filename)
+
+ # Always copy, even if source is older than destination, to ensure
+ # that the right extensions for the current Python/platform are
+ # used.
+ copy_file(
+ src_filename, dest_filename, verbose=self.verbose,
+ dry_run=self.dry_run
+ )
+ if ext._needs_stub:
+ self.write_stub(package_dir or os.curdir, ext, True)
+
+
+ if _build_ext is not _du_build_ext:
+ # Workaround for problems using some Pyrex versions w/SWIG and/or 2.4
+ def swig_sources(self, sources, *otherargs):
+ # first do any Pyrex processing
+ sources = _build_ext.swig_sources(self, sources) or sources
+ # Then do any actual SWIG stuff on the remainder
+ return _du_build_ext.swig_sources(self, sources, *otherargs)
+
+
+
+ def get_ext_filename(self, fullname):
+ filename = _build_ext.get_ext_filename(self,fullname)
+ ext = self.ext_map[fullname]
+ if isinstance(ext,Library):
+ fn, ext = os.path.splitext(filename)
+ return self.shlib_compiler.library_filename(fn,libtype)
+ elif use_stubs and ext._links_to_dynamic:
+ d,fn = os.path.split(filename)
+ return os.path.join(d,'dl-'+fn)
+ else:
+ return filename
+
+ def initialize_options(self):
+ _build_ext.initialize_options(self)
+ self.shlib_compiler = None
+ self.shlibs = []
+ self.ext_map = {}
+
+ def finalize_options(self):
+ _build_ext.finalize_options(self)
+ self.extensions = self.extensions or []
+ self.check_extensions_list(self.extensions)
+ self.shlibs = [ext for ext in self.extensions
+ if isinstance(ext,Library)]
+ if self.shlibs:
+ self.setup_shlib_compiler()
+ for ext in self.extensions:
+ fullname = ext._full_name = self.get_ext_fullname(ext.name)
+ self.ext_map[fullname] = ext
+ ltd = ext._links_to_dynamic = \
+ self.shlibs and self.links_to_dynamic(ext) or False
+ ext._needs_stub = ltd and use_stubs and not isinstance(ext,Library)
+ filename = ext._file_name = self.get_ext_filename(fullname)
+ libdir = os.path.dirname(os.path.join(self.build_lib,filename))
+ if ltd and libdir not in ext.library_dirs:
+ ext.library_dirs.append(libdir)
+ if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs:
+ ext.runtime_library_dirs.append(os.curdir)
+
+
+
+ def setup_shlib_compiler(self):
+ compiler = self.shlib_compiler = new_compiler(
+ compiler=self.compiler, dry_run=self.dry_run, force=self.force
+ )
+ if sys.platform == "darwin":
+ tmp = _config_vars.copy()
+ try:
+ # XXX Help! I don't have any idea whether these are right...
+ _config_vars['LDSHARED'] = "gcc -Wl,-x -dynamiclib -undefined dynamic_lookup"
+ _config_vars['CCSHARED'] = " -dynamiclib"
+ _config_vars['SO'] = ".dylib"
+ customize_compiler(compiler)
+ finally:
+ _config_vars.clear()
+ _config_vars.update(tmp)
+ else:
+ customize_compiler(compiler)
+
+ if self.include_dirs is not None:
+ compiler.set_include_dirs(self.include_dirs)
+ if self.define is not None:
+ # 'define' option is a list of (name,value) tuples
+ for (name,value) in self.define:
+ compiler.define_macro(name, value)
+ if self.undef is not None:
+ for macro in self.undef:
+ compiler.undefine_macro(macro)
+ if self.libraries is not None:
+ compiler.set_libraries(self.libraries)
+ if self.library_dirs is not None:
+ compiler.set_library_dirs(self.library_dirs)
+ if self.rpath is not None:
+ compiler.set_runtime_library_dirs(self.rpath)
+ if self.link_objects is not None:
+ compiler.set_link_objects(self.link_objects)
+
+ # hack so distutils' build_extension() builds a library instead
+ compiler.link_shared_object = link_shared_object.__get__(compiler)
+
+
+
+ def get_export_symbols(self, ext):
+ if isinstance(ext,Library):
+ return ext.export_symbols
+ return _build_ext.get_export_symbols(self,ext)
+
+ def build_extension(self, ext):
+ _compiler = self.compiler
+ try:
+ if isinstance(ext,Library):
+ self.compiler = self.shlib_compiler
+ _build_ext.build_extension(self,ext)
+ if ext._needs_stub:
+ self.write_stub(
+ self.get_finalized_command('build_py').build_lib, ext
+ )
+ finally:
+ self.compiler = _compiler
+
+ def links_to_dynamic(self, ext):
+ """Return true if 'ext' links to a dynamic lib in the same package"""
+ # XXX this should check to ensure the lib is actually being built
+ # XXX as dynamic, and not just using a locally-found version or a
+ # XXX static-compiled version
+ libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
+ pkg = '.'.join(ext._full_name.split('.')[:-1]+[''])
+ for libname in ext.libraries:
+ if pkg+libname in libnames: return True
+ return False
+
+ def get_outputs(self):
+ outputs = _build_ext.get_outputs(self)
+ optimize = self.get_finalized_command('build_py').optimize
+ for ext in self.extensions:
+ if ext._needs_stub:
+ base = os.path.join(self.build_lib, *ext._full_name.split('.'))
+ outputs.append(base+'.py')
+ outputs.append(base+'.pyc')
+ if optimize:
+ outputs.append(base+'.pyo')
+ return outputs
+
+ def write_stub(self, output_dir, ext, compile=False):
+ log.info("writing stub loader for %s to %s",ext._full_name, output_dir)
+ stub_file = os.path.join(output_dir, *ext._full_name.split('.'))+'.py'
+ if compile and os.path.exists(stub_file):
+ raise DistutilsError(stub_file+" already exists! Please delete.")
+ if not self.dry_run:
+ f = open(stub_file,'w')
+ f.write('\n'.join([
+ "def __bootstrap__():",
+ " global __bootstrap__, __file__, __loader__",
+ " import sys, os, pkg_resources, imp"+if_dl(", dl"),
+ " __file__ = pkg_resources.resource_filename(__name__,%r)"
+ % os.path.basename(ext._file_name),
+ " del __bootstrap__",
+ " if '__loader__' in globals():",
+ " del __loader__",
+ if_dl(" old_flags = sys.getdlopenflags()"),
+ " old_dir = os.getcwd()",
+ " try:",
+ " os.chdir(os.path.dirname(__file__))",
+ if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"),
+ " imp.load_dynamic(__name__,__file__)",
+ " finally:",
+ if_dl(" sys.setdlopenflags(old_flags)"),
+ " os.chdir(old_dir)",
+ "__bootstrap__()",
+ "" # terminal \n
+ ]))
+ f.close()
+ if compile:
+ from distutils.util import byte_compile
+ byte_compile([stub_file], optimize=0,
+ force=True, dry_run=self.dry_run)
+ optimize = self.get_finalized_command('install_lib').optimize
+ if optimize > 0:
+ byte_compile([stub_file], optimize=optimize,
+ force=True, dry_run=self.dry_run)
+ if os.path.exists(stub_file) and not self.dry_run:
+ os.unlink(stub_file)
+
+
+if use_stubs or os.name=='nt':
+ # Build shared libraries
+ #
+ def link_shared_object(self, objects, output_libname, output_dir=None,
+ libraries=None, library_dirs=None, runtime_library_dirs=None,
+ export_symbols=None, debug=0, extra_preargs=None,
+ extra_postargs=None, build_temp=None, target_lang=None
+ ): self.link(
+ self.SHARED_LIBRARY, objects, output_libname,
+ output_dir, libraries, library_dirs, runtime_library_dirs,
+ export_symbols, debug, extra_preargs, extra_postargs,
+ build_temp, target_lang
+ )
+else:
+ # Build static libraries everywhere else
+ libtype = 'static'
+
+ def link_shared_object(self, objects, output_libname, output_dir=None,
+ libraries=None, library_dirs=None, runtime_library_dirs=None,
+ export_symbols=None, debug=0, extra_preargs=None,
+ extra_postargs=None, build_temp=None, target_lang=None
+ ):
+ # XXX we need to either disallow these attrs on Library instances,
+ # or warn/abort here if set, or something...
+ #libraries=None, library_dirs=None, runtime_library_dirs=None,
+ #export_symbols=None, extra_preargs=None, extra_postargs=None,
+ #build_temp=None
+
+ assert output_dir is None # distutils build_ext doesn't pass this
+ output_dir,filename = os.path.split(output_libname)
+ basename, ext = os.path.splitext(filename)
+ if self.library_filename("x").startswith('lib'):
+ # strip 'lib' prefix; this is kludgy if some platform uses
+ # a different prefix
+ basename = basename[3:]
+
+ self.create_static_lib(
+ objects, basename, output_dir, debug, target_lang
+ )
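
A sketch of what the Library machinery above enables: declaring a shared library that sibling extensions link against (the project name and sources are hypothetical):

    from setuptools import setup, Extension
    from setuptools.extension import Library

    setup(
        name='demo',
        version='0.1',
        ext_modules=[
            # Built with the shlib compiler set up in setup_shlib_compiler()
            Library('demo.libsupport', sources=['support.c']),
            # links_to_dynamic() matches 'demo.libsupport', so this extension
            # gets the library dir (and, with use_stubs, a 'dl-' stub loader)
            Extension('demo._core', sources=['core.c'],
                      libraries=['libsupport']),
        ],
    )
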
diff --git a/Lib/setuptools/command/build_py.py b/Lib/setuptools/command/build_py.py
new file mode 100644
index 0000000..77a9b23
--- /dev/null
+++ b/Lib/setuptools/command/build_py.py
@@ -0,0 +1,192 @@
+import os.path, sys, fnmatch
+from distutils.command.build_py import build_py as _build_py
+from distutils.util import convert_path
+from glob import glob
+
+class build_py(_build_py):
+ """Enhanced 'build_py' command that includes data files with packages
+
+ The data files are specified via a 'package_data' argument to 'setup()'.
+ See 'setuptools.dist.Distribution' for more details.
+
+ Also, this version of the 'build_py' command allows you to specify both
+ 'py_modules' and 'packages' in the same setup operation.
+ """
+ def finalize_options(self):
+ _build_py.finalize_options(self)
+ self.package_data = self.distribution.package_data
+ self.exclude_package_data = self.distribution.exclude_package_data or {}
+ if 'data_files' in self.__dict__: del self.__dict__['data_files']
+
+ def run(self):
+ """Build modules, packages, and copy data files to build directory"""
+ if not self.py_modules and not self.packages:
+ return
+
+ if self.py_modules:
+ self.build_modules()
+
+ if self.packages:
+ self.build_packages()
+ self.build_package_data()
+
+ # Only compile actual .py files, using our base class' idea of what our
+ # output files are.
+ self.byte_compile(_build_py.get_outputs(self, include_bytecode=0))
+
+ def __getattr__(self,attr):
+ if attr=='data_files': # lazily compute data files
+ self.data_files = files = self._get_data_files(); return files
+ return _build_py.__getattr__(self,attr)
+
+ def _get_data_files(self):
+ """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
+ self.analyze_manifest()
+ data = []
+ for package in self.packages or ():
+ # Locate package source directory
+ src_dir = self.get_package_dir(package)
+
+ # Compute package build directory
+ build_dir = os.path.join(*([self.build_lib] + package.split('.')))
+
+ # Length of path to strip from found files
+ plen = len(src_dir)+1
+
+ # Strip directory from globbed filenames
+ filenames = [
+ file[plen:] for file in self.find_data_files(package, src_dir)
+ ]
+ data.append( (package, src_dir, build_dir, filenames) )
+ return data
+
+ def find_data_files(self, package, src_dir):
+ """Return filenames for package's data files in 'src_dir'"""
+ globs = (self.package_data.get('', [])
+ + self.package_data.get(package, []))
+ files = self.manifest_files.get(package, [])[:]
+ for pattern in globs:
+ # Each pattern has to be converted to a platform-specific path
+ files.extend(glob(os.path.join(src_dir, convert_path(pattern))))
+ return self.exclude_data_files(package, src_dir, files)
+
+ def build_package_data(self):
+ """Copy data files into build directory"""
+ lastdir = None
+ for package, src_dir, build_dir, filenames in self.data_files:
+ for filename in filenames:
+ target = os.path.join(build_dir, filename)
+ self.mkpath(os.path.dirname(target))
+ self.copy_file(os.path.join(src_dir, filename), target)
+
+
+ def analyze_manifest(self):
+ self.manifest_files = mf = {}
+ if not self.distribution.include_package_data:
+ return
+ src_dirs = {}
+ for package in self.packages or ():
+ # Locate package source directory
+ src_dirs[assert_relative(self.get_package_dir(package))] = package
+
+ self.run_command('egg_info')
+ ei_cmd = self.get_finalized_command('egg_info')
+ for path in ei_cmd.filelist.files:
+ if path.endswith('.py'):
+ continue
+ d,f = os.path.split(assert_relative(path))
+ prev = None
+ while d and d!=prev and d not in src_dirs:
+ prev = d
+ d, df = os.path.split(d)
+ f = os.path.join(df, f)
+ if d in src_dirs:
+ mf.setdefault(src_dirs[d],[]).append(path)
+
+
+    def get_data_files(self): pass # no-op: Python 2.4's build_py calls this; 'data_files' is computed lazily via __getattr__
+
+ if sys.version<"2.4": # Python 2.4 already has this code
+ def get_outputs(self, include_bytecode=1):
+ """Return complete list of files copied to the build directory
+
+ This includes both '.py' files and data files, as well as '.pyc'
+ and '.pyo' files if 'include_bytecode' is true. (This method is
+ needed for the 'install_lib' command to do its job properly, and to
+ generate a correct installation manifest.)
+ """
+ return _build_py.get_outputs(self, include_bytecode) + [
+ os.path.join(build_dir, filename)
+ for package, src_dir, build_dir,filenames in self.data_files
+ for filename in filenames
+ ]
+
+ def check_package(self, package, package_dir):
+ """Check namespace packages' __init__ for declare_namespace"""
+ try:
+ return self.packages_checked[package]
+ except KeyError:
+ pass
+
+ init_py = _build_py.check_package(self, package, package_dir)
+ self.packages_checked[package] = init_py
+
+ if not init_py or not self.distribution.namespace_packages:
+ return init_py
+
+ for pkg in self.distribution.namespace_packages:
+ if pkg==package or pkg.startswith(package+'.'):
+ break
+ else:
+ return init_py
+
+ f = open(init_py,'rU')
+ if 'declare_namespace' not in f.read():
+ from distutils.errors import DistutilsError
+ raise DistutilsError(
+ "Namespace package problem: %s is a namespace package, but its\n"
+ "__init__.py does not call declare_namespace()! Please fix it.\n"
+ '(See the setuptools manual under "Namespace Packages" for '
+ "details.)\n" % (package,)
+ )
+ f.close()
+ return init_py
+
+ def initialize_options(self):
+ self.packages_checked={}
+ _build_py.initialize_options(self)
+
+ def exclude_data_files(self, package, src_dir, files):
+ """Filter filenames for package's data files in 'src_dir'"""
+ globs = (self.exclude_package_data.get('', [])
+ + self.exclude_package_data.get(package, []))
+ bad = []
+ for pattern in globs:
+ bad.extend(
+ fnmatch.filter(
+ files, os.path.join(src_dir, convert_path(pattern))
+ )
+ )
+ bad = dict.fromkeys(bad)
+ return [f for f in files if f not in bad]
+
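+    # Example (illustrative values): with exclude_package_data set to
+    # {'mypkg': ['data/*.tmp']}, a files list of
+    # ['mypkg/data/a.dat', 'mypkg/data/b.tmp'] for src_dir 'mypkg' is
+    # filtered to ['mypkg/data/a.dat']; matching uses fnmatch against
+    # the platform-converted patterns.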
+
+def assert_relative(path):
+ if not os.path.isabs(path):
+ return path
+ from distutils.errors import DistutilsSetupError
+ raise DistutilsSetupError(
+"""Error: setup script specifies an absolute path:
+
+ %s
+
+setup() arguments must *always* be /-separated paths relative to the
+setup.py directory, *never* absolute paths.
+""" % path
+ )
diff --git a/Lib/setuptools/command/develop.py b/Lib/setuptools/command/develop.py
new file mode 100755
index 0000000..7ab5b23
--- /dev/null
+++ b/Lib/setuptools/command/develop.py
@@ -0,0 +1,116 @@
+from setuptools.command.easy_install import easy_install
+from distutils.util import convert_path
+from pkg_resources import Distribution, PathMetadata, normalize_path
+from distutils import log
+from distutils.errors import *
+import sys, os
+
+class develop(easy_install):
+ """Set up package for development"""
+
+ description = "install package in 'development mode'"
+
+ user_options = easy_install.user_options + [
+ ("uninstall", "u", "Uninstall this source package"),
+ ]
+
+ boolean_options = easy_install.boolean_options + ['uninstall']
+
+ command_consumes_arguments = False # override base
+
+ def run(self):
+ if self.uninstall:
+ self.multi_version = True
+ self.uninstall_link()
+ else:
+ self.install_for_development()
+ self.warn_deprecated_options()
+
+ def initialize_options(self):
+ self.uninstall = None
+ easy_install.initialize_options(self)
+
+ def finalize_options(self):
+ ei = self.get_finalized_command("egg_info")
+ if ei.broken_egg_info:
+ raise DistutilsError(
+ "Please rename %r to %r before using 'develop'"
+ % (ei.egg_info, ei.broken_egg_info)
+ )
+ self.args = [ei.egg_name]
+ easy_install.finalize_options(self)
+ self.egg_link = os.path.join(self.install_dir, ei.egg_name+'.egg-link')
+ self.egg_base = ei.egg_base
+ self.egg_path = os.path.abspath(ei.egg_base)
+
+ # Make a distribution for the package's source
+ self.dist = Distribution(
+ normalize_path(self.egg_path),
+ PathMetadata(self.egg_path, os.path.abspath(ei.egg_info)),
+ project_name = ei.egg_name
+ )
+
+ def install_for_development(self):
+ # Ensure metadata is up-to-date
+ self.run_command('egg_info')
+
+ # Build extensions in-place
+ self.reinitialize_command('build_ext', inplace=1)
+ self.run_command('build_ext')
+
+ self.install_site_py() # ensure that target dir is site-safe
+
+ # create an .egg-link in the installation dir, pointing to our egg
+ log.info("Creating %s (link to %s)", self.egg_link, self.egg_base)
+ if not self.dry_run:
+ f = open(self.egg_link,"w")
+ f.write(self.egg_path)
+ f.close()
+
+ # postprocess the installed distro, fixing up .pth, installing scripts,
+ # and handling requirements
+ self.process_distribution(None, self.dist)
+
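+    # Resulting layout, assuming a project 'MyProject' checked out at
+    # /src/myproject with install_dir /site (paths are illustrative):
+    #
+    #     /site/MyProject.egg-link  (single line: /src/myproject)
+    #     /site/easy-install.pth    (gains a /src/myproject entry)
+    #
+    # so the checkout itself becomes importable; nothing is copied.
+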
+ def uninstall_link(self):
+ if os.path.exists(self.egg_link):
+ log.info("Removing %s (link to %s)", self.egg_link, self.egg_base)
+ contents = [line.rstrip() for line in file(self.egg_link)]
+ if contents != [self.egg_path]:
+ log.warn("Link points to %s: uninstall aborted", contents)
+ return
+ if not self.dry_run:
+ os.unlink(self.egg_link)
+ if not self.dry_run:
+ self.update_pth(self.dist) # remove any .pth link to us
+ if self.distribution.scripts:
+ # XXX should also check for entry point scripts!
+ log.warn("Note: you must uninstall or replace scripts manually!")
+
+
+ def install_egg_scripts(self, dist):
+ if dist is not self.dist:
+ # Installing a dependency, so fall back to normal behavior
+ return easy_install.install_egg_scripts(self,dist)
+
+ # create wrapper scripts in the script dir, pointing to dist.scripts
+
+ # new-style...
+ self.install_wrapper_scripts(dist)
+
+ # ...and old-style
+ for script_name in self.distribution.scripts or []:
+ script_path = os.path.abspath(convert_path(script_name))
+ script_name = os.path.basename(script_path)
+ f = open(script_path,'rU')
+ script_text = f.read()
+ f.close()
+ self.install_script(dist, script_name, script_text, script_path)
diff --git a/Lib/setuptools/command/easy_install.py b/Lib/setuptools/command/easy_install.py
new file mode 100755
index 0000000..3ddcec4
--- /dev/null
+++ b/Lib/setuptools/command/easy_install.py
@@ -0,0 +1,1555 @@
+#!python
+"""\
+Easy Install
+------------
+
+A tool for doing automatic download/extract/build of distutils-based Python
+packages. For detailed documentation, see the accompanying EasyInstall.txt
+file, or visit the `EasyInstall home page`__.
+
+__ http://peak.telecommunity.com/DevCenter/EasyInstall
+"""
+import sys, os.path, zipimport, shutil, tempfile, zipfile, re, stat, random
+from glob import glob
+from setuptools import Command
+from setuptools.sandbox import run_setup
+from distutils import log, dir_util
+from distutils.sysconfig import get_python_lib
+from distutils.errors import DistutilsArgError, DistutilsOptionError, \
+ DistutilsError
+from setuptools.archive_util import unpack_archive
+from setuptools.package_index import PackageIndex, parse_bdist_wininst
+from setuptools.package_index import URL_SCHEME
+from setuptools.command import bdist_egg, egg_info
+from pkg_resources import *
+sys_executable = os.path.normpath(sys.executable)
+
+__all__ = [
+ 'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
+ 'main', 'get_exe_prefixes',
+]
+
+def samefile(p1,p2):
+ if hasattr(os.path,'samefile') and (
+ os.path.exists(p1) and os.path.exists(p2)
+ ):
+ return os.path.samefile(p1,p2)
+ return (
+ os.path.normpath(os.path.normcase(p1)) ==
+ os.path.normpath(os.path.normcase(p2))
+ )
+
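+# A sketch of the fallback branch (illustrative): when either path does
+# not exist, or the platform lacks os.path.samefile, the test degrades
+# to string comparison after normcase/normpath, e.g.
+#
+#     samefile('/tmp/./a.txt', '/tmp/a.txt')   # -> True
+#
+# which, unlike os.path.samefile, cannot see through symlinks.
+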
+class easy_install(Command):
+ """Manage a download/build/install process"""
+ description = "Find/get/install Python packages"
+ command_consumes_arguments = True
+
+ user_options = [
+ ('prefix=', None, "installation prefix"),
+ ("zip-ok", "z", "install package as a zipfile"),
+ ("multi-version", "m", "make apps have to require() a version"),
+ ("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
+ ("install-dir=", "d", "install package to DIR"),
+ ("script-dir=", "s", "install scripts to DIR"),
+ ("exclude-scripts", "x", "Don't install scripts"),
+ ("always-copy", "a", "Copy all needed packages to install dir"),
+ ("index-url=", "i", "base URL of Python Package Index"),
+ ("find-links=", "f", "additional URL(s) to search for packages"),
+ ("delete-conflicting", "D", "no longer needed; don't use this"),
+ ("ignore-conflicts-at-my-risk", None,
+ "no longer needed; don't use this"),
+ ("build-directory=", "b",
+ "download/extract/build in DIR; keep the results"),
+ ('optimize=', 'O',
+ "also compile with optimization: -O1 for \"python -O\", "
+ "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
+ ('record=', None,
+ "filename in which to record list of installed files"),
+ ('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
+ ('site-dirs=','S',"list of directories where .pth files work"),
+ ('editable', 'e', "Install specified packages in editable form"),
+ ('no-deps', 'N', "don't install dependencies"),
+ ('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
+ ]
+ boolean_options = [
+ 'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
+ 'delete-conflicting', 'ignore-conflicts-at-my-risk', 'editable',
+ 'no-deps',
+ ]
+ negative_opt = {'always-unzip': 'zip-ok'}
+ create_index = PackageIndex
+
+
+ def initialize_options(self):
+ self.zip_ok = None
+ self.install_dir = self.script_dir = self.exclude_scripts = None
+ self.index_url = None
+ self.find_links = None
+ self.build_directory = None
+ self.args = None
+ self.optimize = self.record = None
+ self.upgrade = self.always_copy = self.multi_version = None
+ self.editable = self.no_deps = self.allow_hosts = None
+ self.root = self.prefix = self.no_report = None
+
+ # Options not specifiable via command line
+ self.package_index = None
+ self.pth_file = None
+ self.delete_conflicting = None
+ self.ignore_conflicts_at_my_risk = None
+ self.site_dirs = None
+ self.installed_projects = {}
+ self.sitepy_installed = False
+ # Always read easy_install options, even if we are subclassed, or have
+ # an independent instance created. This ensures that defaults will
+ # always come from the standard configuration file(s)' "easy_install"
+ # section, even if this is a "develop" or "install" command, or some
+ # other embedding.
+ self._dry_run = None
+ self.verbose = self.distribution.verbose
+ self.distribution._set_command_options(
+ self, self.distribution.get_option_dict('easy_install')
+ )
+
+ def delete_blockers(self, blockers):
+ for filename in blockers:
+ if os.path.exists(filename) or os.path.islink(filename):
+ log.info("Deleting %s", filename)
+ if not self.dry_run:
+ if os.path.isdir(filename) and not os.path.islink(filename):
+ rmtree(filename)
+ else:
+ os.unlink(filename)
+
+ def finalize_options(self):
+ self._expand('install_dir','script_dir','build_directory','site_dirs')
+ # If a non-default installation directory was specified, default the
+ # script directory to match it.
+ if self.script_dir is None:
+ self.script_dir = self.install_dir
+
+ # Let install_dir get set by install_lib command, which in turn
+ # gets its info from the install command, and takes into account
+ # --prefix and --home and all that other crud.
+ self.set_undefined_options('install_lib',
+ ('install_dir','install_dir')
+ )
+ # Likewise, set default script_dir from 'install_scripts.install_dir'
+ self.set_undefined_options('install_scripts',
+ ('install_dir', 'script_dir')
+ )
+ # default --record from the install command
+ self.set_undefined_options('install', ('record', 'record'))
+ normpath = map(normalize_path, sys.path)
+ self.all_site_dirs = get_site_dirs()
+ if self.site_dirs is not None:
+ site_dirs = [
+ os.path.expanduser(s.strip()) for s in self.site_dirs.split(',')
+ ]
+ for d in site_dirs:
+ if not os.path.isdir(d):
+ log.warn("%s (in --site-dirs) does not exist", d)
+ elif normalize_path(d) not in normpath:
+ raise DistutilsOptionError(
+ d+" (in --site-dirs) is not on sys.path"
+ )
+ else:
+ self.all_site_dirs.append(normalize_path(d))
+ self.check_site_dir()
+ self.index_url = self.index_url or "http://www.python.org/pypi"
+ self.shadow_path = self.all_site_dirs[:]
+ for path_item in self.install_dir, normalize_path(self.script_dir):
+ if path_item not in self.shadow_path:
+ self.shadow_path.insert(0, path_item)
+
+ if self.allow_hosts is not None:
+ hosts = [s.strip() for s in self.allow_hosts.split(',')]
+ else:
+ hosts = ['*']
+
+ if self.package_index is None:
+ self.package_index = self.create_index(
+ self.index_url, search_path = self.shadow_path, hosts=hosts
+ )
+ self.local_index = Environment(self.shadow_path)
+
+ if self.find_links is not None:
+ if isinstance(self.find_links, basestring):
+ self.find_links = self.find_links.split()
+ else:
+ self.find_links = []
+
+ self.package_index.add_find_links(self.find_links)
+ self.set_undefined_options('install_lib', ('optimize','optimize'))
+ if not isinstance(self.optimize,int):
+ try:
+ self.optimize = int(self.optimize)
+ if not (0 <= self.optimize <= 2): raise ValueError
+ except ValueError:
+ raise DistutilsOptionError("--optimize must be 0, 1, or 2")
+
+ if self.delete_conflicting and self.ignore_conflicts_at_my_risk:
+ raise DistutilsOptionError(
+ "Can't use both --delete-conflicting and "
+ "--ignore-conflicts-at-my-risk at the same time"
+ )
+ if self.editable and not self.build_directory:
+ raise DistutilsArgError(
+ "Must specify a build directory (-b) when using --editable"
+ )
+ if not self.args:
+ raise DistutilsArgError(
+ "No urls, filenames, or requirements specified (see --help)")
+
+ self.outputs = []
+
+ def run(self):
+        if self.verbose != self.distribution.verbose:
+ log.set_verbosity(self.verbose)
+ try:
+ for spec in self.args:
+ self.easy_install(spec, not self.no_deps)
+ if self.record:
+ outputs = self.outputs
+ if self.root: # strip any package prefix
+ root_len = len(self.root)
+ for counter in xrange(len(outputs)):
+ outputs[counter] = outputs[counter][root_len:]
+ from distutils import file_util
+ self.execute(
+ file_util.write_file, (self.record, outputs),
+ "writing list of installed files to '%s'" %
+ self.record
+ )
+ self.warn_deprecated_options()
+ finally:
+ log.set_verbosity(self.distribution.verbose)
+
+ def pseudo_tempname(self):
+ """Return a pseudo-tempname base in the install directory.
+ This code is intentionally naive; if a malicious party can write to
+ the target directory you're already in deep doodoo.
+ """
+ try:
+ pid = os.getpid()
+ except:
+ pid = random.randint(0,sys.maxint)
+ return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
+
+ def warn_deprecated_options(self):
+ if self.delete_conflicting or self.ignore_conflicts_at_my_risk:
+ log.warn(
+                "Note: The -D, --delete-conflicting and"
+                " --ignore-conflicts-at-my-risk options no longer have any"
+                " purpose and should not be used."
+ )
+
+ def check_site_dir(self):
+ """Verify that self.install_dir is .pth-capable dir, if needed"""
+
+ instdir = normalize_path(self.install_dir)
+ pth_file = os.path.join(instdir,'easy-install.pth')
+
+ # Is it a configured, PYTHONPATH, implicit, or explicit site dir?
+ is_site_dir = instdir in self.all_site_dirs
+
+ if not is_site_dir:
+ # No? Then directly test whether it does .pth file processing
+ is_site_dir = self.check_pth_processing()
+ else:
+ # make sure we can write to target dir
+ testfile = self.pseudo_tempname()+'.write-test'
+ test_exists = os.path.exists(testfile)
+ try:
+ if test_exists: os.unlink(testfile)
+ open(testfile,'w').close()
+ os.unlink(testfile)
+ except (OSError,IOError):
+ self.cant_write_to_target()
+
+ if not is_site_dir and not self.multi_version:
+ # Can't install non-multi to non-site dir
+ raise DistutilsError(self.no_default_version_msg())
+
+ if is_site_dir:
+ if self.pth_file is None:
+ self.pth_file = PthDistributions(pth_file)
+ else:
+ self.pth_file = None
+
+ PYTHONPATH = os.environ.get('PYTHONPATH','').split(os.pathsep)
+ if instdir not in map(normalize_path, filter(None,PYTHONPATH)):
+ # only PYTHONPATH dirs need a site.py, so pretend it's there
+ self.sitepy_installed = True
+
+ self.install_dir = instdir
+
+
+ def cant_write_to_target(self):
+ msg = """can't create or remove files in install directory
+
+The following error occurred while trying to add or remove files in the
+installation directory:
+
+ %s
+
+The installation directory you specified (via --install-dir, --prefix, or
+the distutils default setting) was:
+
+ %s
+""" % (sys.exc_info()[1], self.install_dir,)
+
+ if not os.path.exists(self.install_dir):
+ msg += """
+This directory does not currently exist. Please create it and try again, or
+choose a different installation directory (using the -d or --install-dir
+option).
+"""
+ else:
+ msg += """
+Perhaps your account does not have write access to this directory? If the
+installation directory is a system-owned directory, you may need to sign in
+as the administrator or "root" account. If you do not have administrative
+access to this machine, you may wish to choose a different installation
+directory, preferably one that is listed in your PYTHONPATH environment
+variable.
+
+For information on other options, you may wish to consult the
+documentation at:
+
+ http://peak.telecommunity.com/EasyInstall.html
+
+Please make the appropriate changes for your system and try again.
+"""
+ raise DistutilsError(msg)
+
+
+
+
+ def check_pth_processing(self):
+ """Empirically verify whether .pth files are supported in inst. dir"""
+ instdir = self.install_dir
+ log.info("Checking .pth file support in %s", instdir)
+ pth_file = self.pseudo_tempname()+".pth"
+ ok_file = pth_file+'.ok'
+ ok_exists = os.path.exists(ok_file)
+ try:
+ if ok_exists: os.unlink(ok_file)
+ f = open(pth_file,'w')
+ except (OSError,IOError):
+ self.cant_write_to_target()
+ else:
+ try:
+ f.write("import os;open(%r,'w').write('OK')\n" % (ok_file,))
+ f.close(); f=None
+ executable = sys.executable
+ if os.name=='nt':
+ dirname,basename = os.path.split(executable)
+ alt = os.path.join(dirname,'pythonw.exe')
+ if basename.lower()=='python.exe' and os.path.exists(alt):
+ # use pythonw.exe to avoid opening a console window
+ executable = alt
+ if ' ' in executable: executable='"%s"' % executable
+ from distutils.spawn import spawn
+ spawn([executable,'-E','-c','pass'],0)
+
+ if os.path.exists(ok_file):
+ log.info(
+ "TEST PASSED: %s appears to support .pth files",
+ instdir
+ )
+ return True
+ finally:
+ if f: f.close()
+ if os.path.exists(ok_file): os.unlink(ok_file)
+ if os.path.exists(pth_file): os.unlink(pth_file)
+ if not self.multi_version:
+ log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
+ return False
+
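+    # The probe above, in outline: write <tmpname>.pth whose one line
+    # creates <tmpname>.pth.ok when executed, spawn "<python> -E -c pass"
+    # so the child's site processing runs, then check whether the .ok
+    # file appeared; both temporary files are removed afterwards.
+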
+ def install_egg_scripts(self, dist):
+ """Write all the scripts for `dist`, unless scripts are excluded"""
+
+ self.install_wrapper_scripts(dist)
+ if self.exclude_scripts or not dist.metadata_isdir('scripts'):
+ return
+
+ for script_name in dist.metadata_listdir('scripts'):
+ self.install_script(
+ dist, script_name,
+ dist.get_metadata('scripts/'+script_name).replace('\r','\n')
+ )
+
+ def add_output(self, path):
+ if os.path.isdir(path):
+ for base, dirs, files in os.walk(path):
+ for filename in files:
+ self.outputs.append(os.path.join(base,filename))
+ else:
+ self.outputs.append(path)
+
+ def not_editable(self, spec):
+ if self.editable:
+ raise DistutilsArgError(
+ "Invalid argument %r: you can't use filenames or URLs "
+ "with --editable (except via the --find-links option)."
+ % (spec,)
+ )
+
+ def check_editable(self,spec):
+ if not self.editable:
+ return
+
+ if os.path.exists(os.path.join(self.build_directory, spec.key)):
+ raise DistutilsArgError(
+ "%r already exists in %s; can't do a checkout there" %
+ (spec.key, self.build_directory)
+ )
+
+
+
+ def easy_install(self, spec, deps=False):
+ tmpdir = tempfile.mkdtemp(prefix="easy_install-")
+ download = None
+ self.install_site_py()
+
+ try:
+ if not isinstance(spec,Requirement):
+ if URL_SCHEME(spec):
+ # It's a url, download it to tmpdir and process
+ self.not_editable(spec)
+ download = self.package_index.download(spec, tmpdir)
+ return self.install_item(None, download, tmpdir, deps, True)
+
+ elif os.path.exists(spec):
+ # Existing file or directory, just process it directly
+ self.not_editable(spec)
+ return self.install_item(None, spec, tmpdir, deps, True)
+ else:
+ spec = parse_requirement_arg(spec)
+
+ self.check_editable(spec)
+ dist = self.package_index.fetch_distribution(
+ spec, tmpdir, self.upgrade, self.editable, not self.always_copy
+ )
+
+ if dist is None:
+ msg = "Could not find suitable distribution for %r" % spec
+ if self.always_copy:
+ msg+=" (--always-copy skips system and development eggs)"
+ raise DistutilsError(msg)
+ elif dist.precedence==DEVELOP_DIST:
+ # .egg-info dists don't need installing, just process deps
+ self.process_distribution(spec, dist, deps, "Using")
+ return dist
+ else:
+ return self.install_item(spec, dist.location, tmpdir, deps)
+
+ finally:
+ if os.path.exists(tmpdir):
+ rmtree(tmpdir)
+
+ def install_item(self, spec, download, tmpdir, deps, install_needed=False):
+
+        # Installation is also needed if the file is in tmpdir or is not an egg
+ install_needed = install_needed or os.path.dirname(download) == tmpdir
+ install_needed = install_needed or not download.endswith('.egg')
+
+ log.info("Processing %s", os.path.basename(download))
+
+ if install_needed or self.always_copy:
+ dists = self.install_eggs(spec, download, tmpdir)
+ for dist in dists:
+ self.process_distribution(spec, dist, deps)
+ else:
+ dists = [self.check_conflicts(self.egg_distribution(download))]
+ self.process_distribution(spec, dists[0], deps, "Using")
+
+ if spec is not None:
+ for dist in dists:
+ if dist in spec:
+ return dist
+
+ def process_distribution(self, requirement, dist, deps=True, *info):
+ self.update_pth(dist)
+ self.package_index.add(dist)
+ self.local_index.add(dist)
+ self.install_egg_scripts(dist)
+ self.installed_projects[dist.key] = dist
+ log.warn(self.installation_report(requirement, dist, *info))
+ if not deps and not self.always_copy:
+ return
+ elif requirement is not None and dist.key != requirement.key:
+ log.warn("Skipping dependencies for %s", dist)
+ return # XXX this is not the distribution we were looking for
+ elif requirement is None or dist not in requirement:
+ # if we wound up with a different version, resolve what we've got
+ distreq = dist.as_requirement()
+ requirement = requirement or distreq
+ requirement = Requirement(
+ distreq.project_name, distreq.specs, requirement.extras
+ )
+ if dist.has_metadata('dependency_links.txt'):
+ self.package_index.add_find_links(
+ dist.get_metadata_lines('dependency_links.txt')
+ )
+ log.info("Processing dependencies for %s", requirement)
+ try:
+ distros = WorkingSet([]).resolve(
+ [requirement], self.local_index, self.easy_install
+ )
+ except DistributionNotFound, e:
+ raise DistutilsError(
+ "Could not find required distribution %s" % e.args
+ )
+ except VersionConflict, e:
+ raise DistutilsError(
+ "Installed distribution %s conflicts with requirement %s"
+ % e.args
+ )
+ if self.always_copy:
+ # Force all the relevant distros to be copied or activated
+ for dist in distros:
+ if dist.key not in self.installed_projects:
+ self.easy_install(dist.as_requirement())
+
+ def should_unzip(self, dist):
+ if self.zip_ok is not None:
+ return not self.zip_ok
+ if dist.has_metadata('not-zip-safe'):
+ return True
+ if not dist.has_metadata('zip-safe'):
+ return True
+ return False
+
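+    # Decision summary for the above (zip_ok is the --zip-ok/-z option):
+    #
+    #     zip_ok given           -> unzip iff zip_ok is false
+    #     'not-zip-safe' flag    -> unzip
+    #     no safety flag at all  -> unzip (unknown, so play it safe)
+    #     'zip-safe' flag        -> leave zipped
+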
+ def maybe_move(self, spec, dist_filename, setup_base):
+ dst = os.path.join(self.build_directory, spec.key)
+ if os.path.exists(dst):
+ log.warn(
+ "%r already exists in %s; build directory %s will not be kept",
+ spec.key, self.build_directory, setup_base
+ )
+ return setup_base
+ if os.path.isdir(dist_filename):
+ setup_base = dist_filename
+ else:
+ if os.path.dirname(dist_filename)==setup_base:
+ os.unlink(dist_filename) # get it out of the tmp dir
+ contents = os.listdir(setup_base)
+ if len(contents)==1:
+ dist_filename = os.path.join(setup_base,contents[0])
+ if os.path.isdir(dist_filename):
+ # if the only thing there is a directory, move it instead
+ setup_base = dist_filename
+ ensure_directory(dst); shutil.move(setup_base, dst)
+ return dst
+
+ def install_wrapper_scripts(self, dist):
+ if not self.exclude_scripts:
+ for args in get_script_args(dist):
+ self.write_script(*args)
+
+ def install_script(self, dist, script_name, script_text, dev_path=None):
+ """Generate a legacy script wrapper and install it"""
+ spec = str(dist.as_requirement())
+
+ if dev_path:
+ script_text = get_script_header(script_text) + (
+ "# EASY-INSTALL-DEV-SCRIPT: %(spec)r,%(script_name)r\n"
+ "__requires__ = %(spec)r\n"
+ "from pkg_resources import require; require(%(spec)r)\n"
+ "del require\n"
+ "__file__ = %(dev_path)r\n"
+ "execfile(__file__)\n"
+ ) % locals()
+ else:
+ script_text = get_script_header(script_text) + (
+ "# EASY-INSTALL-SCRIPT: %(spec)r,%(script_name)r\n"
+ "__requires__ = %(spec)r\n"
+ "import pkg_resources\n"
+ "pkg_resources.run_script(%(spec)r, %(script_name)r)\n"
+ ) % locals()
+
+ self.write_script(script_name, script_text)
+
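+    # A generated legacy wrapper for a script 'foo' of MyProject 0.1
+    # would read roughly (header and names illustrative):
+    #
+    #     #!/usr/bin/python
+    #     # EASY-INSTALL-SCRIPT: 'MyProject==0.1','foo'
+    #     __requires__ = 'MyProject==0.1'
+    #     import pkg_resources
+    #     pkg_resources.run_script('MyProject==0.1', 'foo')
+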
+ def write_script(self, script_name, contents, mode="t", blockers=()):
+ """Write an executable file to the scripts directory"""
+ self.delete_blockers( # clean up old .py/.pyw w/o a script
+ [os.path.join(self.script_dir,x) for x in blockers])
+ log.info("Installing %s script to %s", script_name, self.script_dir)
+ target = os.path.join(self.script_dir, script_name)
+ self.add_output(target)
+
+ if not self.dry_run:
+ ensure_directory(target)
+ f = open(target,"w"+mode)
+ f.write(contents)
+ f.close()
+ try:
+ os.chmod(target,0755)
+ except (AttributeError, os.error):
+ pass
+
+ def install_eggs(self, spec, dist_filename, tmpdir):
+ # .egg dirs or files are already built, so just return them
+ if dist_filename.lower().endswith('.egg'):
+ return [self.install_egg(dist_filename, tmpdir)]
+ elif dist_filename.lower().endswith('.exe'):
+ return [self.install_exe(dist_filename, tmpdir)]
+
+ # Anything else, try to extract and build
+ setup_base = tmpdir
+ if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
+ unpack_archive(dist_filename, tmpdir, self.unpack_progress)
+ elif os.path.isdir(dist_filename):
+ setup_base = os.path.abspath(dist_filename)
+
+ if (setup_base.startswith(tmpdir) # something we downloaded
+ and self.build_directory and spec is not None
+ ):
+ setup_base = self.maybe_move(spec, dist_filename, setup_base)
+
+ # Find the setup.py file
+ setup_script = os.path.join(setup_base, 'setup.py')
+
+ if not os.path.exists(setup_script):
+ setups = glob(os.path.join(setup_base, '*', 'setup.py'))
+ if not setups:
+ raise DistutilsError(
+ "Couldn't find a setup script in %s" % dist_filename
+ )
+ if len(setups)>1:
+ raise DistutilsError(
+ "Multiple setup scripts in %s" % dist_filename
+ )
+ setup_script = setups[0]
+
+ # Now run it, and return the result
+ if self.editable:
+ log.warn(self.report_editable(spec, setup_script))
+ return []
+ else:
+ return self.build_and_install(setup_script, setup_base)
+
+ def egg_distribution(self, egg_path):
+ if os.path.isdir(egg_path):
+ metadata = PathMetadata(egg_path,os.path.join(egg_path,'EGG-INFO'))
+ else:
+ metadata = EggMetadata(zipimport.zipimporter(egg_path))
+ return Distribution.from_filename(egg_path,metadata=metadata)
+
+ def install_egg(self, egg_path, tmpdir):
+ destination = os.path.join(self.install_dir,os.path.basename(egg_path))
+ destination = os.path.abspath(destination)
+ if not self.dry_run:
+ ensure_directory(destination)
+
+ dist = self.egg_distribution(egg_path)
+ self.check_conflicts(dist)
+ if not samefile(egg_path, destination):
+ if os.path.isdir(destination) and not os.path.islink(destination):
+ dir_util.remove_tree(destination, dry_run=self.dry_run)
+ elif os.path.exists(destination):
+ self.execute(os.unlink,(destination,),"Removing "+destination)
+ uncache_zipdir(destination)
+ if os.path.isdir(egg_path):
+ if egg_path.startswith(tmpdir):
+ f,m = shutil.move, "Moving"
+ else:
+ f,m = shutil.copytree, "Copying"
+ elif self.should_unzip(dist):
+ self.mkpath(destination)
+ f,m = self.unpack_and_compile, "Extracting"
+ elif egg_path.startswith(tmpdir):
+ f,m = shutil.move, "Moving"
+ else:
+ f,m = shutil.copy2, "Copying"
+
+ self.execute(f, (egg_path, destination),
+ (m+" %s to %s") %
+ (os.path.basename(egg_path),os.path.dirname(destination)))
+
+ self.add_output(destination)
+ return self.egg_distribution(destination)
+
+ def install_exe(self, dist_filename, tmpdir):
+ # See if it's valid, get data
+ cfg = extract_wininst_cfg(dist_filename)
+ if cfg is None:
+ raise DistutilsError(
+ "%s is not a valid distutils Windows .exe" % dist_filename
+ )
+ # Create a dummy distribution object until we build the real distro
+ dist = Distribution(None,
+ project_name=cfg.get('metadata','name'),
+ version=cfg.get('metadata','version'), platform="win32"
+ )
+
+ # Convert the .exe to an unpacked egg
+ egg_path = dist.location = os.path.join(tmpdir, dist.egg_name()+'.egg')
+ egg_tmp = egg_path+'.tmp'
+ egg_info = os.path.join(egg_tmp, 'EGG-INFO')
+ pkg_inf = os.path.join(egg_info, 'PKG-INFO')
+ ensure_directory(pkg_inf) # make sure EGG-INFO dir exists
+ dist._provider = PathMetadata(egg_tmp, egg_info) # XXX
+ self.exe_to_egg(dist_filename, egg_tmp)
+
+ # Write EGG-INFO/PKG-INFO
+ if not os.path.exists(pkg_inf):
+ f = open(pkg_inf,'w')
+ f.write('Metadata-Version: 1.0\n')
+ for k,v in cfg.items('metadata'):
+                if k != 'target_version':
+ f.write('%s: %s\n' % (k.replace('_','-').title(), v))
+ f.close()
+ script_dir = os.path.join(egg_info,'scripts')
+ self.delete_blockers( # delete entry-point scripts to avoid duping
+ [os.path.join(script_dir,args[0]) for args in get_script_args(dist)]
+ )
+ # Build .egg file from tmpdir
+ bdist_egg.make_zipfile(
+ egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run
+ )
+ # install the .egg
+ return self.install_egg(egg_path, tmpdir)
+
+ def exe_to_egg(self, dist_filename, egg_tmp):
+ """Extract a bdist_wininst to the directories an egg would use"""
+ # Check for .pth file and set up prefix translations
+ prefixes = get_exe_prefixes(dist_filename)
+ to_compile = []
+ native_libs = []
+ top_level = {}
+
+ def process(src,dst):
+ for old,new in prefixes:
+ if src.startswith(old):
+ src = new+src[len(old):]
+ parts = src.split('/')
+ dst = os.path.join(egg_tmp, *parts)
+ dl = dst.lower()
+ if dl.endswith('.pyd') or dl.endswith('.dll'):
+ top_level[os.path.splitext(parts[0])[0]] = 1
+ native_libs.append(src)
+ elif dl.endswith('.py') and old!='SCRIPTS/':
+ top_level[os.path.splitext(parts[0])[0]] = 1
+ to_compile.append(dst)
+ return dst
+ if not src.endswith('.pth'):
+ log.warn("WARNING: can't process %s", src)
+ return None
+
+ # extract, tracking .pyd/.dll->native_libs and .py -> to_compile
+ unpack_archive(dist_filename, egg_tmp, process)
+ stubs = []
+ for res in native_libs:
+ if res.lower().endswith('.pyd'): # create stubs for .pyd's
+ parts = res.split('/')
+ resource, parts[-1] = parts[-1], parts[-1][:-1]
+ pyfile = os.path.join(egg_tmp, *parts)
+ to_compile.append(pyfile); stubs.append(pyfile)
+ bdist_egg.write_stub(resource, pyfile)
+
+ self.byte_compile(to_compile) # compile .py's
+ bdist_egg.write_safety_flag(os.path.join(egg_tmp,'EGG-INFO'),
+ bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag
+
+ for name in 'top_level','native_libs':
+ if locals()[name]:
+ txt = os.path.join(egg_tmp, 'EGG-INFO', name+'.txt')
+ if not os.path.exists(txt):
+ open(txt,'w').write('\n'.join(locals()[name])+'\n')
+
+ def check_conflicts(self, dist):
+ """Verify that there are no conflicting "old-style" packages"""
+
+ return dist # XXX temporarily disable until new strategy is stable
+ from imp import find_module, get_suffixes
+ from glob import glob
+
+ blockers = []
+ names = dict.fromkeys(dist._get_metadata('top_level.txt')) # XXX private attr
+
+ exts = {'.pyc':1, '.pyo':1} # get_suffixes() might leave one out
+ for ext,mode,typ in get_suffixes():
+ exts[ext] = 1
+
+ for path,files in expand_paths([self.install_dir]+self.all_site_dirs):
+ for filename in files:
+ base,ext = os.path.splitext(filename)
+ if base in names:
+ if not ext:
+ # no extension, check for package
+ try:
+ f, filename, descr = find_module(base, [path])
+ except ImportError:
+ continue
+ else:
+ if f: f.close()
+ if filename not in blockers:
+ blockers.append(filename)
+ elif ext in exts and base!='site': # XXX ugh
+ blockers.append(os.path.join(path,filename))
+ if blockers:
+ self.found_conflicts(dist, blockers)
+
+ return dist
+
+ def found_conflicts(self, dist, blockers):
+ if self.delete_conflicting:
+ log.warn("Attempting to delete conflicting packages:")
+ return self.delete_blockers(blockers)
+
+ msg = """\
+-------------------------------------------------------------------------
+CONFLICT WARNING:
+
+The following modules or packages have the same names as modules or
+packages being installed, and will be *before* the installed packages in
+Python's search path. You MUST remove all of the relevant files and
+directories before you will be able to use the package(s) you are
+installing:
+
+ %s
+
+""" % '\n '.join(blockers)
+
+ if self.ignore_conflicts_at_my_risk:
+ msg += """\
+(Note: you can run EasyInstall on '%s' with the
+--delete-conflicting option to attempt deletion of the above files
+and/or directories.)
+""" % dist.project_name
+ else:
+ msg += """\
+Note: you can attempt this installation again with EasyInstall, and use
+either the --delete-conflicting (-D) option or the
+--ignore-conflicts-at-my-risk option, to either delete the above files
+and directories, or to ignore the conflicts, respectively. Note that if
+you ignore the conflicts, the installed package(s) may not work.
+"""
+ msg += """\
+-------------------------------------------------------------------------
+"""
+ sys.stderr.write(msg)
+ sys.stderr.flush()
+ if not self.ignore_conflicts_at_my_risk:
+ raise DistutilsError("Installation aborted due to conflicts")
+
+ def installation_report(self, req, dist, what="Installed"):
+ """Helpful installation message for display to package users"""
+ msg = "\n%(what)s %(eggloc)s%(extras)s"
+ if self.multi_version and not self.no_report:
+ msg += """
+
+Because this distribution was installed with --multi-version or --install-dir,
+before you can import modules from this package in an application, you
+will need to 'import pkg_resources' and then use a 'require()' call
+similar to one of these examples, in order to select the desired version:
+
+ pkg_resources.require("%(name)s") # latest installed version
+ pkg_resources.require("%(name)s==%(version)s") # this exact version
+ pkg_resources.require("%(name)s>=%(version)s") # this version or higher
+"""
+ if self.install_dir not in map(normalize_path,sys.path):
+ msg += """
+
+Note also that the installation directory must be on sys.path at runtime for
+this to work. (e.g. by being the application's script directory, by being on
+PYTHONPATH, or by being added to sys.path by your code.)
+"""
+ eggloc = dist.location
+ name = dist.project_name
+ version = dist.version
+ extras = '' # TODO: self.report_extras(req, dist)
+ return msg % locals()
+
+ def report_editable(self, spec, setup_script):
+ dirname = os.path.dirname(setup_script)
+ python = sys.executable
+ return """\nExtracted editable version of %(spec)s to %(dirname)s
+
+If it uses setuptools in its setup script, you can activate it in
+"development" mode by going to that directory and running::
+
+ %(python)s setup.py develop
+
+See the setuptools documentation for the "develop" command for more info.
+""" % locals()
+
+ def run_setup(self, setup_script, setup_base, args):
+ sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
+ sys.modules.setdefault('distutils.command.egg_info', egg_info)
+
+ args = list(args)
+ if self.verbose>2:
+ v = 'v' * (self.verbose - 1)
+ args.insert(0,'-'+v)
+ elif self.verbose<2:
+ args.insert(0,'-q')
+ if self.dry_run:
+ args.insert(0,'-n')
+ log.info(
+ "Running %s %s", setup_script[len(setup_base)+1:], ' '.join(args)
+ )
+ try:
+ run_setup(setup_script, args)
+ except SystemExit, v:
+ raise DistutilsError("Setup script exited with %s" % (v.args[0],))
+
+ def build_and_install(self, setup_script, setup_base):
+ args = ['bdist_egg', '--dist-dir']
+ dist_dir = tempfile.mkdtemp(
+ prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
+ )
+ try:
+ args.append(dist_dir)
+ self.run_setup(setup_script, setup_base, args)
+ all_eggs = Environment([dist_dir])
+ eggs = []
+ for key in all_eggs:
+ for dist in all_eggs[key]:
+ eggs.append(self.install_egg(dist.location, setup_base))
+ if not eggs and not self.dry_run:
+ log.warn("No eggs found in %s (setup script problem?)",
+ dist_dir)
+ return eggs
+ finally:
+ rmtree(dist_dir)
+ log.set_verbosity(self.verbose) # restore our log verbosity
+
+ def update_pth(self,dist):
+ if self.pth_file is None:
+ return
+
+ for d in self.pth_file[dist.key]: # drop old entries
+ if self.multi_version or d.location != dist.location:
+ log.info("Removing %s from easy-install.pth file", d)
+ self.pth_file.remove(d)
+ if d.location in self.shadow_path:
+ self.shadow_path.remove(d.location)
+
+ if not self.multi_version:
+ if dist.location in self.pth_file.paths:
+ log.info(
+ "%s is already the active version in easy-install.pth",
+ dist
+ )
+ else:
+ log.info("Adding %s to easy-install.pth file", dist)
+ self.pth_file.add(dist) # add new entry
+ if dist.location not in self.shadow_path:
+ self.shadow_path.append(dist.location)
+
+ if not self.dry_run:
+
+ self.pth_file.save()
+
+ if dist.key=='setuptools':
+ # Ensure that setuptools itself never becomes unavailable!
+ # XXX should this check for latest version?
+ filename = os.path.join(self.install_dir,'setuptools.pth')
+ if os.path.islink(filename): os.unlink(filename)
+ f = open(filename, 'wt')
+ f.write(self.pth_file.make_relative(dist.location)+'\n')
+ f.close()
+
+ def unpack_progress(self, src, dst):
+ # Progress filter for unpacking
+ log.debug("Unpacking %s to %s", src, dst)
+ return dst # only unpack-and-compile skips files for dry run
+
+ def unpack_and_compile(self, egg_path, destination):
+ to_compile = []
+
+ def pf(src,dst):
+ if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
+ to_compile.append(dst)
+ self.unpack_progress(src,dst)
+ return not self.dry_run and dst or None
+
+ unpack_archive(egg_path, destination, pf)
+ self.byte_compile(to_compile)
+
+
+ def byte_compile(self, to_compile):
+ from distutils.util import byte_compile
+ try:
+ # try to make the byte compile messages quieter
+ log.set_verbosity(self.verbose - 1)
+
+ byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
+ if self.optimize:
+ byte_compile(
+ to_compile, optimize=self.optimize, force=1,
+ dry_run=self.dry_run
+ )
+ finally:
+ log.set_verbosity(self.verbose) # restore original verbosity
+
+ def no_default_version_msg(self):
+ return """bad install directory or PYTHONPATH
+
+You are attempting to install a package to a directory that is not
+on PYTHONPATH and which Python does not read ".pth" files from. The
+installation directory you specified (via --install-dir, --prefix, or
+the distutils default setting) was:
+
+ %s
+
+and your PYTHONPATH environment variable currently contains:
+
+ %r
+
+Here are some of your options for correcting the problem:
+
+* You can choose a different installation directory, i.e., one that is
+ on PYTHONPATH or supports .pth files
+
+* You can add the installation directory to the PYTHONPATH environment
+ variable. (It must then also be on PYTHONPATH whenever you run
+ Python and want to use the package(s) you are installing.)
+
+* You can set up the installation directory to support ".pth" files by
+ using one of the approaches described here:
+
+ http://peak.telecommunity.com/EasyInstall.html#custom-installation-locations
+
+Please make the appropriate changes for your system and try again.""" % (
+ self.install_dir, os.environ.get('PYTHONPATH','')
+ )
+
+ def install_site_py(self):
+ """Make sure there's a site.py in the target dir, if needed"""
+
+ if self.sitepy_installed:
+ return # already did it, or don't need to
+
+ sitepy = os.path.join(self.install_dir, "site.py")
+ source = resource_string("setuptools", "site-patch.py")
+ current = ""
+
+ if os.path.exists(sitepy):
+ log.debug("Checking existing site.py in %s", self.install_dir)
+ current = open(sitepy,'rb').read()
+ if not current.startswith('def __boot():'):
+ raise DistutilsError(
+ "%s is not a setuptools-generated site.py; please"
+ " remove it." % sitepy
+ )
+
+ if current != source:
+ log.info("Creating %s", sitepy)
+ if not self.dry_run:
+ ensure_directory(sitepy)
+ f = open(sitepy,'wb')
+ f.write(source)
+ f.close()
+ self.byte_compile([sitepy])
+
+ self.sitepy_installed = True
+
+ INSTALL_SCHEMES = dict(
+ posix = dict(
+ install_dir = '$base/lib/python$py_version_short/site-packages',
+ script_dir = '$base/bin',
+ ),
+ )
+
+ DEFAULT_SCHEME = dict(
+ install_dir = '$base/Lib/site-packages',
+ script_dir = '$base/Scripts',
+ )
+
+ def _expand(self, *attrs):
+ config_vars = self.get_finalized_command('install').config_vars
+
+ if self.prefix:
+ # Set default install_dir/scripts from --prefix
+ config_vars = config_vars.copy()
+ config_vars['base'] = self.prefix
+ scheme = self.INSTALL_SCHEMES.get(os.name,self.DEFAULT_SCHEME)
+ for attr,val in scheme.items():
+ if getattr(self,attr,None) is None:
+ setattr(self,attr,val)
+
+ from distutils.util import subst_vars
+ for attr in attrs:
+ val = getattr(self, attr)
+ if val is not None:
+ val = subst_vars(val, config_vars)
+ if os.name == 'posix':
+ val = os.path.expanduser(val)
+ setattr(self, attr, val)
+
+
+def get_site_dirs():
+ # return a list of 'site' dirs
+ sitedirs = filter(None,os.environ.get('PYTHONPATH','').split(os.pathsep))
+ prefixes = [sys.prefix]
+ if sys.exec_prefix != sys.prefix:
+ prefixes.append(sys.exec_prefix)
+ for prefix in prefixes:
+ if prefix:
+ if sys.platform in ('os2emx', 'riscos'):
+ sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
+ elif os.sep == '/':
+ sitedirs.extend([os.path.join(prefix,
+ "lib",
+ "python" + sys.version[:3],
+ "site-packages"),
+ os.path.join(prefix, "lib", "site-python")])
+ else:
+ sitedirs.extend(
+ [prefix, os.path.join(prefix, "lib", "site-packages")]
+ )
+ if sys.platform == 'darwin':
+ # for framework builds *only* we add the standard Apple
+ # locations. Currently only per-user, but /Library and
+ # /Network/Library could be added too
+ if 'Python.framework' in prefix:
+ home = os.environ.get('HOME')
+ if home:
+ sitedirs.append(
+ os.path.join(home,
+ 'Library',
+ 'Python',
+ sys.version[:3],
+ 'site-packages'))
+ for plat_specific in (0,1):
+ site_lib = get_python_lib(plat_specific)
+ if site_lib not in sitedirs: sitedirs.append(site_lib)
+
+ sitedirs = map(normalize_path, sitedirs)
+ return sitedirs
+
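+# On a typical posix layout (illustrative), the result contains any
+# PYTHONPATH entries, <prefix>/lib/pythonX.Y/site-packages,
+# <prefix>/lib/site-python, the per-user framework path on Mac OS X
+# framework builds, and distutils' get_python_lib() values, each
+# passed through normalize_path().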
+
+def expand_paths(inputs):
+ """Yield sys.path directories that might contain "old-style" packages"""
+
+ seen = {}
+
+ for dirname in inputs:
+ dirname = normalize_path(dirname)
+ if dirname in seen:
+ continue
+
+ seen[dirname] = 1
+ if not os.path.isdir(dirname):
+ continue
+
+ files = os.listdir(dirname)
+ yield dirname, files
+
+ for name in files:
+ if not name.endswith('.pth'):
+ # We only care about the .pth files
+ continue
+ if name in ('easy-install.pth','setuptools.pth'):
+ # Ignore .pth files that we control
+ continue
+
+ # Read the .pth file
+ f = open(os.path.join(dirname,name))
+ lines = list(yield_lines(f))
+ f.close()
+
+ # Yield existing non-dupe, non-import directory lines from it
+ for line in lines:
+ if not line.startswith("import"):
+ line = normalize_path(line.rstrip())
+ if line not in seen:
+ seen[line] = 1
+ if not os.path.isdir(line):
+ continue
+ yield line, os.listdir(line)
+
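+# Example flow (paths illustrative): expand_paths(['/site']) first
+# yields '/site' with its listing, then reads each third-party .pth
+# file found there (easy-install.pth and setuptools.pth are skipped)
+# and yields every existing, not-yet-seen directory those files name.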
+
+def extract_wininst_cfg(dist_filename):
+ """Extract configuration data from a bdist_wininst .exe
+
+ Returns a ConfigParser.RawConfigParser, or None
+ """
+ f = open(dist_filename,'rb')
+ try:
+ endrec = zipfile._EndRecData(f)
+ if endrec is None:
+ return None
+
+ prepended = (endrec[9] - endrec[5]) - endrec[6]
+ if prepended < 12: # no wininst data here
+ return None
+ f.seek(prepended-12)
+
+ import struct, StringIO, ConfigParser
+ tag, cfglen, bmlen = struct.unpack("<iii",f.read(12))
+ if tag not in (0x1234567A, 0x1234567B):
+ return None # not a valid tag
+
+ f.seek(prepended-(12+cfglen+bmlen))
+ cfg = ConfigParser.RawConfigParser({'version':'','target_version':''})
+ try:
+ cfg.readfp(StringIO.StringIO(f.read(cfglen).split(chr(0),1)[0]))
+ except ConfigParser.Error:
+ return None
+ if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
+ return None
+ return cfg
+
+ finally:
+ f.close()
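+
+# Layout assumed by the parser above (bdist_wininst convention): the
+# data prepended to the embedded zip archive ends with the config text,
+# a bitmap, and a 12-byte '<iii' trailer of (tag, config length, bitmap
+# length); tag 0x1234567A or 0x1234567B marks a valid installer.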
+
+
+def get_exe_prefixes(exe_filename):
+ """Get exe->egg path translations for a given .exe file"""
+
+ prefixes = [
+ ('PURELIB/', ''),
+ ('PLATLIB/', ''),
+ ('SCRIPTS/', 'EGG-INFO/scripts/')
+ ]
+ z = zipfile.ZipFile(exe_filename)
+ try:
+ for info in z.infolist():
+ name = info.filename
+ parts = name.split('/')
+ if len(parts)==3 and parts[2]=='PKG-INFO':
+ if parts[1].endswith('.egg-info'):
+ prefixes.insert(0,('/'.join(parts[:2]), 'EGG-INFO/'))
+ break
+            if len(parts) != 2 or not name.endswith('.pth'):
+ continue
+ if name.endswith('-nspkg.pth'):
+ continue
+ if parts[0] in ('PURELIB','PLATLIB'):
+ for pth in yield_lines(z.read(name)):
+ pth = pth.strip().replace('\\','/')
+ if not pth.startswith('import'):
+ prefixes.append((('%s/%s/' % (parts[0],pth)), ''))
+ finally:
+ z.close()
+
+ prefixes.sort(); prefixes.reverse()
+ return prefixes
+
+
+def parse_requirement_arg(spec):
+ try:
+ return Requirement.parse(spec)
+ except ValueError:
+ raise DistutilsError(
+ "Not a URL, existing file, or requirement spec: %r" % (spec,)
+ )
+
+class PthDistributions(Environment):
+ """A .pth file with Distribution paths in it"""
+
+ dirty = False
+
+ def __init__(self, filename):
+ self.filename = filename
+ self.basedir = normalize_path(os.path.dirname(self.filename))
+ self._load(); Environment.__init__(self, [], None, None)
+ for path in yield_lines(self.paths):
+ map(self.add, find_distributions(path, True))
+
+ def _load(self):
+ self.paths = []
+ saw_import = False
+ seen = {}
+ if os.path.isfile(self.filename):
+ for line in open(self.filename,'rt'):
+ if line.startswith('import'):
+ saw_import = True
+ continue
+ path = line.rstrip()
+ self.paths.append(path)
+ if not path.strip() or path.strip().startswith('#'):
+ continue
+ # skip non-existent paths, in case somebody deleted a package
+ # manually, and duplicate paths as well
+ path = self.paths[-1] = normalize_path(
+ os.path.join(self.basedir,path)
+ )
+ if not os.path.exists(path) or path in seen:
+ self.paths.pop() # skip it
+ self.dirty = True # we cleaned up, so we're dirty now :)
+ continue
+ seen[path] = 1
+
+ if self.paths and not saw_import:
+ self.dirty = True # ensure anything we touch has import wrappers
+ while self.paths and not self.paths[-1].strip():
+ self.paths.pop()
+
+ def save(self):
+ """Write changed .pth file back to disk"""
+ if not self.dirty:
+ return
+
+ data = '\n'.join(map(self.make_relative,self.paths))
+ if data:
+ log.debug("Saving %s", self.filename)
+ data = (
+ "import sys; sys.__plen = len(sys.path)\n"
+ "%s\n"
+ "import sys; new=sys.path[sys.__plen:];"
+ " del sys.path[sys.__plen:];"
+ " p=getattr(sys,'__egginsert',0); sys.path[p:p]=new;"
+ " sys.__egginsert = p+len(new)\n"
+ ) % data
+
+ if os.path.islink(self.filename):
+ os.unlink(self.filename)
+ f = open(self.filename,'wb')
+ f.write(data); f.close()
+
+ elif os.path.exists(self.filename):
+ log.debug("Deleting empty %s", self.filename)
+ os.unlink(self.filename)
+
+ self.dirty = False
+
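+    # A non-empty easy-install.pth written above has this shape (entry
+    # illustrative; the closing import is one physical line, wrapped
+    # here for display):
+    #
+    #     import sys; sys.__plen = len(sys.path)
+    #     MyProject-0.1-py2.4.egg
+    #     import sys; new=sys.path[sys.__plen:];
+    #       del sys.path[sys.__plen:]; p=getattr(sys,'__egginsert',0);
+    #       sys.path[p:p]=new; sys.__egginsert = p+len(new)
+    #
+    # The bracketing import lines splice the listed entries toward the
+    # front of sys.path, so eggs shadow older non-egg installs.
+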
+ def add(self,dist):
+ """Add `dist` to the distribution map"""
+ if dist.location not in self.paths:
+ self.paths.append(dist.location); self.dirty = True
+ Environment.add(self,dist)
+
+ def remove(self,dist):
+ """Remove `dist` from the distribution map"""
+ while dist.location in self.paths:
+ self.paths.remove(dist.location); self.dirty = True
+ Environment.remove(self,dist)
+
+
+ def make_relative(self,path):
+ if normalize_path(os.path.dirname(path))==self.basedir:
+ return os.path.basename(path)
+ return path
+
+
+def get_script_header(script_text, executable=sys_executable):
+ """Create a #! line, getting options (if any) from script_text"""
+ from distutils.command.build_scripts import first_line_re
+ first, rest = (script_text+'\n').split('\n',1)
+ match = first_line_re.match(first)
+ options = ''
+ if match:
+ script_text = rest
+ options = match.group(1) or ''
+ if options:
+ options = ' '+options
+ return "#!%(executable)s%(options)s\n" % locals()
+
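+# For example (hypothetical script text):
+#
+#     get_script_header("#!/usr/bin/python -O\nprint 'hi'\n")
+#
+# yields a '#!' line that points at sys.executable, with the ' -O'
+# option carried over from the original interpreter line.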
+
+def auto_chmod(func, arg, exc):
+ if func is os.remove and os.name=='nt':
+ os.chmod(arg, stat.S_IWRITE)
+ return func(arg)
+ exc = sys.exc_info()
+ raise exc[0], (exc[1][0], exc[1][1] + (" %s %s" % (func,arg)))
+
+
+def uncache_zipdir(path):
+ """Ensure that the zip directory cache doesn't have stale info for path"""
+ from zipimport import _zip_directory_cache as zdc
+ if path in zdc:
+ del zdc[path]
+ else:
+ path = normalize_path(path)
+ for p in zdc:
+ if normalize_path(p)==path:
+ del zdc[p]
+ return
+
+
+def get_script_args(dist, executable=sys_executable):
+ """Yield write_script() argument tuples for a distribution's entrypoints"""
+ spec = str(dist.as_requirement())
+ header = get_script_header("", executable)
+ for group in 'console_scripts', 'gui_scripts':
+ for name,ep in dist.get_entry_map(group).items():
+ script_text = (
+ "# EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r\n"
+ "__requires__ = %(spec)r\n"
+ "import sys\n"
+ "from pkg_resources import load_entry_point\n"
+ "\n"
+ "sys.exit(\n"
+ " load_entry_point(%(spec)r, %(group)r, %(name)r)()\n"
+ ")\n"
+ ) % locals()
+ if sys.platform=='win32':
+ # On Windows, add a .py extension and an .exe launcher
+ if group=='gui_scripts':
+ ext, launcher = '-script.pyw', 'gui.exe'
+ old = ['.pyw']
+ new_header = re.sub('(?i)python.exe','pythonw.exe',header)
+ else:
+ ext, launcher = '-script.py', 'cli.exe'
+ old = ['.py','.pyc','.pyo']
+                    # console scripts must run python.exe, not pythonw.exe
+                    new_header = re.sub('(?i)pythonw.exe','python.exe',header)
+
+ if os.path.exists(new_header[2:-1]):
+ hdr = new_header
+ else:
+ hdr = header
+ yield (name+ext, hdr+script_text, 't', [name+x for x in old])
+ yield (
+ name+'.exe', resource_string('setuptools', launcher),
+ 'b' # write in binary mode
+ )
+ else:
+ # On other platforms, we assume the right thing to do is to
+ # just write the stub with no extension.
+ yield (name, header+script_text)
+
+def rmtree(path, ignore_errors=False, onerror=auto_chmod):
+ """Recursively delete a directory tree.
+
+ This code is taken from the Python 2.4 version of 'shutil', because
+ the 2.3 version doesn't really work right.
+ """
+ if ignore_errors:
+ def onerror(*args):
+ pass
+ elif onerror is None:
+ def onerror(*args):
+ raise
+ names = []
+ try:
+ names = os.listdir(path)
+ except os.error, err:
+ onerror(os.listdir, path, sys.exc_info())
+ for name in names:
+ fullname = os.path.join(path, name)
+ try:
+ mode = os.lstat(fullname).st_mode
+ except os.error:
+ mode = 0
+ if stat.S_ISDIR(mode):
+ rmtree(fullname, ignore_errors, onerror)
+ else:
+ try:
+ os.remove(fullname)
+ except os.error, err:
+ onerror(os.remove, fullname, sys.exc_info())
+ try:
+ os.rmdir(path)
+ except os.error:
+ onerror(os.rmdir, path, sys.exc_info())
+
+
+def main(argv=None, **kw):
+ from setuptools import setup
+ from setuptools.dist import Distribution
+ import distutils.core
+
+ USAGE = """\
+usage: %(script)s [options] requirement_or_url ...
+ or: %(script)s --help
+"""
+
+ def gen_usage (script_name):
+ script = os.path.basename(script_name)
+ return USAGE % vars()
+
+ def with_ei_usage(f):
+ old_gen_usage = distutils.core.gen_usage
+ try:
+ distutils.core.gen_usage = gen_usage
+ return f()
+ finally:
+ distutils.core.gen_usage = old_gen_usage
+
+ class DistributionWithoutHelpCommands(Distribution):
+ def _show_help(self,*args,**kw):
+ with_ei_usage(lambda: Distribution._show_help(self,*args,**kw))
+
+ if argv is None:
+ argv = sys.argv[1:]
+
+ with_ei_usage(lambda:
+ setup(
+ script_args = ['-q','easy_install', '-v']+argv,
+ script_name = sys.argv[0] or 'easy_install',
+ distclass=DistributionWithoutHelpCommands, **kw
+ )
+ )
diff --git a/Lib/setuptools/command/egg_info.py b/Lib/setuptools/command/egg_info.py
new file mode 100755
index 0000000..b68fb39
--- /dev/null
+++ b/Lib/setuptools/command/egg_info.py
@@ -0,0 +1,365 @@
+"""setuptools.command.egg_info
+
+Create a distribution's .egg-info directory and contents"""
+
+# This module should be kept compatible with Python 2.3
+import os, re
+from setuptools import Command
+from distutils.errors import *
+from distutils import log
+from setuptools.command.sdist import sdist
+from distutils import file_util
+from distutils.util import convert_path
+from distutils.filelist import FileList
+from pkg_resources import parse_requirements, safe_name, parse_version, \
+ safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename
+from sdist import walk_revctrl
+
+class egg_info(Command):
+ description = "create a distribution's .egg-info directory"
+
+ user_options = [
+ ('egg-base=', 'e', "directory containing .egg-info directories"
+ " (default: top of the source tree)"),
+ ('tag-svn-revision', 'r',
+ "Add subversion revision ID to version number"),
+ ('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"),
+ ('tag-build=', 'b', "Specify explicit tag to add to version number"),
+ ]
+
+ boolean_options = ['tag-date','tag-svn-revision']
+
+ def initialize_options (self):
+ self.egg_name = None
+ self.egg_version = None
+ self.egg_base = None
+ self.egg_info = None
+ self.tag_build = None
+ self.tag_svn_revision = 0
+ self.tag_date = 0
+ self.broken_egg_info = False
+
+ def finalize_options (self):
+ self.egg_name = safe_name(self.distribution.get_name())
+ self.egg_version = self.tagged_version()
+
+ try:
+ list(
+ parse_requirements('%s==%s' % (self.egg_name,self.egg_version))
+ )
+ except ValueError:
+ raise DistutilsOptionError(
+ "Invalid distribution name or version syntax: %s-%s" %
+ (self.egg_name,self.egg_version)
+ )
+
+ if self.egg_base is None:
+ dirs = self.distribution.package_dir
+ self.egg_base = (dirs or {}).get('',os.curdir)
+
+ self.ensure_dirname('egg_base')
+ self.egg_info = to_filename(self.egg_name)+'.egg-info'
+ if self.egg_base != os.curdir:
+ self.egg_info = os.path.join(self.egg_base, self.egg_info)
+ if '-' in self.egg_name: self.check_broken_egg_info()
+
+ # Set package version for the benefit of dumber commands
+ # (e.g. sdist, bdist_wininst, etc.)
+ #
+ self.distribution.metadata.version = self.egg_version
+
+ # If we bootstrapped around the lack of a PKG-INFO, as might be the
+ # case in a fresh checkout, make sure that any special tags get added
+ # to the version info
+ #
+ pd = self.distribution._patched_dist
+ if pd is not None and pd.key==self.egg_name.lower():
+ pd._version = self.egg_version
+ pd._parsed_version = parse_version(self.egg_version)
+ self.distribution._patched_dist = None
+
+
+
+ def write_or_delete_file(self, what, filename, data, force=False):
+ """Write `data` to `filename` or delete if empty
+
+ If `data` is non-empty, this routine is the same as ``write_file()``.
+ If `data` is empty but not ``None``, this is the same as calling
+        ``delete_file(filename)``. If `data` is ``None``, then this is a no-op
+        unless `filename` exists, in which case a warning is issued about the
+        orphaned file (if `force` is false), or the file is deleted (if
+        `force` is true).
+ """
+ if data:
+ self.write_file(what, filename, data)
+ elif os.path.exists(filename):
+ if data is None and not force:
+ log.warn(
+ "%s not set in setup(), but %s exists", what, filename
+ )
+ return
+ else:
+ self.delete_file(filename)
+
+ def write_file(self, what, filename, data):
+ """Write `data` to `filename` (if not a dry run) after announcing it
+
+ `what` is used in a log message to identify what is being written
+ to the file.
+ """
+ log.info("writing %s to %s", what, filename)
+ if not self.dry_run:
+ f = open(filename, 'wb')
+ f.write(data)
+ f.close()
+
+ def delete_file(self, filename):
+ """Delete `filename` (if not a dry run) after announcing it"""
+ log.info("deleting %s", filename)
+ if not self.dry_run:
+ os.unlink(filename)
+
+
+
+
+ def run(self):
+ self.mkpath(self.egg_info)
+ installer = self.distribution.fetch_build_egg
+ for ep in iter_entry_points('egg_info.writers'):
+ writer = ep.load(installer=installer)
+ writer(self, ep.name, os.path.join(self.egg_info,ep.name))
+ self.find_sources()
+
+ def tagged_version(self):
+ version = self.distribution.get_version()
+ if self.tag_build:
+ version+=self.tag_build
+ if self.tag_svn_revision and (
+ os.path.exists('.svn') or os.path.exists('PKG-INFO')
+ ): version += '-r%s' % self.get_svn_revision()
+ if self.tag_date:
+ import time; version += time.strftime("-%Y%m%d")
+ return safe_version(version)
+
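+    # e.g. (hypothetical values): base version '0.5', tag_build 'a1',
+    # svn revision 1421 and tag_date on 2006-04-21 combine into
+    # '0.5a1-r1421-20060421' before safe_version() normalization.
+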
+ def get_svn_revision(self):
+ revision = 0
+ urlre = re.compile('url="([^"]+)"')
+        revre = re.compile(r'committed-rev="(\d+)"')
+ for base,dirs,files in os.walk(os.curdir):
+ if '.svn' not in dirs:
+ dirs[:] = []
+ continue # no sense walking uncontrolled subdirs
+ dirs.remove('.svn')
+ f = open(os.path.join(base,'.svn','entries'))
+ data = f.read()
+ f.close()
+ dirurl = urlre.search(data).group(1) # get repository URL
+ if base==os.curdir:
+ base_url = dirurl+'/' # save the root url
+ elif not dirurl.startswith(base_url):
+ dirs[:] = []
+ continue # not part of the same svn tree, skip it
+ for match in revre.finditer(data):
+ revision = max(revision, int(match.group(1)))
+ return str(revision or get_pkg_info_revision())
+
+ def find_sources(self):
+ """Generate SOURCES.txt manifest file"""
+ manifest_filename = os.path.join(self.egg_info,"SOURCES.txt")
+ mm = manifest_maker(self.distribution)
+ mm.manifest = manifest_filename
+ mm.run()
+ self.filelist = mm.filelist
+
+ def check_broken_egg_info(self):
+ bei = self.egg_name+'.egg-info'
+ if self.egg_base != os.curdir:
+ bei = os.path.join(self.egg_base, bei)
+ if os.path.exists(bei):
+ log.warn(
+ "-"*78+'\n'
+ "Note: Your current .egg-info directory has a '-' in its name;"
+ '\nthis will not work correctly with "setup.py develop".\n\n'
+ 'Please rename %s to %s to correct this problem.\n'+'-'*78,
+ bei, self.egg_info
+ )
+ self.broken_egg_info = self.egg_info
+ self.egg_info = bei # make it work for now
+
+class FileList(FileList):
+ """File list that accepts only existing, platform-independent paths"""
+
+ def append(self, item):
+ path = convert_path(item)
+ if os.path.exists(path):
+ self.files.append(path)
+
+
+
+
+
+
+
+
+
+
+
+class manifest_maker(sdist):
+
+ template = "MANIFEST.in"
+
+ def initialize_options (self):
+ self.use_defaults = 1
+ self.prune = 1
+ self.manifest_only = 1
+ self.force_manifest = 1
+
+ def finalize_options(self):
+ pass
+
+ def run(self):
+ self.filelist = FileList()
+ if not os.path.exists(self.manifest):
+ self.write_manifest() # it must exist so it'll get in the list
+ self.filelist.findall()
+ self.add_defaults()
+ if os.path.exists(self.template):
+ self.read_template()
+ self.prune_file_list()
+ self.filelist.sort()
+ self.filelist.remove_duplicates()
+ self.write_manifest()
+
+ def write_manifest (self):
+ """Write the file list in 'self.filelist' (presumably as filled in
+ by 'add_defaults()' and 'read_template()') to the manifest file
+ named by 'self.manifest'.
+ """
+ files = self.filelist.files
+ if os.sep!='/':
+ files = [f.replace(os.sep,'/') for f in files]
+ self.execute(file_util.write_file, (self.manifest, files),
+ "writing manifest file '%s'" % self.manifest)
+
+
+
+
+
+ def add_defaults(self):
+ sdist.add_defaults(self)
+ self.filelist.append(self.template)
+ self.filelist.append(self.manifest)
+ rcfiles = list(walk_revctrl())
+ if rcfiles:
+ self.filelist.extend(rcfiles)
+ elif os.path.exists(self.manifest):
+ self.read_manifest()
+ ei_cmd = self.get_finalized_command('egg_info')
+ self.filelist.include_pattern("*", prefix=ei_cmd.egg_info)
+
+ def prune_file_list (self):
+ build = self.get_finalized_command('build')
+ base_dir = self.distribution.get_fullname()
+ self.filelist.exclude_pattern(None, prefix=build.build_base)
+ self.filelist.exclude_pattern(None, prefix=base_dir)
+ sep = re.escape(os.sep)
+ self.filelist.exclude_pattern(sep+r'(RCS|CVS|\.svn)'+sep, is_regex=1)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+def write_pkg_info(cmd, basename, filename):
+ log.info("writing %s", filename)
+ if not cmd.dry_run:
+ metadata = cmd.distribution.metadata
+ metadata.version, oldver = cmd.egg_version, metadata.version
+ metadata.name, oldname = cmd.egg_name, metadata.name
+ try:
+ # write unescaped data to PKG-INFO, so older pkg_resources
+ # can still parse it
+ metadata.write_pkg_info(cmd.egg_info)
+ finally:
+ metadata.name, metadata.version = oldname, oldver
+
+ safe = getattr(cmd.distribution,'zip_safe',None)
+ import bdist_egg; bdist_egg.write_safety_flag(cmd.egg_info, safe)
+
+def warn_depends_obsolete(cmd, basename, filename):
+ if os.path.exists(filename):
+ log.warn(
+ "WARNING: 'depends.txt' is not used by setuptools 0.6!\n"
+ "Use the install_requires/extras_require setup() args instead."
+ )
+
+
+def write_requirements(cmd, basename, filename):
+ dist = cmd.distribution
+ data = ['\n'.join(yield_lines(dist.install_requires or ()))]
+ for extra,reqs in (dist.extras_require or {}).items():
+ data.append('\n\n[%s]\n%s' % (extra, '\n'.join(yield_lines(reqs))))
+ cmd.write_or_delete_file("requirements", filename, ''.join(data))
+
+def write_toplevel_names(cmd, basename, filename):
+ pkgs = dict.fromkeys(
+ [k.split('.',1)[0]
+ for k in cmd.distribution.iter_distribution_names()
+ ]
+ )
+ cmd.write_file("top-level names", filename, '\n'.join(pkgs)+'\n')
+
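
For example (names illustrative), if iter_distribution_names() yields 'pkg',
'pkg.sub' and 'toplevel', the file written here contains just the unique
first components:

    pkg
    toplevel

(dict.fromkeys() is used only to deduplicate, so the order of the names is
not significant.)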
+
+
+def overwrite_arg(cmd, basename, filename):
+ write_arg(cmd, basename, filename, True)
+
+def write_arg(cmd, basename, filename, force=False):
+ argname = os.path.splitext(basename)[0]
+ value = getattr(cmd.distribution, argname, None)
+ if value is not None:
+ value = '\n'.join(value)+'\n'
+ cmd.write_or_delete_file(argname, filename, value, force)
+
+def write_entries(cmd, basename, filename):
+ ep = cmd.distribution.entry_points
+
+ if isinstance(ep,basestring) or ep is None:
+ data = ep
+    else:
+ data = []
+ for section, contents in ep.items():
+ if not isinstance(contents,basestring):
+ contents = EntryPoint.parse_group(section, contents)
+ contents = '\n'.join(map(str,contents.values()))
+ data.append('[%s]\n%s\n\n' % (section,contents))
+ data = ''.join(data)
+
+ cmd.write_or_delete_file('entry points', filename, data, True)
+
+def get_pkg_info_revision():
+ # See if we can get a -r### off of PKG-INFO, in case this is an sdist of
+ # a subversion revision
+ #
+ if os.path.exists('PKG-INFO'):
+ f = open('PKG-INFO','rU')
+ for line in f:
+ match = re.match(r"Version:.*-r(\d+)\s*$", line)
+ if match:
+ return int(match.group(1))
+ return 0
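
An illustrative PKG-INFO line that the pattern above would match:

    Version: 1.0dev-r45595

get_pkg_info_revision() would return 45595 for it.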
diff --git a/Lib/setuptools/command/install.py b/Lib/setuptools/command/install.py
new file mode 100644
index 0000000..bfb9af5
--- /dev/null
+++ b/Lib/setuptools/command/install.py
@@ -0,0 +1,101 @@
+import setuptools, sys
+from distutils.command.install import install as _install
+from distutils.errors import DistutilsArgError
+
+class install(_install):
+ """Use easy_install to install the package, w/dependencies"""
+
+ user_options = _install.user_options + [
+ ('old-and-unmanageable', None, "Try not to use this!"),
+ ('single-version-externally-managed', None,
+ "used by system package builders to create 'flat' eggs"),
+ ]
+ boolean_options = _install.boolean_options + [
+ 'old-and-unmanageable', 'single-version-externally-managed',
+ ]
+ new_commands = [
+ ('install_egg_info', lambda self: True),
+ ('install_scripts', lambda self: True),
+ ]
+ _nc = dict(new_commands)
+ sub_commands = [
+ cmd for cmd in _install.sub_commands if cmd[0] not in _nc
+ ] + new_commands
+
+ def initialize_options(self):
+ _install.initialize_options(self)
+ self.old_and_unmanageable = None
+ self.single_version_externally_managed = None
+ self.no_compile = None # make DISTUTILS_DEBUG work right!
+
+ def finalize_options(self):
+ _install.finalize_options(self)
+ if self.root:
+ self.single_version_externally_managed = True
+ elif self.single_version_externally_managed:
+ if not self.root and not self.record:
+ raise DistutilsArgError(
+ "You must specify --record or --root when building system"
+ " packages"
+ )
+
+ def handle_extra_path(self):
+ # We always ignore extra_path, because we install as .egg or .egg-info
+ self.path_file = None
+ self.extra_dirs = ''
+
+ def run(self):
+ # Explicit request for old-style install? Just do it
+ if self.old_and_unmanageable or self.single_version_externally_managed:
+ return _install.run(self)
+
+ # Attempt to detect whether we were called from setup() or by another
+ # command. If we were called by setup(), our caller will be the
+ # 'run_command' method in 'distutils.dist', and *its* caller will be
+ # the 'run_commands' method. If we were called any other way, our
+ # immediate caller *might* be 'run_command', but it won't have been
+ # called by 'run_commands'. This is slightly kludgy, but seems to
+ # work.
+ #
+ caller = sys._getframe(2)
+ caller_module = caller.f_globals.get('__name__','')
+ caller_name = caller.f_code.co_name
+
+ if caller_module != 'distutils.dist' or caller_name!='run_commands':
+ # We weren't called from the command line or setup(), so we
+ # should run in backward-compatibility mode to support bdist_*
+ # commands.
+ _install.run(self)
+ else:
+ self.do_egg_install()
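
A hedged, self-contained sketch of the sys._getframe() mechanism the check
above relies on (the function names here are illustrative):

    import sys

    def inner():
        caller = sys._getframe(1)   # one frame up: inner()'s caller
        return caller.f_globals.get('__name__', ''), caller.f_code.co_name

    def outer():
        return inner()

    # run as a script, outer() returns ('__main__', 'outer'); run() above
    # looks two frames up and tests for ('distutils.dist', 'run_commands')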
+
+
+
+
+
+
+
+
+
+
+
+
+ def do_egg_install(self):
+
+ from setuptools.command.easy_install import easy_install
+
+ cmd = easy_install(
+ self.distribution, args="x", root=self.root, record=self.record,
+ )
+ cmd.ensure_finalized() # finalize before bdist_egg munges install cmd
+
+ self.run_command('bdist_egg')
+ args = [self.distribution.get_command_obj('bdist_egg').egg_output]
+
+ if setuptools.bootstrap_install_from:
+ # Bootstrap self-installation of setuptools
+ args.insert(0, setuptools.bootstrap_install_from)
+
+ cmd.args = args
+ cmd.run()
+ setuptools.bootstrap_install_from = None
diff --git a/Lib/setuptools/command/install_egg_info.py b/Lib/setuptools/command/install_egg_info.py
new file mode 100755
index 0000000..193e91a
--- /dev/null
+++ b/Lib/setuptools/command/install_egg_info.py
@@ -0,0 +1,81 @@
+from setuptools import Command
+from setuptools.archive_util import unpack_archive
+from distutils import log, dir_util
+import os, shutil, pkg_resources
+
+class install_egg_info(Command):
+ """Install an .egg-info directory for the package"""
+
+ description = "Install an .egg-info directory for the package"
+
+ user_options = [
+ ('install-dir=', 'd', "directory to install to"),
+ ]
+
+ def initialize_options(self):
+ self.install_dir = None
+
+ def finalize_options(self):
+ self.set_undefined_options('install_lib',('install_dir','install_dir'))
+ ei_cmd = self.get_finalized_command("egg_info")
+ basename = pkg_resources.Distribution(
+ None, None, ei_cmd.egg_name, ei_cmd.egg_version
+ ).egg_name()+'.egg-info'
+ self.source = ei_cmd.egg_info
+ self.target = os.path.join(self.install_dir, basename)
+ self.outputs = [self.target]
+
+ def run(self):
+ self.run_command('egg_info')
+ target = self.target
+ if os.path.isdir(self.target) and not os.path.islink(self.target):
+ dir_util.remove_tree(self.target, dry_run=self.dry_run)
+ elif os.path.exists(self.target):
+ self.execute(os.unlink,(self.target,),"Removing "+self.target)
+ if not self.dry_run:
+ pkg_resources.ensure_directory(self.target)
+ self.execute(self.copytree, (),
+ "Copying %s to %s" % (self.source, self.target)
+ )
+ self.install_namespaces()
+
+ def get_outputs(self):
+ return self.outputs
+
+ def copytree(self):
+ # Copy the .egg-info tree to site-packages
+ def skimmer(src,dst):
+ # filter out source-control directories; note that 'src' is always
+ # a '/'-separated path, regardless of platform. 'dst' is a
+ # platform-specific path.
+ for skip in '.svn/','CVS/':
+ if src.startswith(skip) or '/'+skip in src:
+ return None
+ self.outputs.append(dst)
+ log.debug("Copying %s to %s", src, dst)
+ return dst
+ unpack_archive(self.source, self.target, skimmer)
+
+ def install_namespaces(self):
+ nsp = (self.distribution.namespace_packages or [])[:]
+ if not nsp: return
+ nsp.sort() # set up shorter names first
+ filename,ext = os.path.splitext(self.target)
+ filename += '-nspkg.pth'; self.outputs.append(filename)
+ log.info("Installing %s",filename)
+ if not self.dry_run:
+ f = open(filename,'wb')
+ for pkg in nsp:
+ pth = tuple(pkg.split('.'))
+ f.write(
+ "import sys,new,os; "
+ "p = os.path.join(sys._getframe(1).f_locals['sitedir'], "
+ "*%(pth)r); "
+ "ie = os.path.exists(os.path.join(p,'__init__.py')); "
+ "m = not ie and "
+ "sys.modules.setdefault(%(pkg)r,new.module(%(pkg)r)); "
+ "mp = (m or []) and m.__dict__.setdefault('__path__',[]); "
+ "(p not in mp) and mp.append(p)\n"
+ % locals()
+ )
+ f.close()
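
For a namespace package 'foo.bar' (illustrative), the loop above writes one
line like the following into the -nspkg.pth file (wrapped here for
readability; the real line is unbroken):

    import sys,new,os; p = os.path.join(sys._getframe(1).f_locals['sitedir'],
    *('foo', 'bar')); ie = os.path.exists(os.path.join(p,'__init__.py'));
    m = not ie and sys.modules.setdefault('foo.bar',new.module('foo.bar'));
    mp = (m or []) and m.__dict__.setdefault('__path__',[]);
    (p not in mp) and mp.append(p)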
diff --git a/Lib/setuptools/command/install_lib.py b/Lib/setuptools/command/install_lib.py
new file mode 100644
index 0000000..96c8dfe
--- /dev/null
+++ b/Lib/setuptools/command/install_lib.py
@@ -0,0 +1,76 @@
+from distutils.command.install_lib import install_lib as _install_lib
+import os
+
+class install_lib(_install_lib):
+ """Don't add compiled flags to filenames of non-Python files"""
+
+ def _bytecode_filenames (self, py_filenames):
+ bytecode_files = []
+ for py_file in py_filenames:
+ if not py_file.endswith('.py'):
+ continue
+ if self.compile:
+ bytecode_files.append(py_file + "c")
+ if self.optimize > 0:
+ bytecode_files.append(py_file + "o")
+
+ return bytecode_files
+
+ def run(self):
+ self.build()
+ outfiles = self.install()
+ if outfiles is not None:
+ # always compile, in case we have any extension stubs to deal with
+ self.byte_compile(outfiles)
+
+ def get_exclusions(self):
+ exclude = {}
+ nsp = self.distribution.namespace_packages
+
+ if (nsp and self.get_finalized_command('install')
+ .single_version_externally_managed
+ ):
+ for pkg in nsp:
+ parts = pkg.split('.')
+ while parts:
+ pkgdir = os.path.join(self.install_dir, *parts)
+ for f in '__init__.py', '__init__.pyc', '__init__.pyo':
+ exclude[os.path.join(pkgdir,f)] = 1
+ parts.pop()
+ return exclude
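
Illustrative result: with namespace_packages = ['foo.bar'] and an install
directory of /usr/lib/python2.4/site-packages (the path is hypothetical),
the exclusion set covers both levels of the namespace:

    .../site-packages/foo/bar/__init__.py  (plus .pyc and .pyo)
    .../site-packages/foo/__init__.py      (plus .pyc and .pyo)

so that only the -nspkg.pth machinery, not a real __init__ module, claims
the namespace directories.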
+
+ def copy_tree(
+ self, infile, outfile,
+ preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1
+ ):
+ assert preserve_mode and preserve_times and not preserve_symlinks
+ exclude = self.get_exclusions()
+
+ if not exclude:
+ return _install_lib.copy_tree(self, infile, outfile)
+
+ # Exclude namespace package __init__.py* files from the output
+
+ from setuptools.archive_util import unpack_directory
+ from distutils import log
+
+ outfiles = []
+
+ def pf(src, dst):
+ if dst in exclude:
+ log.warn("Skipping installation of %s (namespace package)",dst)
+ return False
+
+ log.info("copying %s -> %s", src, os.path.dirname(dst))
+ outfiles.append(dst)
+ return dst
+
+ unpack_directory(infile, outfile, pf)
+ return outfiles
+
+ def get_outputs(self):
+ outputs = _install_lib.get_outputs(self)
+ exclude = self.get_exclusions()
+ if exclude:
+ return [f for f in outputs if f not in exclude]
+ return outputs
diff --git a/Lib/setuptools/command/install_scripts.py b/Lib/setuptools/command/install_scripts.py
new file mode 100755
index 0000000..69558bf
--- /dev/null
+++ b/Lib/setuptools/command/install_scripts.py
@@ -0,0 +1,56 @@
+from distutils.command.install_scripts import install_scripts \
+ as _install_scripts
+from easy_install import get_script_args, sys_executable
+from pkg_resources import Distribution, PathMetadata, ensure_directory
+import os
+from distutils import log
+
+class install_scripts(_install_scripts):
+ """Do normal script install, plus any egg_info wrapper scripts"""
+
+ def initialize_options(self):
+ _install_scripts.initialize_options(self)
+ self.no_ep = False
+
+ def run(self):
+ self.run_command("egg_info")
+ if self.distribution.scripts:
+ _install_scripts.run(self) # run first to set up self.outfiles
+ else:
+ self.outfiles = []
+ if self.no_ep:
+ # don't install entry point scripts into .egg file!
+ return
+
+ ei_cmd = self.get_finalized_command("egg_info")
+ dist = Distribution(
+ ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
+ ei_cmd.egg_name, ei_cmd.egg_version,
+ )
+ bs_cmd = self.get_finalized_command('build_scripts')
+ executable = getattr(bs_cmd,'executable',sys_executable)
+
+ for args in get_script_args(dist, executable):
+ self.write_script(*args)
+
+
+
+
+
+
+
+ def write_script(self, script_name, contents, mode="t", *ignored):
+ """Write an executable file to the scripts directory"""
+ log.info("Installing %s script to %s", script_name, self.install_dir)
+ target = os.path.join(self.install_dir, script_name)
+ self.outfiles.append(target)
+
+ if not self.dry_run:
+ ensure_directory(target)
+ f = open(target,"w"+mode)
+ f.write(contents)
+ f.close()
+ try:
+ os.chmod(target,0755)
+ except (AttributeError, os.error):
+ pass
diff --git a/Lib/setuptools/command/rotate.py b/Lib/setuptools/command/rotate.py
new file mode 100755
index 0000000..8aab312
--- /dev/null
+++ b/Lib/setuptools/command/rotate.py
@@ -0,0 +1,57 @@
+import distutils, os
+from setuptools import Command
+from distutils.util import convert_path
+from distutils import log
+from distutils.errors import *
+
+class rotate(Command):
+ """Delete older distributions"""
+
+ description = "delete older distributions, keeping N newest files"
+ user_options = [
+ ('match=', 'm', "patterns to match (required)"),
+ ('dist-dir=', 'd', "directory where the distributions are"),
+ ('keep=', 'k', "number of matching distributions to keep"),
+ ]
+
+ boolean_options = []
+
+ def initialize_options(self):
+ self.match = None
+ self.dist_dir = None
+ self.keep = None
+
+ def finalize_options(self):
+ if self.match is None:
+ raise DistutilsOptionError(
+ "Must specify one or more (comma-separated) match patterns "
+ "(e.g. '.zip' or '.egg')"
+ )
+ if self.keep is None:
+ raise DistutilsOptionError("Must specify number of files to keep")
+ try:
+ self.keep = int(self.keep)
+ except ValueError:
+ raise DistutilsOptionError("--keep must be an integer")
+ if isinstance(self.match, basestring):
+ self.match = [
+ convert_path(p.strip()) for p in self.match.split(',')
+ ]
+ self.set_undefined_options('bdist',('dist_dir', 'dist_dir'))
+
+ def run(self):
+ self.run_command("egg_info")
+ from glob import glob
+ for pattern in self.match:
+ pattern = self.distribution.get_name()+'*'+pattern
+ files = glob(os.path.join(self.dist_dir,pattern))
+ files = [(os.path.getmtime(f),f) for f in files]
+ files.sort()
+ files.reverse()
+
+ log.info("%d file(s) matching %s", len(files), pattern)
+ files = files[self.keep:]
+ for (t,f) in files:
+ log.info("Deleting %s", f)
+ if not self.dry_run:
+ os.unlink(f)
diff --git a/Lib/setuptools/command/saveopts.py b/Lib/setuptools/command/saveopts.py
new file mode 100755
index 0000000..9c58d72
--- /dev/null
+++ b/Lib/setuptools/command/saveopts.py
@@ -0,0 +1,24 @@
+import distutils, os
+from setuptools import Command
+from setuptools.command.setopt import edit_config, option_base
+
+class saveopts(option_base):
+ """Save command-line options to a file"""
+
+ description = "save supplied options to setup.cfg or other config file"
+
+ def run(self):
+ dist = self.distribution
+ commands = dist.command_options.keys()
+ settings = {}
+
+ for cmd in commands:
+
+ if cmd=='saveopts':
+ continue # don't save our own options!
+
+ for opt,(src,val) in dist.get_option_dict(cmd).items():
+ if src=="command line":
+ settings.setdefault(cmd,{})[opt] = val
+
+ edit_config(self.filename, settings, self.dry_run)
diff --git a/Lib/setuptools/command/sdist.py b/Lib/setuptools/command/sdist.py
new file mode 100755
index 0000000..829cd3c
--- /dev/null
+++ b/Lib/setuptools/command/sdist.py
@@ -0,0 +1,163 @@
+from distutils.command.sdist import sdist as _sdist
+from distutils.util import convert_path
+import os, re, sys, pkg_resources
+
+entities = [
+ ("&lt;","<"), ("&gt;", ">"), ("&quot;", '"'), ("&apos;", "'"),
+ ("&amp;", "&")
+]
+
+def unescape(data):
+ for old,new in entities:
+ data = data.replace(old,new)
+ return data
+
+def re_finder(pattern, postproc=None):
+ def find(dirname, filename):
+ f = open(filename,'rU')
+ data = f.read()
+ f.close()
+ for match in pattern.finditer(data):
+ path = match.group(1)
+ if postproc:
+ path = postproc(path)
+ yield joinpath(dirname,path)
+ return find
+
+def joinpath(prefix,suffix):
+ if not prefix:
+ return suffix
+ return os.path.join(prefix,suffix)
+
+
+
+
+
+
+
+
+
+
+
+def walk_revctrl(dirname=''):
+ """Find all files under revision control"""
+ for ep in pkg_resources.iter_entry_points('setuptools.file_finders'):
+ for item in ep.load()(dirname):
+ yield item
+
+def _default_revctrl(dirname=''):
+ for path, finder in finders:
+ path = joinpath(dirname,path)
+ if os.path.isfile(path):
+ for path in finder(dirname,path):
+ if os.path.isfile(path):
+ yield path
+ elif os.path.isdir(path):
+ for item in _default_revctrl(path):
+ yield item
+
+def externals_finder(dirname, filename):
+ """Find any 'svn:externals' directories"""
+ found = False
+ f = open(filename,'rb')
+ for line in iter(f.readline, ''): # can't use direct iter!
+ parts = line.split()
+ if len(parts)==2:
+ kind,length = parts
+ data = f.read(int(length))
+ if kind=='K' and data=='svn:externals':
+ found = True
+ elif kind=='V' and found:
+ f.close()
+ break
+ else:
+ f.close()
+ return
+
+ for line in data.splitlines():
+ parts = line.split()
+ if parts:
+ yield joinpath(dirname, parts[0])
+
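
The .svn/dir-props file parsed above stores properties in svn's K/V format;
an illustrative fragment declaring a single external (the URL is made up):

    K 13
    svn:externals
    V 36
    vendor http://example.com/svn/vendor
    END

externals_finder() would yield joinpath(dirname, 'vendor') for it.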
+
+finders = [
+ (convert_path('CVS/Entries'),
+ re_finder(re.compile(r"^\w?/([^/]+)/", re.M))),
+ (convert_path('.svn/entries'),
+ re_finder(
+ re.compile(r'name="([^"]+)"(?![^>]+deleted="true")', re.I),
+ unescape
+ )
+ ),
+ (convert_path('.svn/dir-props'), externals_finder),
+]
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+class sdist(_sdist):
+ """Smart sdist that finds anything supported by revision control"""
+
+ user_options = [
+ ('formats=', None,
+ "formats for source distribution (comma-separated list)"),
+ ('keep-temp', 'k',
+ "keep the distribution tree around after creating " +
+ "archive file(s)"),
+ ('dist-dir=', 'd',
+ "directory to put the source distribution archive(s) in "
+ "[default: dist]"),
+ ]
+
+ negative_opt = {}
+
+ def run(self):
+ self.run_command('egg_info')
+ ei_cmd = self.get_finalized_command('egg_info')
+ self.filelist = ei_cmd.filelist
+ self.filelist.append(os.path.join(ei_cmd.egg_info,'SOURCES.txt'))
+
+ self.check_metadata()
+ self.make_distribution()
+
+ dist_files = getattr(self.distribution,'dist_files',[])
+ for file in self.archive_files:
+ data = ('sdist', '', file)
+ if data not in dist_files:
+ dist_files.append(data)
+
+ def read_template(self):
+ try:
+ _sdist.read_template(self)
+ except:
+ # grody hack to close the template file (MANIFEST.in)
+ # this prevents easy_install's attempt at deleting the file from
+ # dying and thus masking the real error
+ sys.exc_info()[2].tb_next.tb_frame.f_locals['template'].close()
+ raise
diff --git a/Lib/setuptools/command/setopt.py b/Lib/setuptools/command/setopt.py
new file mode 100755
index 0000000..e0c1058
--- /dev/null
+++ b/Lib/setuptools/command/setopt.py
@@ -0,0 +1,158 @@
+import distutils, os
+from setuptools import Command
+from distutils.util import convert_path
+from distutils import log
+from distutils.errors import *
+
+__all__ = ['config_file', 'edit_config', 'option_base', 'setopt']
+
+
+def config_file(kind="local"):
+    """Get the filename of the local, global, or per-user distutils config
+
+ `kind` must be one of "local", "global", or "user"
+ """
+ if kind=='local':
+ return 'setup.cfg'
+ if kind=='global':
+ return os.path.join(
+ os.path.dirname(distutils.__file__),'distutils.cfg'
+ )
+ if kind=='user':
+ dot = os.name=='posix' and '.' or ''
+ return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot))
+ raise ValueError(
+        "config_file() kind must be 'local', 'global', or 'user'", kind
+ )
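
Illustrative return values for each kind:

    config_file('local')   # -> 'setup.cfg'
    config_file('global')  # -> <distutils package dir>/distutils.cfg
    config_file('user')    # -> expanded '~/.pydistutils.cfg' on POSIX,
                           #    '~/pydistutils.cfg' elsewhere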
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+def edit_config(filename, settings, dry_run=False):
+ """Edit a configuration file to include `settings`
+
+ `settings` is a dictionary of dictionaries or ``None`` values, keyed by
+ command/section name. A ``None`` value means to delete the entire section,
+ while a dictionary lists settings to be changed or deleted in that section.
+ A setting of ``None`` means to delete that setting.
+ """
+ from ConfigParser import RawConfigParser
+ log.debug("Reading configuration from %s", filename)
+ opts = RawConfigParser()
+ opts.read([filename])
+ for section, options in settings.items():
+ if options is None:
+ log.info("Deleting section [%s] from %s", section, filename)
+ opts.remove_section(section)
+ else:
+ if not opts.has_section(section):
+ log.debug("Adding new section [%s] to %s", section, filename)
+ opts.add_section(section)
+ for option,value in options.items():
+ if value is None:
+ log.debug("Deleting %s.%s from %s",
+ section, option, filename
+ )
+ opts.remove_option(section,option)
+ if not opts.options(section):
+ log.info("Deleting empty [%s] section from %s",
+ section, filename)
+ opts.remove_section(section)
+ else:
+ log.debug(
+ "Setting %s.%s to %r in %s",
+ section, option, value, filename
+ )
+ opts.set(section,option,value)
+
+ log.info("Writing %s", filename)
+ if not dry_run:
+ f = open(filename,'w'); opts.write(f); f.close()
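
A hedged example of the `settings` mapping (section, option, and URL values
are illustrative):

    edit_config('setup.cfg', {
        'easy_install': {'index_url': 'http://example.com/simple'},  # set
        'aliases': {'test': None},   # delete one option from [aliases]
        'bdist_egg': None,           # delete the whole [bdist_egg] section
    })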
+
+class option_base(Command):
+ """Abstract base class for commands that mess with config files"""
+
+ user_options = [
+ ('global-config', 'g',
+ "save options to the site-wide distutils.cfg file"),
+ ('user-config', 'u',
+ "save options to the current user's pydistutils.cfg file"),
+ ('filename=', 'f',
+ "configuration file to use (default=setup.cfg)"),
+ ]
+
+ boolean_options = [
+ 'global-config', 'user-config',
+ ]
+
+ def initialize_options(self):
+ self.global_config = None
+ self.user_config = None
+ self.filename = None
+
+ def finalize_options(self):
+ filenames = []
+ if self.global_config:
+ filenames.append(config_file('global'))
+ if self.user_config:
+ filenames.append(config_file('user'))
+ if self.filename is not None:
+ filenames.append(self.filename)
+ if not filenames:
+ filenames.append(config_file('local'))
+ if len(filenames)>1:
+ raise DistutilsOptionError(
+ "Must specify only one configuration file option",
+ filenames
+ )
+ self.filename, = filenames
+
+
+
+
+class setopt(option_base):
+ """Save command-line options to a file"""
+
+ description = "set an option in setup.cfg or another config file"
+
+ user_options = [
+ ('command=', 'c', 'command to set an option for'),
+ ('option=', 'o', 'option to set'),
+ ('set-value=', 's', 'value of the option'),
+ ('remove', 'r', 'remove (unset) the value'),
+ ] + option_base.user_options
+
+ boolean_options = option_base.boolean_options + ['remove']
+
+ def initialize_options(self):
+ option_base.initialize_options(self)
+ self.command = None
+ self.option = None
+ self.set_value = None
+ self.remove = None
+
+ def finalize_options(self):
+ option_base.finalize_options(self)
+ if self.command is None or self.option is None:
+ raise DistutilsOptionError("Must specify --command *and* --option")
+ if self.set_value is None and not self.remove:
+ raise DistutilsOptionError("Must specify --set-value or --remove")
+
+ def run(self):
+ edit_config(
+ self.filename, {
+ self.command: {self.option.replace('-','_'):self.set_value}
+ },
+ self.dry_run
+ )
diff --git a/Lib/setuptools/command/test.py b/Lib/setuptools/command/test.py
new file mode 100644
index 0000000..01fca35
--- /dev/null
+++ b/Lib/setuptools/command/test.py
@@ -0,0 +1,119 @@
+from setuptools import Command
+from distutils.errors import DistutilsOptionError
+import sys
+from pkg_resources import *
+from unittest import TestLoader, main
+
+class ScanningLoader(TestLoader):
+
+ def loadTestsFromModule(self, module):
+ """Return a suite of all tests cases contained in the given module
+
+ If the module is a package, load tests from all the modules in it.
+ If the module has an ``additional_tests`` function, call it and add
+ the return value to the tests.
+ """
+ tests = []
+ if module.__name__!='setuptools.tests.doctest': # ugh
+ tests.append(TestLoader.loadTestsFromModule(self,module))
+
+ if hasattr(module, "additional_tests"):
+ tests.append(module.additional_tests())
+
+ if hasattr(module, '__path__'):
+ for file in resource_listdir(module.__name__, ''):
+ if file.endswith('.py') and file!='__init__.py':
+ submodule = module.__name__+'.'+file[:-3]
+ else:
+ if resource_exists(
+ module.__name__, file+'/__init__.py'
+ ):
+ submodule = module.__name__+'.'+file
+ else:
+ continue
+ tests.append(self.loadTestsFromName(submodule))
+
+ if len(tests)!=1:
+ return self.suiteClass(tests)
+ else:
+ return tests[0] # don't create a nested suite for only one return
+
+
+class test(Command):
+
+ """Command to run unit tests after in-place build"""
+
+ description = "run unit tests after in-place build"
+
+ user_options = [
+ ('test-module=','m', "Run 'test_suite' in specified module"),
+ ('test-suite=','s',
+ "Test suite to run (e.g. 'some_module.test_suite')"),
+ ]
+
+ def initialize_options(self):
+ self.test_suite = None
+ self.test_module = None
+ self.test_loader = None
+
+
+ def finalize_options(self):
+
+ if self.test_suite is None:
+ if self.test_module is None:
+ self.test_suite = self.distribution.test_suite
+ else:
+ self.test_suite = self.test_module+".test_suite"
+ elif self.test_module:
+ raise DistutilsOptionError(
+ "You may specify a module or a suite, but not both"
+ )
+
+ self.test_args = [self.test_suite]
+
+ if self.verbose:
+ self.test_args.insert(0,'--verbose')
+ if self.test_loader is None:
+ self.test_loader = getattr(self.distribution,'test_loader',None)
+ if self.test_loader is None:
+ self.test_loader = "setuptools.command.test:ScanningLoader"
+
+
+
+ def run(self):
+ # Ensure metadata is up-to-date
+ self.run_command('egg_info')
+
+ # Build extensions in-place
+ self.reinitialize_command('build_ext', inplace=1)
+ self.run_command('build_ext')
+
+ if self.distribution.tests_require:
+ self.distribution.fetch_build_eggs(self.distribution.tests_require)
+
+ if self.test_suite:
+ cmd = ' '.join(self.test_args)
+ if self.dry_run:
+ self.announce('skipping "unittest %s" (dry run)' % cmd)
+ else:
+ self.announce('running "unittest %s"' % cmd)
+ self.run_tests()
+
+
+ def run_tests(self):
+ import unittest
+ old_path = sys.path[:]
+ ei_cmd = self.get_finalized_command("egg_info")
+ path_item = normalize_path(ei_cmd.egg_base)
+ metadata = PathMetadata(
+ path_item, normalize_path(ei_cmd.egg_info)
+ )
+ dist = Distribution(path_item, metadata, project_name=ei_cmd.egg_name)
+ working_set.add(dist)
+ require(str(dist.as_requirement()))
+ loader_ep = EntryPoint.parse("x="+self.test_loader)
+ loader_class = loader_ep.load(require=False)
+ unittest.main(
+ None, None, [unittest.__file__]+self.test_args,
+ testLoader = loader_class()
+ )
diff --git a/Lib/setuptools/command/upload.py b/Lib/setuptools/command/upload.py
new file mode 100755
index 0000000..644c400
--- /dev/null
+++ b/Lib/setuptools/command/upload.py
@@ -0,0 +1,178 @@
+"""distutils.command.upload
+
+Implements the Distutils 'upload' subcommand (upload package to PyPI)."""
+
+from distutils.errors import *
+from distutils.core import Command
+from distutils.spawn import spawn
+from distutils import log
+from md5 import md5
+import os
+import socket
+import platform
+import ConfigParser
+import httplib
+import base64
+import urlparse
+import cStringIO as StringIO
+
+class upload(Command):
+
+ description = "upload binary package to PyPI"
+
+ DEFAULT_REPOSITORY = 'http://www.python.org/pypi'
+
+ user_options = [
+ ('repository=', 'r',
+ "url of repository [default: %s]" % DEFAULT_REPOSITORY),
+ ('show-response', None,
+ 'display full response text from server'),
+ ('sign', 's',
+ 'sign files to upload using gpg'),
+ ('identity=', 'i', 'GPG identity used to sign files'),
+ ]
+ boolean_options = ['show-response', 'sign']
+
+ def initialize_options(self):
+ self.username = ''
+ self.password = ''
+ self.repository = ''
+ self.show_response = 0
+ self.sign = False
+ self.identity = None
+
+ def finalize_options(self):
+ if self.identity and not self.sign:
+ raise DistutilsOptionError(
+ "Must use --sign for --identity to have meaning"
+ )
+ if os.environ.has_key('HOME'):
+ rc = os.path.join(os.environ['HOME'], '.pypirc')
+ if os.path.exists(rc):
+ self.announce('Using PyPI login from %s' % rc)
+ config = ConfigParser.ConfigParser({
+ 'username':'',
+ 'password':'',
+ 'repository':''})
+ config.read(rc)
+ if not self.repository:
+ self.repository = config.get('server-login', 'repository')
+ if not self.username:
+ self.username = config.get('server-login', 'username')
+ if not self.password:
+ self.password = config.get('server-login', 'password')
+ if not self.repository:
+ self.repository = self.DEFAULT_REPOSITORY
+
+ def run(self):
+ if not self.distribution.dist_files:
+ raise DistutilsOptionError("No dist file created in earlier command")
+ for command, pyversion, filename in self.distribution.dist_files:
+ self.upload_file(command, pyversion, filename)
+
+ def upload_file(self, command, pyversion, filename):
+ # Sign if requested
+ if self.sign:
+ gpg_args = ["gpg", "--detach-sign", "-a", filename]
+ if self.identity:
+ gpg_args[2:2] = ["--local-user", self.identity]
+ spawn(gpg_args,
+ dry_run=self.dry_run)
+
+ # Fill in the data
+ content = open(filename,'rb').read()
+ basename = os.path.basename(filename)
+ comment = ''
+ if command=='bdist_egg' and self.distribution.has_ext_modules():
+ comment = "built on %s" % platform.platform(terse=1)
+ data = {
+ ':action':'file_upload',
+ 'protcol_version':'1',
+ 'name':self.distribution.get_name(),
+ 'version':self.distribution.get_version(),
+ 'content':(basename,content),
+ 'filetype':command,
+ 'pyversion':pyversion,
+ 'md5_digest':md5(content).hexdigest(),
+ }
+ if command == 'bdist_rpm':
+ dist, version, id = platform.dist()
+ if dist:
+ comment = 'built for %s %s' % (dist, version)
+ elif command == 'bdist_dumb':
+ comment = 'built for %s' % platform.platform(terse=1)
+ data['comment'] = comment
+
+ if self.sign:
+ data['gpg_signature'] = (os.path.basename(filename) + ".asc",
+ open(filename+".asc").read())
+
+ # set up the authentication
+ auth = "Basic " + base64.encodestring(self.username + ":" + self.password).strip()
+
+ # Build up the MIME payload for the POST data
+ boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
+ sep_boundary = '\n--' + boundary
+ end_boundary = sep_boundary + '--'
+ body = StringIO.StringIO()
+ for key, value in data.items():
+ # handle multiple entries for the same name
+ if type(value) != type([]):
+ value = [value]
+ for value in value:
+ if type(value) is tuple:
+ fn = ';filename="%s"' % value[0]
+ value = value[1]
+ else:
+ fn = ""
+ value = str(value)
+ body.write(sep_boundary)
+ body.write('\nContent-Disposition: form-data; name="%s"'%key)
+ body.write(fn)
+ body.write("\n\n")
+ body.write(value)
+ if value and value[-1] == '\r':
+ body.write('\n') # write an extra newline (lurve Macs)
+ body.write(end_boundary)
+ body.write("\n")
+ body = body.getvalue()
+
+ self.announce("Submitting %s to %s" % (filename, self.repository), log.INFO)
+
+ # build the Request
+ # We can't use urllib2 since we need to send the Basic
+ # auth right with the first request
+ schema, netloc, url, params, query, fragments = \
+ urlparse.urlparse(self.repository)
+ assert not params and not query and not fragments
+ if schema == 'http':
+ http = httplib.HTTPConnection(netloc)
+ elif schema == 'https':
+ http = httplib.HTTPSConnection(netloc)
+ else:
+ raise AssertionError, "unsupported schema "+schema
+
+ data = ''
+ loglevel = log.INFO
+ try:
+ http.connect()
+ http.putrequest("POST", url)
+ http.putheader('Content-type',
+ 'multipart/form-data; boundary=%s'%boundary)
+ http.putheader('Content-length', str(len(body)))
+ http.putheader('Authorization', auth)
+ http.endheaders()
+ http.send(body)
+ except socket.error, e:
+            self.announce(str(e), log.ERROR)
+ return
+
+ r = http.getresponse()
+ if r.status == 200:
+ self.announce('Server response (%s): %s' % (r.status, r.reason),
+ log.INFO)
+ else:
+ self.announce('Upload failed (%s): %s' % (r.status, r.reason),
+ log.ERROR)
+ if self.show_response:
+ print '-'*75, r.read(), '-'*75
diff --git a/Lib/setuptools/depends.py b/Lib/setuptools/depends.py
new file mode 100644
index 0000000..68d8194
--- /dev/null
+++ b/Lib/setuptools/depends.py
@@ -0,0 +1,239 @@
+from __future__ import generators
+import sys, imp, marshal
+from imp import PKG_DIRECTORY, PY_COMPILED, PY_SOURCE, PY_FROZEN
+from distutils.version import StrictVersion, LooseVersion
+
+__all__ = [
+ 'Require', 'find_module', 'get_module_constant', 'extract_constant'
+]
+
+class Require:
+ """A prerequisite to building or installing a distribution"""
+
+ def __init__(self,name,requested_version,module,homepage='',
+ attribute=None,format=None
+ ):
+
+ if format is None and requested_version is not None:
+ format = StrictVersion
+
+ if format is not None:
+ requested_version = format(requested_version)
+ if attribute is None:
+ attribute = '__version__'
+
+ self.__dict__.update(locals())
+ del self.self
+
+
+ def full_name(self):
+ """Return full package/distribution name, w/version"""
+ if self.requested_version is not None:
+ return '%s-%s' % (self.name,self.requested_version)
+ return self.name
+
+
+ def version_ok(self,version):
+ """Is 'version' sufficiently up-to-date?"""
+ return self.attribute is None or self.format is None or \
+            str(version)!="unknown" and version >= self.requested_version
+
+
+ def get_version(self, paths=None, default="unknown"):
+
+ """Get version number of installed module, 'None', or 'default'
+
+ Search 'paths' for module. If not found, return 'None'. If found,
+ return the extracted version attribute, or 'default' if no version
+ attribute was specified, or the value cannot be determined without
+ importing the module. The version is formatted according to the
+ requirement's version format (if any), unless it is 'None' or the
+ supplied 'default'.
+ """
+
+ if self.attribute is None:
+ try:
+ f,p,i = find_module(self.module,paths)
+ if f: f.close()
+ return default
+ except ImportError:
+ return None
+
+ v = get_module_constant(self.module,self.attribute,default,paths)
+
+ if v is not None and v is not default and self.format is not None:
+ return self.format(v)
+
+ return v
+
+
+ def is_present(self,paths=None):
+ """Return true if dependency is present on 'paths'"""
+ return self.get_version(paths) is not None
+
+
+ def is_current(self,paths=None):
+ """Return true if dependency is present and up-to-date on 'paths'"""
+ version = self.get_version(paths)
+ if version is None:
+ return False
+ return self.version_ok(version)
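
A hedged usage sketch (the requirement values are illustrative):

    req = Require('Distutils', '1.0.3', 'distutils')
    req.full_name()    # 'Distutils-1.0.3'
    req.is_present()   # True if the distutils module can be found
    req.is_current()   # True if distutils.__version__ parses as
                       # StrictVersion >= '1.0.3'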
+
+
+def _iter_code(code):
+
+ """Yield '(op,arg)' pair for each operation in code object 'code'"""
+
+ from array import array
+ from dis import HAVE_ARGUMENT, EXTENDED_ARG
+
+ bytes = array('b',code.co_code)
+ eof = len(code.co_code)
+
+ ptr = 0
+ extended_arg = 0
+
+ while ptr<eof:
+
+ op = bytes[ptr]
+
+ if op>=HAVE_ARGUMENT:
+
+ arg = bytes[ptr+1] + bytes[ptr+2]*256 + extended_arg
+ ptr += 3
+
+ if op==EXTENDED_ARG:
+ extended_arg = arg * 65536L
+ continue
+
+ else:
+ arg = None
+ ptr += 1
+
+ yield op,arg
+
+
+
+
+
+
+
+
+
+
+def find_module(module, paths=None):
+ """Just like 'imp.find_module()', but with package support"""
+
+ parts = module.split('.')
+
+ while parts:
+ part = parts.pop(0)
+ f, path, (suffix,mode,kind) = info = imp.find_module(part, paths)
+
+ if kind==PKG_DIRECTORY:
+ parts = parts or ['__init__']
+ paths = [path]
+
+ elif parts:
+ raise ImportError("Can't find %r in %s" % (parts,module))
+
+ return info
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+def get_module_constant(module, symbol, default=-1, paths=None):
+
+ """Find 'module' by searching 'paths', and extract 'symbol'
+
+ Return 'None' if 'module' does not exist on 'paths', or it does not define
+ 'symbol'. If the module defines 'symbol' as a constant, return the
+ constant. Otherwise, return 'default'."""
+
+ try:
+ f, path, (suffix,mode,kind) = find_module(module,paths)
+ except ImportError:
+ # Module doesn't exist
+ return None
+
+ try:
+ if kind==PY_COMPILED:
+ f.read(8) # skip magic & date
+ code = marshal.load(f)
+ elif kind==PY_FROZEN:
+ code = imp.get_frozen_object(module)
+ elif kind==PY_SOURCE:
+ code = compile(f.read(), path, 'exec')
+ else:
+ # Not something we can parse; we'll have to import it. :(
+ if module not in sys.modules:
+ imp.load_module(module,f,path,(suffix,mode,kind))
+ return getattr(sys.modules[module],symbol,None)
+
+ finally:
+ if f:
+ f.close()
+
+ return extract_constant(code,symbol,default)
+
+
+
+
+
+
+
+
+def extract_constant(code,symbol,default=-1):
+
+ """Extract the constant value of 'symbol' from 'code'
+
+ If the name 'symbol' is bound to a constant value by the Python code
+ object 'code', return that value. If 'symbol' is bound to an expression,
+ return 'default'. Otherwise, return 'None'.
+
+ Return value is based on the first assignment to 'symbol'. 'symbol' must
+ be a global, or at least a non-"fast" local in the code block. That is,
+ only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
+ must be present in 'code.co_names'.
+ """
+
+ if symbol not in code.co_names:
+        # name's not there, can't possibly be an assignment
+ return None
+
+ name_idx = list(code.co_names).index(symbol)
+
+ STORE_NAME = 90
+ STORE_GLOBAL = 97
+ LOAD_CONST = 100
+
+ const = default
+
+ for op, arg in _iter_code(code):
+
+ if op==LOAD_CONST:
+ const = code.co_consts[arg]
+ elif arg==name_idx and (op==STORE_NAME or op==STORE_GLOBAL):
+ return const
+ else:
+ const = default
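
Illustrative use of extract_constant() on freshly compiled code objects:

    code = compile("__version__ = '1.0'\n", '<example>', 'exec')
    extract_constant(code, '__version__')   # -> '1.0'
    extract_constant(code, 'other_name')    # -> None (not in co_names)

    code2 = compile("x = y\n", '<example>', 'exec')
    extract_constant(code2, 'x')            # -> -1 (default: bound to an
                                            #    expression, not a constant)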
diff --git a/Lib/setuptools/dist.py b/Lib/setuptools/dist.py
new file mode 100644
index 0000000..f0417c1
--- /dev/null
+++ b/Lib/setuptools/dist.py
@@ -0,0 +1,798 @@
+__all__ = ['Distribution']
+
+from distutils.core import Distribution as _Distribution
+from setuptools.depends import Require
+from setuptools.command.install import install
+from setuptools.command.sdist import sdist
+from setuptools.command.install_lib import install_lib
+from distutils.errors import DistutilsOptionError, DistutilsPlatformError
+from distutils.errors import DistutilsSetupError
+import setuptools, pkg_resources, distutils.core, distutils.dist, distutils.cmd
+import os
+
+def _get_unpatched(cls):
+ """Protect against re-patching the distutils if reloaded
+
+ Also ensures that no other distutils extension monkeypatched the distutils
+ first.
+ """
+ while cls.__module__.startswith('setuptools'):
+ cls, = cls.__bases__
+ if not cls.__module__.startswith('distutils'):
+ raise AssertionError(
+ "distutils has already been patched by %r" % cls
+ )
+ return cls
+
+_Distribution = _get_unpatched(_Distribution)
+
+sequence = tuple, list
+
+def check_importable(dist, attr, value):
+ try:
+ ep = pkg_resources.EntryPoint.parse('x='+value)
+ assert not ep.extras
+ except (TypeError,ValueError,AttributeError,AssertionError):
+ raise DistutilsSetupError(
+ "%r must be importable 'module:attrs' string (got %r)"
+ % (attr,value)
+ )
+
+
+def assert_string_list(dist, attr, value):
+ """Verify that value is a string list or None"""
+ try:
+ assert ''.join(value)!=value
+ except (TypeError,ValueError,AttributeError,AssertionError):
+ raise DistutilsSetupError(
+ "%r must be a list of strings (got %r)" % (attr,value)
+ )
+
+def check_nsp(dist, attr, value):
+ """Verify that namespace packages are valid"""
+ assert_string_list(dist,attr,value)
+
+ for nsp in value:
+ if not dist.has_contents_for(nsp):
+ raise DistutilsSetupError(
+ "Distribution contains no modules or packages for " +
+ "namespace package %r" % nsp
+ )
+
+def check_extras(dist, attr, value):
+ """Verify that extras_require mapping is valid"""
+ try:
+ for k,v in value.items():
+ list(pkg_resources.parse_requirements(v))
+ except (TypeError,ValueError,AttributeError):
+ raise DistutilsSetupError(
+ "'extras_require' must be a dictionary whose values are "
+ "strings or lists of strings containing valid project/version "
+ "requirement specifiers."
+ )
+
+def assert_bool(dist, attr, value):
+ """Verify that value is True, False, 0, or 1"""
+ if bool(value) != value:
+ raise DistutilsSetupError(
+ "%r must be a boolean value (got %r)" % (attr,value)
+ )
+
+
+
+def check_requirements(dist, attr, value):
+ """Verify that install_requires is a valid requirements list"""
+ try:
+ list(pkg_resources.parse_requirements(value))
+ except (TypeError,ValueError):
+ raise DistutilsSetupError(
+ "%r must be a string or list of strings "
+ "containing valid project/version requirement specifiers" % (attr,)
+ )
+
+def check_entry_points(dist, attr, value):
+ """Verify that entry_points map is parseable"""
+ try:
+ pkg_resources.EntryPoint.parse_map(value)
+ except ValueError, e:
+ raise DistutilsSetupError(e)
+
+
+def check_test_suite(dist, attr, value):
+ if not isinstance(value,basestring):
+ raise DistutilsSetupError("test_suite must be a string")
+
+
+def check_package_data(dist, attr, value):
+ """Verify that value is a dictionary of package names to glob lists"""
+ if isinstance(value,dict):
+ for k,v in value.items():
+ if not isinstance(k,str): break
+ try: iter(v)
+ except TypeError:
+ break
+ else:
+ return
+ raise DistutilsSetupError(
+ attr+" must be a dictionary mapping package names to lists of "
+ "wildcard patterns"
+ )
+
+
+
+
+class Distribution(_Distribution):
+ """Distribution with support for features, tests, and package data
+
+ This is an enhanced version of 'distutils.dist.Distribution' that
+ effectively adds the following new optional keyword arguments to 'setup()':
+
+ 'install_requires' -- a string or sequence of strings specifying project
+ versions that the distribution requires when installed, in the format
+ used by 'pkg_resources.require()'. They will be installed
+ automatically when the package is installed. If you wish to use
+ packages that are not available in PyPI, or want to give your users an
+ alternate download location, you can add a 'find_links' option to the
+ '[easy_install]' section of your project's 'setup.cfg' file, and then
+ setuptools will scan the listed web pages for links that satisfy the
+ requirements.
+
+ 'extras_require' -- a dictionary mapping names of optional "extras" to the
+ additional requirement(s) that using those extras incurs. For example,
+ this::
+
+ extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])
+
+ indicates that the distribution can optionally provide an extra
+ capability called "reST", but it can only be used if docutils and
+ reSTedit are installed. If the user installs your package using
+ EasyInstall and requests one of your extras, the corresponding
+ additional requirements will be installed if needed.
+
+ 'features' -- a dictionary mapping option names to 'setuptools.Feature'
+ objects. Features are a portion of the distribution that can be
+ included or excluded based on user options, inter-feature dependencies,
+ and availability on the current system. Excluded features are omitted
+ from all setup commands, including source and binary distributions, so
+ you can create multiple distributions from the same source tree.
+ Feature names should be valid Python identifiers, except that they may
+ contain the '-' (minus) sign. Features can be included or excluded
+ via the command line options '--with-X' and '--without-X', where 'X' is
+ the name of the feature. Whether a feature is included by default, and
+ whether you are allowed to control this from the command line, is
+ determined by the Feature object. See the 'Feature' class for more
+ information.
+
+ 'test_suite' -- the name of a test suite to run for the 'test' command.
+ If the user runs 'python setup.py test', the package will be installed,
+ and the named test suite will be run. The format is the same as
+ would be used on a 'unittest.py' command line. That is, it is the
+ dotted name of an object to import and call to generate a test suite.
+
+ 'package_data' -- a dictionary mapping package names to lists of filenames
+ or globs to use to find data files contained in the named packages.
+ If the dictionary has filenames or globs listed under '""' (the empty
+ string), those names will be searched for in every package, in addition
+ to any names for the specific package. Data files found using these
+ names/globs will be installed along with the package, in the same
+ location as the package. Note that globs are allowed to reference
+ the contents of non-package subdirectories, as long as you use '/' as
+ a path separator. (Globs are automatically converted to
+ platform-specific paths at runtime.)
+
+ In addition to these new keywords, this class also has several new methods
+ for manipulating the distribution's contents. For example, the 'include()'
+ and 'exclude()' methods can be thought of as in-place add and subtract
+ commands that add or remove packages, modules, extensions, and so on from
+ the distribution. They are used by the feature subsystem to configure the
+ distribution for the included and excluded features.
+ """
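
A hedged sketch of a setup() call exercising these keywords (all names and
requirement strings are illustrative, apart from the "reST" example taken
from the docstring above):

    from setuptools import setup

    setup(
        name='ExamplePackage',
        version='0.1',
        packages=['example'],
        install_requires=['docutils>=0.3'],
        extras_require={'reST': ['docutils>=0.3', 'reSTedit']},
        test_suite='example.tests.test_suite',
        package_data={'example': ['data/*.dat']},
    )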
+
+ _patched_dist = None
+
+ def patch_missing_pkg_info(self, attrs):
+ # Fake up a replacement for the data that would normally come from
+ # PKG-INFO, but which might not yet be built if this is a fresh
+ # checkout.
+ #
+ if not attrs or 'name' not in attrs or 'version' not in attrs:
+ return
+ key = pkg_resources.safe_name(str(attrs['name'])).lower()
+ dist = pkg_resources.working_set.by_key.get(key)
+ if dist is not None and not dist.has_metadata('PKG-INFO'):
+ dist._version = pkg_resources.safe_version(str(attrs['version']))
+ self._patched_dist = dist
+
+ def __init__ (self, attrs=None):
+ have_package_data = hasattr(self, "package_data")
+ if not have_package_data:
+ self.package_data = {}
+ self.require_features = []
+ self.features = {}
+ self.dist_files = []
+ self.patch_missing_pkg_info(attrs)
+ # Make sure we have any eggs needed to interpret 'attrs'
+ if attrs and 'dependency_links' in attrs:
+ self.dependency_links = attrs.pop('dependency_links')
+ assert_string_list(self,'dependency_links',self.dependency_links)
+ if attrs and 'setup_requires' in attrs:
+ self.fetch_build_eggs(attrs.pop('setup_requires'))
+ for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
+ if not hasattr(self,ep.name):
+ setattr(self,ep.name,None)
+ _Distribution.__init__(self,attrs)
+ if isinstance(self.metadata.version, (int,long,float)):
+ # Some people apparently take "version number" too literally :)
+ self.metadata.version = str(self.metadata.version)
+
+ def parse_command_line(self):
+ """Process features after parsing command line options"""
+ result = _Distribution.parse_command_line(self)
+ if self.features:
+ self._finalize_features()
+ return result
+
+ def _feature_attrname(self,name):
+ """Convert feature name to corresponding option attribute name"""
+ return 'with_'+name.replace('-','_')
+
+ def fetch_build_eggs(self, requires):
+ """Resolve pre-setup requirements"""
+ from pkg_resources import working_set, parse_requirements
+ for dist in working_set.resolve(
+ parse_requirements(requires), installer=self.fetch_build_egg
+ ):
+ working_set.add(dist)
+
+ def finalize_options(self):
+ _Distribution.finalize_options(self)
+ if self.features:
+ self._set_global_opts_from_features()
+
+ for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
+ value = getattr(self,ep.name,None)
+ if value is not None:
+ ep.require(installer=self.fetch_build_egg)
+ ep.load()(self, ep.name, value)
+
+ def fetch_build_egg(self, req):
+ """Fetch an egg needed for building"""
+ try:
+ cmd = self._egg_fetcher
+ except AttributeError:
+ from setuptools.command.easy_install import easy_install
+ dist = self.__class__({'script_args':['easy_install']})
+ dist.parse_config_files()
+ opts = dist.get_option_dict('easy_install')
+ keep = (
+ 'find_links', 'site_dirs', 'index_url', 'optimize',
+ 'site_dirs', 'allow_hosts'
+ )
+ for key in opts.keys():
+ if key not in keep:
+ del opts[key] # don't use any other settings
+ if self.dependency_links:
+ links = self.dependency_links[:]
+ if 'find_links' in opts:
+ links = opts['find_links'][1].split() + links
+ opts['find_links'] = ('setup', links)
+ cmd = easy_install(
+ dist, args=["x"], install_dir=os.curdir, exclude_scripts=True,
+ always_copy=False, build_directory=None, editable=False,
+ upgrade=False, multi_version=True, no_report = True
+ )
+ cmd.ensure_finalized()
+ self._egg_fetcher = cmd
+ return cmd.easy_install(req)
+
+ def _set_global_opts_from_features(self):
+ """Add --with-X/--without-X options based on optional features"""
+
+ go = []
+ no = self.negative_opt.copy()
+
+ for name,feature in self.features.items():
+ self._set_feature(name,None)
+ feature.validate(self)
+
+ if feature.optional:
+ descr = feature.description
+ incdef = ' (default)'
+ excdef=''
+ if not feature.include_by_default():
+ excdef, incdef = incdef, excdef
+
+ go.append(('with-'+name, None, 'include '+descr+incdef))
+ go.append(('without-'+name, None, 'exclude '+descr+excdef))
+ no['without-'+name] = 'with-'+name
+
+ self.global_options = self.feature_options = go + self.global_options
+ self.negative_opt = self.feature_negopt = no
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ def _finalize_features(self):
+ """Add/remove features and resolve dependencies between them"""
+
+ # First, flag all the enabled items (and thus their dependencies)
+ for name,feature in self.features.items():
+ enabled = self.feature_is_included(name)
+ if enabled or (enabled is None and feature.include_by_default()):
+ feature.include_in(self)
+ self._set_feature(name,1)
+
+ # Then disable the rest, so that off-by-default features don't
+ # get flagged as errors when they're required by an enabled feature
+ for name,feature in self.features.items():
+ if not self.feature_is_included(name):
+ feature.exclude_from(self)
+ self._set_feature(name,0)
+
+
+ def get_command_class(self, command):
+ """Pluggable version of get_command_class()"""
+ if command in self.cmdclass:
+ return self.cmdclass[command]
+
+ for ep in pkg_resources.iter_entry_points('distutils.commands',command):
+ ep.require(installer=self.fetch_build_egg)
+ self.cmdclass[command] = cmdclass = ep.load()
+ return cmdclass
+ else:
+ return _Distribution.get_command_class(self, command)
+
+ def print_commands(self):
+ for ep in pkg_resources.iter_entry_points('distutils.commands'):
+ if ep.name not in self.cmdclass:
+ cmdclass = ep.load(False) # don't require extras, we're not running
+ self.cmdclass[ep.name] = cmdclass
+ return _Distribution.print_commands(self)
+
+
+
+
+
+ def _set_feature(self,name,status):
+ """Set feature's inclusion status"""
+ setattr(self,self._feature_attrname(name),status)
+
+ def feature_is_included(self,name):
+ """Return 1 if feature is included, 0 if excluded, 'None' if unknown"""
+ return getattr(self,self._feature_attrname(name))
+
+ def include_feature(self,name):
+ """Request inclusion of feature named 'name'"""
+
+ if self.feature_is_included(name)==0:
+ descr = self.features[name].description
+ raise DistutilsOptionError(
+ descr + " is required, but was excluded or is not available"
+ )
+ self.features[name].include_in(self)
+ self._set_feature(name,1)
+
+ def include(self,**attrs):
+ """Add items to distribution that are named in keyword arguments
+
+        For example, 'dist.include(py_modules=["x"])' would add 'x' to
+ the distribution's 'py_modules' attribute, if it was not already
+ there.
+
+ Currently, this method only supports inclusion for attributes that are
+ lists or tuples. If you need to add support for adding to other
+ attributes in this or a subclass, you can add an '_include_X' method,
+ where 'X' is the name of the attribute. The method will be called with
+ the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})'
+ will try to call 'dist._include_foo({"bar":"baz"})', which can then
+ handle whatever special inclusion logic is needed.
+ """
+ for k,v in attrs.items():
+ include = getattr(self, '_include_'+k, None)
+ if include:
+ include(v)
+ else:
+ self._include_misc(k,v)
+
+ def exclude_package(self,package):
+ """Remove packages, modules, and extensions in named package"""
+
+ pfx = package+'.'
+ if self.packages:
+ self.packages = [
+ p for p in self.packages
+                if p!=package and not p.startswith(pfx)
+ ]
+
+ if self.py_modules:
+ self.py_modules = [
+ p for p in self.py_modules
+                if p!=package and not p.startswith(pfx)
+ ]
+
+ if self.ext_modules:
+ self.ext_modules = [
+ p for p in self.ext_modules
+                if p.name!=package and not p.name.startswith(pfx)
+ ]
+
+
+ def has_contents_for(self,package):
+ """Return true if 'exclude_package(package)' would do something"""
+
+ pfx = package+'.'
+
+ for p in self.iter_distribution_names():
+ if p==package or p.startswith(pfx):
+ return True
+
+
+
+
+
+
+
+
+
+
+ def _exclude_misc(self,name,value):
+ """Handle 'exclude()' for list/tuple attrs without a special handler"""
+ if not isinstance(value,sequence):
+ raise DistutilsSetupError(
+ "%s: setting must be a list or tuple (%r)" % (name, value)
+ )
+ try:
+ old = getattr(self,name)
+ except AttributeError:
+ raise DistutilsSetupError(
+ "%s: No such distribution setting" % name
+ )
+ if old is not None and not isinstance(old,sequence):
+ raise DistutilsSetupError(
+ name+": this setting cannot be changed via include/exclude"
+ )
+ elif old:
+ setattr(self,name,[item for item in old if item not in value])
+
+ def _include_misc(self,name,value):
+ """Handle 'include()' for list/tuple attrs without a special handler"""
+
+ if not isinstance(value,sequence):
+ raise DistutilsSetupError(
+                "%s: setting must be a list or tuple (%r)" % (name, value)
+ )
+ try:
+ old = getattr(self,name)
+ except AttributeError:
+ raise DistutilsSetupError(
+ "%s: No such distribution setting" % name
+ )
+ if old is None:
+ setattr(self,name,value)
+ elif not isinstance(old,sequence):
+ raise DistutilsSetupError(
+ name+": this setting cannot be changed via include/exclude"
+ )
+ else:
+ setattr(self,name,old+[item for item in value if item not in old])
+
+ def exclude(self,**attrs):
+ """Remove items from distribution that are named in keyword arguments
+
+ For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
+ the distribution's 'py_modules' attribute. Excluding packages uses
+ the 'exclude_package()' method, so all of the package's contained
+ packages, modules, and extensions are also excluded.
+
+ Currently, this method only supports exclusion from attributes that are
+ lists or tuples. If you need to add support for excluding from other
+ attributes in this or a subclass, you can add an '_exclude_X' method,
+ where 'X' is the name of the attribute. The method will be called with
+ the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})'
+ will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
+ handle whatever special exclusion logic is needed.
+ """
+ for k,v in attrs.items():
+ exclude = getattr(self, '_exclude_'+k, None)
+ if exclude:
+ exclude(v)
+ else:
+ self._exclude_misc(k,v)
+
+ def _exclude_packages(self,packages):
+ if not isinstance(packages,sequence):
+ raise DistutilsSetupError(
+ "packages: setting must be a list or tuple (%r)" % (packages,)
+ )
+ map(self.exclude_package, packages)
+
+
+
+
+
+
+
+
+
+
+
+
+ def _parse_command_opts(self, parser, args):
+ # Remove --with-X/--without-X options when processing command args
+ self.global_options = self.__class__.global_options
+ self.negative_opt = self.__class__.negative_opt
+
+ # First, expand any aliases
+ command = args[0]
+ aliases = self.get_option_dict('aliases')
+ while command in aliases:
+ src,alias = aliases[command]
+ del aliases[command] # ensure each alias can expand only once!
+ import shlex
+ args[:1] = shlex.split(alias,True)
+ command = args[0]
+
+ nargs = _Distribution._parse_command_opts(self, parser, args)
+
+ # Handle commands that want to consume all remaining arguments
+ cmd_class = self.get_command_class(command)
+ if getattr(cmd_class,'command_consumes_arguments',None):
+ self.get_option_dict(command)['args'] = ("command line", nargs)
+ if nargs is not None:
+ return []
+
+ return nargs
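+
+    # For illustration: with a setup.cfg section such as
+    #
+    #     [aliases]
+    #     release = sdist bdist_wininst upload
+    #
+    # running "setup.py release" rewrites the argument list to the three
+    # underlying commands before normal option parsing resumes.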
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ def get_cmdline_options(self):
+ """Return a '{cmd: {opt:val}}' map of all command-line options
+
+ Option names are all long, but do not include the leading '--', and
+ contain dashes rather than underscores. If the option doesn't take
+ an argument (e.g. '--quiet'), the 'val' is 'None'.
+
+ Note that options provided by config files are intentionally excluded.
+ """
+
+ d = {}
+
+ for cmd,opts in self.command_options.items():
+
+ for opt,(src,val) in opts.items():
+
+ if src != "command line":
+ continue
+
+ opt = opt.replace('_','-')
+
+ if val==0:
+ cmdobj = self.get_command_obj(cmd)
+ neg_opt = self.negative_opt.copy()
+ neg_opt.update(getattr(cmdobj,'negative_opt',{}))
+ for neg,pos in neg_opt.items():
+ if pos==opt:
+ opt=neg
+ val=None
+ break
+ else:
+ raise AssertionError("Shouldn't be able to get here")
+
+ elif val==1:
+ val = None
+
+ d.setdefault(cmd,{})[opt] = val
+
+ return d
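+
+    # For example (illustrative), "setup.py build_ext --inplace" would
+    # produce {'build_ext': {'inplace': None}}, since '--inplace' takes
+    # no argument.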
+
+
+ def iter_distribution_names(self):
+ """Yield all packages, modules, and extension names in distribution"""
+
+ for pkg in self.packages or ():
+ yield pkg
+
+ for module in self.py_modules or ():
+ yield module
+
+ for ext in self.ext_modules or ():
+ if isinstance(ext,tuple):
+ name,buildinfo = ext
+ yield name
+ else:
+ yield ext.name
+
+# Install it throughout the distutils
+for module in distutils.dist, distutils.core, distutils.cmd:
+ module.Distribution = Distribution
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+class Feature:
+    """A subset of the distribution that can be excluded if unneeded/unwanted
+
+ Features are created using these keyword arguments:
+
+ 'description' -- a short, human readable description of the feature, to
+ be used in error messages, and option help messages.
+
+ 'standard' -- if true, the feature is included by default if it is
+ available on the current system. Otherwise, the feature is only
+ included if requested via a command line '--with-X' option, or if
+ another included feature requires it. The default setting is 'False'.
+
+ 'available' -- if true, the feature is available for installation on the
+ current system. The default setting is 'True'.
+
+ 'optional' -- if true, the feature's inclusion can be controlled from the
+ command line, using the '--with-X' or '--without-X' options. If
+ false, the feature's inclusion status is determined automatically,
+        based on 'available', 'standard', and whether any other feature
+ requires it. The default setting is 'True'.
+
+ 'require_features' -- a string or sequence of strings naming features
+ that should also be included if this feature is included. Defaults to
+ empty list. May also contain 'Require' objects that should be
+ added/removed from the distribution.
+
+ 'remove' -- a string or list of strings naming packages to be removed
+ from the distribution if this feature is *not* included. If the
+ feature *is* included, this argument is ignored. This argument exists
+ to support removing features that "crosscut" a distribution, such as
+ defining a 'tests' feature that removes all the 'tests' subpackages
+ provided by other features. The default for this argument is an empty
+ list. (Note: the named package(s) or modules must exist in the base
+ distribution when the 'setup()' function is initially called.)
+
+ other keywords -- any other keyword arguments are saved, and passed to
+ the distribution's 'include()' and 'exclude()' methods when the
+ feature is included or excluded, respectively. So, for example, you
+ could pass 'packages=["a","b"]' to cause packages 'a' and 'b' to be
+ added or removed from the distribution as appropriate.
+
+    A feature must include at least one 'require_features', 'remove', or other
+ keyword argument. Otherwise, it can't affect the distribution in any way.
+ Note also that you can subclass 'Feature' to create your own specialized
+ feature types that modify the distribution in other ways when included or
+ excluded. See the docstrings for the various methods here for more detail.
+ Aside from the methods, the only feature attributes that distributions look
+ at are 'description' and 'optional'.
+ """
+ def __init__(self, description, standard=False, available=True,
+ optional=True, require_features=(), remove=(), **extras
+ ):
+
+ self.description = description
+ self.standard = standard
+ self.available = available
+ self.optional = optional
+ if isinstance(require_features,(str,Require)):
+ require_features = require_features,
+
+ self.require_features = [
+ r for r in require_features if isinstance(r,str)
+ ]
+ er = [r for r in require_features if not isinstance(r,str)]
+ if er: extras['require_features'] = er
+
+ if isinstance(remove,str):
+ remove = remove,
+ self.remove = remove
+ self.extras = extras
+
+ if not remove and not require_features and not extras:
+ raise DistutilsSetupError(
+                "Feature %s: must define 'require_features', 'remove', or at least one"
+                " of 'packages', 'py_modules', etc." % (description,)
+ )
+
+ def include_by_default(self):
+ """Should this feature be included by default?"""
+ return self.available and self.standard
+
+ def include_in(self,dist):
+
+ """Ensure feature and its requirements are included in distribution
+
+ You may override this in a subclass to perform additional operations on
+ the distribution. Note that this method may be called more than once
+ per feature, and so should be idempotent.
+
+ """
+
+ if not self.available:
+ raise DistutilsPlatformError(
+                self.description+" is required, "
+                "but is not available on this platform"
+ )
+
+ dist.include(**self.extras)
+
+ for f in self.require_features:
+ dist.include_feature(f)
+
+
+
+ def exclude_from(self,dist):
+
+ """Ensure feature is excluded from distribution
+
+ You may override this in a subclass to perform additional operations on
+ the distribution. This method will be called at most once per
+ feature, and only after all included features have been asked to
+ include themselves.
+ """
+
+ dist.exclude(**self.extras)
+
+ if self.remove:
+ for item in self.remove:
+ dist.exclude_package(item)
+
+
+
+ def validate(self,dist):
+
+ """Verify that feature makes sense in context of distribution
+
+ This method is called by the distribution just before it parses its
+ command line. It checks to ensure that the 'remove' attribute, if any,
+ contains only valid package/module names that are present in the base
+ distribution when 'setup()' is called. You may override it in a
+ subclass to perform any other required validation of the feature
+ against a target distribution.
+ """
+
+ for item in self.remove:
+ if not dist.has_contents_for(item):
+ raise DistutilsSetupError(
+ "%s wants to be able to remove %s, but the distribution"
+ " doesn't contain any packages or modules under %s"
+ % (self.description, item, item)
+ )
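+
+# Example usage (a sketch; compare setuptools.tests): a 'tests' feature
+# that strips the test subpackage from installations built without it:
+#
+#     setup(
+#         packages = ['pkg', 'pkg.tests'],
+#         features = {
+#             'tests': Feature(
+#                 "test suite", standard=True, remove=['pkg.tests']
+#             ),
+#         },
+#     )
+#
+# would then accept "setup.py --without-tests install" on the command line.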
diff --git a/Lib/setuptools/extension.py b/Lib/setuptools/extension.py
new file mode 100644
index 0000000..cfcf55b
--- /dev/null
+++ b/Lib/setuptools/extension.py
@@ -0,0 +1,35 @@
+from distutils.core import Extension as _Extension
+from dist import _get_unpatched
+_Extension = _get_unpatched(_Extension)
+
+try:
+ from Pyrex.Distutils.build_ext import build_ext
+except ImportError:
+ have_pyrex = False
+else:
+ have_pyrex = True
+
+
+class Extension(_Extension):
+ """Extension that uses '.c' files in place of '.pyx' files"""
+
+ if not have_pyrex:
+ # convert .pyx extensions to .c
+ def __init__(self,*args,**kw):
+ _Extension.__init__(self,*args,**kw)
+ sources = []
+ for s in self.sources:
+ if s.endswith('.pyx'):
+ sources.append(s[:-3]+'c')
+ else:
+ sources.append(s)
+ self.sources = sources
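+            # e.g. (illustrative): Extension('m', ['m.pyx']) will now
+            # compile a pre-generated 'm.c' instead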
+
+class Library(Extension):
+ """Just like a regular Extension, but built as a library instead"""
+
+import sys, distutils.core, distutils.extension
+distutils.core.Extension = Extension
+distutils.extension.Extension = Extension
+if 'distutils.command.build_ext' in sys.modules:
+ sys.modules['distutils.command.build_ext'].Extension = Extension
diff --git a/Lib/setuptools/gui.exe b/Lib/setuptools/gui.exe
new file mode 100755
index 0000000..63ff35f
--- /dev/null
+++ b/Lib/setuptools/gui.exe
Binary files differ
diff --git a/Lib/setuptools/package_index.py b/Lib/setuptools/package_index.py
new file mode 100755
index 0000000..107e222
--- /dev/null
+++ b/Lib/setuptools/package_index.py
@@ -0,0 +1,674 @@
+"""PyPI and direct package downloading"""
+
+import sys, os.path, re, urlparse, urllib2, shutil, random, socket
+from pkg_resources import *
+from distutils import log
+from distutils.errors import DistutilsError
+from md5 import md5
+from fnmatch import translate
+
+EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.]+)$')
+HREF = re.compile("""href\\s*=\\s*['"]?([^'"> ]+)""", re.I)
+# this is here to fix emacs' cruddy broken syntax highlighting
+PYPI_MD5 = re.compile(
+ '<a href="([^"#]+)">([^<]+)</a>\n\s+\\(<a href="[^?]+\?:action=show_md5'
+ '&amp;digest=([0-9a-f]{32})">md5</a>\\)'
+)
+
+URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):',re.I).match
+EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()
+
+__all__ = [
+ 'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
+ 'interpret_distro_name',
+]
+
+
+def parse_bdist_wininst(name):
+ """Return (base,pyversion) or (None,None) for possible .exe name"""
+
+ lower = name.lower()
+ base, py_ver = None, None
+
+ if lower.endswith('.exe'):
+ if lower.endswith('.win32.exe'):
+ base = name[:-10]
+ elif lower.startswith('.win32-py',-16):
+ py_ver = name[-7:-4]
+ base = name[:-16]
+
+ return base,py_ver
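+
+# e.g. "foo-1.0.win32.exe" -> ("foo-1.0", None), and
+# "foo-1.0.win32-py2.4.exe" -> ("foo-1.0", "2.4")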
+
+def egg_info_for_url(url):
+ scheme, server, path, parameters, query, fragment = urlparse.urlparse(url)
+ base = urllib2.unquote(path.split('/')[-1])
+ if '#' in base: base, fragment = base.split('#',1)
+ return base,fragment
+
+def distros_for_url(url, metadata=None):
+ """Yield egg or source distribution objects that might be found at a URL"""
+ base, fragment = egg_info_for_url(url)
+ dists = distros_for_location(url, base, metadata)
+ if fragment and not dists:
+ match = EGG_FRAGMENT.match(fragment)
+ if match:
+ return interpret_distro_name(
+ url, match.group(1), metadata, precedence = CHECKOUT_DIST
+ )
+ return dists
+
+def distros_for_location(location, basename, metadata=None):
+ """Yield egg or source distribution objects based on basename"""
+ if basename.endswith('.egg.zip'):
+ basename = basename[:-4] # strip the .zip
+ if basename.endswith('.egg'): # only one, unambiguous interpretation
+ return [Distribution.from_location(location, basename, metadata)]
+
+ if basename.endswith('.exe'):
+ win_base, py_ver = parse_bdist_wininst(basename)
+ if win_base is not None:
+ return interpret_distro_name(
+ location, win_base, metadata, py_ver, BINARY_DIST, "win32"
+ )
+
+ # Try source distro extensions (.zip, .tgz, etc.)
+ #
+ for ext in EXTENSIONS:
+ if basename.endswith(ext):
+ basename = basename[:-len(ext)]
+ return interpret_distro_name(location, basename, metadata)
+ return [] # no extension matched
+
+
+def distros_for_filename(filename, metadata=None):
+ """Yield possible egg or source distribution objects based on a filename"""
+ return distros_for_location(
+ normalize_path(filename), os.path.basename(filename), metadata
+ )
+
+
+def interpret_distro_name(location, basename, metadata,
+ py_version=None, precedence=SOURCE_DIST, platform=None
+):
+ """Generate alternative interpretations of a source distro name
+
+ Note: if `location` is a filesystem filename, you should call
+ ``pkg_resources.normalize_path()`` on it before passing it to this
+ routine!
+ """
+
+ # Generate alternative interpretations of a source distro name
+ # Because some packages are ambiguous as to name/versions split
+ # e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc.
+    # So, we generate each possible interpretation (e.g. "adns, python-1.1.0",
+    # "adns-python, 1.1.0", and "adns-python-1.1.0, no version"). In practice,
+ # the spurious interpretations should be ignored, because in the event
+ # there's also an "adns" package, the spurious "python-1.1.0" version will
+ # compare lower than any numeric version number, and is therefore unlikely
+ # to match a request for it. It's still a potential problem, though, and
+ # in the long run PyPI and the distutils should go for "safe" names and
+ # versions in distribution archive names (sdist and bdist).
+
+ parts = basename.split('-')
+ for p in range(1,len(parts)+1):
+ yield Distribution(
+ location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]),
+ py_version=py_version, precedence = precedence,
+ platform = platform
+ )
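+
+# For example, interpret_distro_name(loc, "adns-python-1.1.0", None) yields
+# Distributions for ("adns", "python-1.1.0"), ("adns-python", "1.1.0"),
+# and ("adns-python-1.1.0", "").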
+
+
+
+
+
+class PackageIndex(Environment):
+ """A distribution index that scans web pages for download URLs"""
+
+ def __init__(self,index_url="http://www.python.org/pypi",hosts=('*',),*args,**kw):
+ Environment.__init__(self,*args,**kw)
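+        # append a trailing '/' to index_url unless it already ends with one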
+ self.index_url = index_url + "/"[:not index_url.endswith('/')]
+ self.scanned_urls = {}
+ self.fetched_urls = {}
+ self.package_pages = {}
+ self.allows = re.compile('|'.join(map(translate,hosts))).match
+ self.to_scan = []
+
+ def process_url(self, url, retrieve=False):
+ """Evaluate a URL as a possible download, and maybe retrieve it"""
+ url = fix_sf_url(url)
+ if url in self.scanned_urls and not retrieve:
+ return
+ self.scanned_urls[url] = True
+ if not URL_SCHEME(url):
+ self.process_filename(url)
+ return
+ else:
+ dists = list(distros_for_url(url))
+ if dists:
+ if not self.url_ok(url):
+ return
+ self.debug("Found link: %s", url)
+
+ if dists or not retrieve or url in self.fetched_urls:
+ map(self.add, dists)
+ return # don't need the actual page
+
+ if not self.url_ok(url):
+ self.fetched_urls[url] = True
+ return
+
+ self.info("Reading %s", url)
+ f = self.open_url(url)
+ self.fetched_urls[url] = self.fetched_urls[f.url] = True
+
+
+ if 'html' not in f.headers['content-type'].lower():
+ f.close() # not html, we can't process it
+ return
+
+ base = f.url # handle redirects
+ page = f.read()
+ f.close()
+ if url.startswith(self.index_url):
+ page = self.process_index(url, page)
+
+ for match in HREF.finditer(page):
+ link = urlparse.urljoin(base, match.group(1))
+ self.process_url(link)
+
+ def process_filename(self, fn, nested=False):
+ # process filenames or directories
+ if not os.path.exists(fn):
+            self.warn("Not found: %s", fn)
+ return
+
+ if os.path.isdir(fn) and not nested:
+ path = os.path.realpath(fn)
+ for item in os.listdir(path):
+ self.process_filename(os.path.join(path,item), True)
+
+ dists = distros_for_filename(fn)
+ if dists:
+ self.debug("Found: %s", fn)
+ map(self.add, dists)
+
+ def url_ok(self, url, fatal=False):
+ if self.allows(urlparse.urlparse(url)[1]):
+ return True
+        msg = "\nLink to %s ***BLOCKED*** by --allow-hosts\n"
+ if fatal:
+ raise DistutilsError(msg % url)
+ else:
+ self.warn(msg, url)
+
+
+
+ def process_index(self,url,page):
+ """Process the contents of a PyPI page"""
+ def scan(link):
+ # Process a URL to see if it's for a package page
+ if link.startswith(self.index_url):
+ parts = map(
+ urllib2.unquote, link[len(self.index_url):].split('/')
+ )
+ if len(parts)==2:
+ # it's a package page, sanitize and index it
+ pkg = safe_name(parts[0])
+ ver = safe_version(parts[1])
+ self.package_pages.setdefault(pkg.lower(),{})[link] = True
+ return to_filename(pkg), to_filename(ver)
+ return None, None
+
+ if url==self.index_url or 'Index of Packages</title>' in page:
+ # process an index page into the package-page index
+ for match in HREF.finditer(page):
+ scan( urlparse.urljoin(url, match.group(1)) )
+ else:
+ pkg,ver = scan(url) # ensure this page is in the page index
+ # process individual package page
+ for tag in ("<th>Home Page", "<th>Download URL"):
+ pos = page.find(tag)
+ if pos!=-1:
+ match = HREF.search(page,pos)
+ if match:
+ # Process the found URL
+ new_url = urlparse.urljoin(url, match.group(1))
+ base, frag = egg_info_for_url(new_url)
+ if base.endswith('.py') and not frag:
+ if pkg and ver:
+ new_url+='#egg=%s-%s' % (pkg,ver)
+ else:
+ self.need_version_info(url)
+ self.scan_url(new_url)
+ return PYPI_MD5.sub(
+ lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1,3,2), page
+ )
+
+ def need_version_info(self, url):
+ self.scan_all(
+ "Page at %s links to .py file(s) without version info; an index "
+ "scan is required.", url
+ )
+
+ def scan_all(self, msg=None, *args):
+ if self.index_url not in self.fetched_urls:
+ if msg: self.warn(msg,*args)
+ self.warn(
+ "Scanning index of all packages (this may take a while)"
+ )
+ self.scan_url(self.index_url)
+
+ def find_packages(self, requirement):
+ self.scan_url(self.index_url + requirement.unsafe_name+'/')
+
+ if not self.package_pages.get(requirement.key):
+ # Fall back to safe version of the name
+ self.scan_url(self.index_url + requirement.project_name+'/')
+
+ if not self.package_pages.get(requirement.key):
+ # We couldn't find the target package, so search the index page too
+ self.warn(
+ "Couldn't find index page for %r (maybe misspelled?)",
+ requirement.unsafe_name
+ )
+ self.scan_all()
+
+ for url in self.package_pages.get(requirement.key,()):
+ # scan each page that might be related to the desired package
+ self.scan_url(url)
+
+ def obtain(self, requirement, installer=None):
+ self.prescan(); self.find_packages(requirement)
+ for dist in self[requirement.key]:
+ if dist in requirement:
+ return dist
+ self.debug("%s does not match %s", requirement, dist)
+ return super(PackageIndex, self).obtain(requirement,installer)
+
+ def check_md5(self, cs, info, filename, tfp):
+ if re.match('md5=[0-9a-f]{32}$', info):
+ self.debug("Validating md5 checksum for %s", filename)
+            if cs.hexdigest()!=info[4:]:
+ tfp.close()
+ os.unlink(filename)
+ raise DistutilsError(
+ "MD5 validation failed for "+os.path.basename(filename)+
+ "; possible download problem?"
+ )
+
+ def add_find_links(self, urls):
+ """Add `urls` to the list that will be prescanned for searches"""
+ for url in urls:
+ if (
+ self.to_scan is None # if we have already "gone online"
+ or not URL_SCHEME(url) # or it's a local file/directory
+ or url.startswith('file:')
+ or list(distros_for_url(url)) # or a direct package link
+ ):
+ # then go ahead and process it now
+ self.scan_url(url)
+ else:
+ # otherwise, defer retrieval till later
+ self.to_scan.append(url)
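+
+    # For illustration: --find-links URLs are funneled through here, e.g.
+    #     pi.add_find_links(['http://example.com/downloads/'])
+    # direct package links are scanned at once; index pages are deferred.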
+
+ def prescan(self):
+ """Scan urls scheduled for prescanning (e.g. --find-links)"""
+ if self.to_scan:
+ map(self.scan_url, self.to_scan)
+ self.to_scan = None # from now on, go ahead and process immediately
+
+
+
+
+
+
+
+
+
+
+ def download(self, spec, tmpdir):
+ """Locate and/or download `spec` to `tmpdir`, returning a local path
+
+ `spec` may be a ``Requirement`` object, or a string containing a URL,
+ an existing local filename, or a project/version requirement spec
+ (i.e. the string form of a ``Requirement`` object). If it is the URL
+ of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
+ that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
+ automatically created alongside the downloaded file.
+
+ If `spec` is a ``Requirement`` object or a string containing a
+ project/version requirement spec, this method returns the location of
+ a matching distribution (possibly after downloading it to `tmpdir`).
+ If `spec` is a locally existing file or directory name, it is simply
+ returned unchanged. If `spec` is a URL, it is downloaded to a subpath
+ of `tmpdir`, and the local filename is returned. Various errors may be
+ raised if a problem occurs during downloading.
+ """
+ if not isinstance(spec,Requirement):
+ scheme = URL_SCHEME(spec)
+ if scheme:
+ # It's a url, download it to tmpdir
+ found = self._download_url(scheme.group(1), spec, tmpdir)
+ base, fragment = egg_info_for_url(spec)
+ if base.endswith('.py'):
+ found = self.gen_setup(found,fragment,tmpdir)
+ return found
+ elif os.path.exists(spec):
+ # Existing file or directory, just return it
+ return spec
+ else:
+ try:
+ spec = Requirement.parse(spec)
+ except ValueError:
+ raise DistutilsError(
+ "Not a URL, existing file, or requirement spec: %r" %
+ (spec,)
+ )
+ return getattr(self.fetch_distribution(spec, tmpdir),'location',None)
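+
+    # Illustrative usage (hypothetical values):
+    #
+    #     pi = PackageIndex()
+    #     pi.download("FooBar>=1.2", "/tmp/build")        # requirement spec
+    #     pi.download("http://example.com/Foo-1.0.tar.gz", "/tmp/build")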
+
+
+ def fetch_distribution(self,
+ requirement, tmpdir, force_scan=False, source=False, develop_ok=False
+ ):
+ """Obtain a distribution suitable for fulfilling `requirement`
+
+ `requirement` must be a ``pkg_resources.Requirement`` instance.
+ If necessary, or if the `force_scan` flag is set, the requirement is
+ searched for in the (online) package index as well as the locally
+ installed packages. If a distribution matching `requirement` is found,
+ the returned distribution's ``location`` is the value you would have
+ gotten from calling the ``download()`` method with the matching
+ distribution's URL or filename. If no matching distribution is found,
+ ``None`` is returned.
+
+ If the `source` flag is set, only source distributions and source
+ checkout links will be considered. Unless the `develop_ok` flag is
+ set, development and system eggs (i.e., those using the ``.egg-info``
+ format) will be ignored.
+ """
+
+ # process a Requirement
+ self.info("Searching for %s", requirement)
+ skipped = {}
+
+ def find(req):
+ # Find a matching distribution; may be called more than once
+
+ for dist in self[req.key]:
+
+ if dist.precedence==DEVELOP_DIST and not develop_ok:
+ if dist not in skipped:
+ self.warn("Skipping development or system egg: %s",dist)
+ skipped[dist] = 1
+ continue
+
+ if dist in req and (dist.precedence<=SOURCE_DIST or not source):
+ self.info("Best match: %s", dist)
+ return dist.clone(
+ location=self.download(dist.location, tmpdir)
+ )
+
+ if force_scan:
+ self.prescan()
+ self.find_packages(requirement)
+
+ dist = find(requirement)
+ if dist is None and self.to_scan is not None:
+ self.prescan()
+ dist = find(requirement)
+
+ if dist is None and not force_scan:
+ self.find_packages(requirement)
+ dist = find(requirement)
+
+ if dist is None:
+ self.warn(
+ "No local packages or download links found for %s%s",
+ (source and "a source distribution of " or ""),
+ requirement,
+ )
+ return dist
+
+ def fetch(self, requirement, tmpdir, force_scan=False, source=False):
+ """Obtain a file suitable for fulfilling `requirement`
+
+ DEPRECATED; use the ``fetch_distribution()`` method now instead. For
+ backward compatibility, this routine is identical but returns the
+ ``location`` of the downloaded distribution instead of a distribution
+ object.
+ """
+ dist = self.fetch_distribution(requirement,tmpdir,force_scan,source)
+ if dist is not None:
+ return dist.location
+ return None
+
+
+
+
+
+
+
+
+ def gen_setup(self, filename, fragment, tmpdir):
+        match = EGG_FRAGMENT.match(fragment)
+ dists = match and [d for d in
+ interpret_distro_name(filename, match.group(1), None) if d.version
+ ] or []
+
+ if len(dists)==1: # unambiguous ``#egg`` fragment
+ basename = os.path.basename(filename)
+
+ # Make sure the file has been downloaded to the temp dir.
+ if os.path.dirname(filename) != tmpdir:
+ dst = os.path.join(tmpdir, basename)
+ from setuptools.command.easy_install import samefile
+ if not samefile(filename, dst):
+ shutil.copy2(filename, dst)
+ filename=dst
+
+ file = open(os.path.join(tmpdir, 'setup.py'), 'w')
+ file.write(
+ "from setuptools import setup\n"
+ "setup(name=%r, version=%r, py_modules=[%r])\n"
+ % (
+ dists[0].project_name, dists[0].version,
+ os.path.splitext(basename)[0]
+ )
+ )
+ file.close()
+ return filename
+
+ elif match:
+ raise DistutilsError(
+ "Can't unambiguously interpret project/version identifier %r; "
+ "any dashes in the name or version should be escaped using "
+                "underscores. (Candidates: %r)" % (fragment,dists)
+ )
+ else:
+ raise DistutilsError(
+ "Can't process plain .py files without an '#egg=name-version'"
+ " suffix to enable automatic setup script generation."
+ )
+
+ dl_blocksize = 8192
+ def _download_to(self, url, filename):
+ self.url_ok(url,True) # raises error if not allowed
+ self.info("Downloading %s", url)
+ # Download the file
+ fp, tfp, info = None, None, None
+ try:
+ if '#' in url:
+ url, info = url.split('#', 1)
+ fp = self.open_url(url)
+ if isinstance(fp, urllib2.HTTPError):
+ raise DistutilsError(
+ "Can't download %s: %s %s" % (url, fp.code,fp.msg)
+ )
+ cs = md5()
+ headers = fp.info()
+ blocknum = 0
+ bs = self.dl_blocksize
+ size = -1
+ if "content-length" in headers:
+ size = int(headers["Content-Length"])
+ self.reporthook(url, filename, blocknum, bs, size)
+ tfp = open(filename,'wb')
+ while True:
+ block = fp.read(bs)
+ if block:
+ cs.update(block)
+ tfp.write(block)
+ blocknum += 1
+ self.reporthook(url, filename, blocknum, bs, size)
+ else:
+ break
+ if info: self.check_md5(cs, info, filename, tfp)
+ return headers
+ finally:
+ if fp: fp.close()
+ if tfp: tfp.close()
+
+ def reporthook(self, url, filename, blocknum, blksize, size):
+ pass # no-op
+
+ def retry_sf_download(self, url, filename):
+ try:
+ return self._download_to(url, filename)
+ except:
+ scheme, server, path, param, query, frag = urlparse.urlparse(url)
+ if server!='dl.sourceforge.net':
+ raise
+
+ mirror = get_sf_ip()
+
+ while _sf_mirrors:
+ self.warn("Download failed: %s", sys.exc_info()[1])
+ url = urlparse.urlunparse((scheme, mirror, path, param, '', frag))
+ try:
+ return self._download_to(url, filename)
+ except:
+ _sf_mirrors.remove(mirror) # don't retry the same mirror
+ mirror = get_sf_ip()
+
+ raise # fail if no mirror works
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ def open_url(self, url):
+ try:
+ return urllib2.urlopen(url)
+ except urllib2.HTTPError, v:
+ return v
+ except urllib2.URLError, v:
+ raise DistutilsError("Download error: %s" % v.reason)
+
+
+ def _download_url(self, scheme, url, tmpdir):
+
+ # Determine download filename
+ #
+ name = filter(None,urlparse.urlparse(url)[2].split('/'))
+ if name:
+ name = name[-1]
+ while '..' in name:
+ name = name.replace('..','.').replace('\\','_')
+ else:
+ name = "__downloaded__" # default if URL has no path contents
+
+ if name.endswith('.egg.zip'):
+ name = name[:-4] # strip the extra .zip before download
+
+ filename = os.path.join(tmpdir,name)
+
+ # Download the file
+ #
+ if scheme=='svn' or scheme.startswith('svn+'):
+ return self._download_svn(url, filename)
+ else:
+ headers = self.retry_sf_download(url, filename)
+ if 'html' in headers['content-type'].lower():
+ return self._download_html(url, headers, filename, tmpdir)
+ else:
+ return filename
+
+ def scan_url(self, url):
+ self.process_url(url, True)
+
+
+ def _download_html(self, url, headers, filename, tmpdir):
+ file = open(filename)
+ for line in file:
+ if line.strip():
+ # Check for a subversion index page
+ if re.search(r'<title>Revision \d+:', line):
+ # it's a subversion index page:
+ file.close()
+ os.unlink(filename)
+ return self._download_svn(url, filename)
+ break # not an index page
+ file.close()
+ os.unlink(filename)
+ raise DistutilsError("Unexpected HTML page found at "+url)
+
+ def _download_svn(self, url, filename):
+ url = url.split('#',1)[0] # remove any fragment for svn's sake
+ self.info("Doing subversion checkout from %s to %s", url, filename)
+ os.system("svn checkout -q %s %s" % (url, filename))
+ return filename
+
+ def debug(self, msg, *args):
+ log.debug(msg, *args)
+
+ def info(self, msg, *args):
+ log.info(msg, *args)
+
+ def warn(self, msg, *args):
+ log.warn(msg, *args)
+
+
+
+
+
+
+
+
+
+
+
+
+def fix_sf_url(url):
+ scheme, server, path, param, query, frag = urlparse.urlparse(url)
+ if server!='prdownloads.sourceforge.net':
+ return url
+ return urlparse.urlunparse(
+ (scheme, 'dl.sourceforge.net', 'sourceforge'+path, param, '', frag)
+ )
+
+_sf_mirrors = []
+
+def get_sf_ip():
+ if not _sf_mirrors:
+ try:
+ _sf_mirrors[:] = socket.gethostbyname_ex('dl.sourceforge.net')[-1]
+ except socket.error:
+            # DNS lookup failed (possibly blocked by a firewall); fall
+            # back to the canonical hostname
+ _sf_mirrors[:] = ['dl.sourceforge.net']
+ return random.choice(_sf_mirrors)
diff --git a/Lib/setuptools/sandbox.py b/Lib/setuptools/sandbox.py
new file mode 100755
index 0000000..606944b
--- /dev/null
+++ b/Lib/setuptools/sandbox.py
@@ -0,0 +1,203 @@
+import os, sys, __builtin__, tempfile
+_os = sys.modules[os.name]
+_open = open
+from distutils.errors import DistutilsError
+__all__ = [
+ "AbstractSandbox", "DirectorySandbox", "SandboxViolation", "run_setup",
+]
+
+def run_setup(setup_script, args):
+ """Run a distutils setup script, sandboxed in its directory"""
+
+ old_dir = os.getcwd()
+ save_argv = sys.argv[:]
+ save_path = sys.path[:]
+ setup_dir = os.path.abspath(os.path.dirname(setup_script))
+ temp_dir = os.path.join(setup_dir,'temp')
+ if not os.path.isdir(temp_dir): os.makedirs(temp_dir)
+ save_tmp = tempfile.tempdir
+
+ try:
+ tempfile.tempdir = temp_dir
+ os.chdir(setup_dir)
+ try:
+ sys.argv[:] = [setup_script]+list(args)
+ sys.path.insert(0, setup_dir)
+ DirectorySandbox(setup_dir).run(
+ lambda: execfile(
+ "setup.py",
+ {'__file__':setup_script, '__name__':'__main__'}
+ )
+ )
+ except SystemExit, v:
+ if v.args and v.args[0]:
+ raise
+ # Normal exit, just return
+ finally:
+ os.chdir(old_dir)
+ sys.path[:] = save_path
+ sys.argv[:] = save_argv
+ tempfile.tempdir = save_tmp
+
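+
+# Illustrative usage (hypothetical path):
+#
+#     run_setup('/tmp/somepkg/setup.py', ['install'])
+#
+# runs the script with os/open operations confined to /tmp/somepkg
+# (see DirectorySandbox below).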
+class AbstractSandbox:
+ """Wrap 'os' module and 'open()' builtin for virtualizing setup scripts"""
+
+ _active = False
+
+ def __init__(self):
+ self._attrs = [
+ name for name in dir(_os)
+ if not name.startswith('_') and hasattr(self,name)
+ ]
+
+ def _copy(self, source):
+ for name in self._attrs:
+ setattr(os, name, getattr(source,name))
+
+ def run(self, func):
+ """Run 'func' under os sandboxing"""
+ try:
+ self._copy(self)
+ __builtin__.open = __builtin__.file = self._open
+ self._active = True
+ return func()
+ finally:
+ self._active = False
+ __builtin__.open = __builtin__.file = _open
+ self._copy(_os)
+
+
+ def _mk_dual_path_wrapper(name):
+ original = getattr(_os,name)
+ def wrap(self,src,dst,*args,**kw):
+ if self._active:
+ src,dst = self._remap_pair(name,src,dst,*args,**kw)
+ return original(src,dst,*args,**kw)
+ return wrap
+
+
+ for name in ["rename", "link", "symlink"]:
+ if hasattr(_os,name): locals()[name] = _mk_dual_path_wrapper(name)
+
+
+ def _mk_single_path_wrapper(name, original=None):
+ original = original or getattr(_os,name)
+ def wrap(self,path,*args,**kw):
+ if self._active:
+ path = self._remap_input(name,path,*args,**kw)
+ return original(path,*args,**kw)
+ return wrap
+
+ _open = _mk_single_path_wrapper('file', _open)
+ for name in [
+ "stat", "listdir", "chdir", "open", "chmod", "chown", "mkdir",
+ "remove", "unlink", "rmdir", "utime", "lchown", "chroot", "lstat",
+ "startfile", "mkfifo", "mknod", "pathconf", "access"
+ ]:
+ if hasattr(_os,name): locals()[name] = _mk_single_path_wrapper(name)
+
+
+ def _mk_single_with_return(name):
+ original = getattr(_os,name)
+ def wrap(self,path,*args,**kw):
+ if self._active:
+ path = self._remap_input(name,path,*args,**kw)
+ return self._remap_output(name, original(path,*args,**kw))
+ return original(path,*args,**kw)
+ return wrap
+
+ for name in ['readlink', 'tempnam']:
+ if hasattr(_os,name): locals()[name] = _mk_single_with_return(name)
+
+ def _mk_query(name):
+ original = getattr(_os,name)
+ def wrap(self,*args,**kw):
+ retval = original(*args,**kw)
+ if self._active:
+ return self._remap_output(name, retval)
+ return retval
+ return wrap
+
+ for name in ['getcwd', 'tmpnam']:
+ if hasattr(_os,name): locals()[name] = _mk_query(name)
+
+ def _validate_path(self,path):
+ """Called to remap or validate any path, whether input or output"""
+ return path
+
+ def _remap_input(self,operation,path,*args,**kw):
+ """Called for path inputs"""
+ return self._validate_path(path)
+
+ def _remap_output(self,operation,path):
+ """Called for path outputs"""
+ return self._validate_path(path)
+
+ def _remap_pair(self,operation,src,dst,*args,**kw):
+ """Called for path pairs like rename, link, and symlink operations"""
+ return (
+ self._remap_input(operation+'-from',src,*args,**kw),
+ self._remap_input(operation+'-to',dst,*args,**kw)
+ )
+
+
+class DirectorySandbox(AbstractSandbox):
+ """Restrict operations to a single subdirectory - pseudo-chroot"""
+
+ write_ops = dict.fromkeys([
+ "open", "chmod", "chown", "mkdir", "remove", "unlink", "rmdir",
+ "utime", "lchown", "chroot", "mkfifo", "mknod", "tempnam",
+ ])
+
+ def __init__(self,sandbox):
+ self._sandbox = os.path.normcase(os.path.realpath(sandbox))
+ self._prefix = os.path.join(self._sandbox,'')
+ AbstractSandbox.__init__(self)
+
+ def _violation(self, operation, *args, **kw):
+ raise SandboxViolation(operation, args, kw)
+
+ def _open(self, path, mode='r', *args, **kw):
+ if mode not in ('r', 'rt', 'rb', 'rU') and not self._ok(path):
+ self._violation("open", path, mode, *args, **kw)
+ return _open(path,mode,*args,**kw)
+
+ def tmpnam(self):
+ self._violation("tmpnam")
+
+ def _ok(self,path):
+ active = self._active
+ try:
+ self._active = False
+ realpath = os.path.normcase(os.path.realpath(path))
+ if realpath==self._sandbox or realpath.startswith(self._prefix):
+ return True
+ finally:
+ self._active = active
+
+ def _remap_input(self,operation,path,*args,**kw):
+ """Called for path inputs"""
+ if operation in self.write_ops and not self._ok(path):
+ self._violation(operation, os.path.realpath(path), *args, **kw)
+ return path
+
+ def _remap_pair(self,operation,src,dst,*args,**kw):
+ """Called for path pairs like rename, link, and symlink operations"""
+ if not self._ok(src) or not self._ok(dst):
+ self._violation(operation, src, dst, *args, **kw)
+ return (src,dst)
+
+
+class SandboxViolation(DistutilsError):
+ """A setup script attempted to modify the filesystem outside the sandbox"""
+
+ def __str__(self):
+ return """SandboxViolation: %s%r %s
+
+The package setup script has attempted to modify files on your system
+that are not within the EasyInstall build area, and has been aborted.
+
+This package cannot be safely installed by EasyInstall, and may not
+support alternate installation locations even if you run its setup
+script by hand. Please inform the package's author and the EasyInstall
+maintainers to find out if a fix or workaround is available.""" % self.args
diff --git a/Lib/setuptools/site-patch.py b/Lib/setuptools/site-patch.py
new file mode 100755
index 0000000..b1b27b9
--- /dev/null
+++ b/Lib/setuptools/site-patch.py
@@ -0,0 +1,74 @@
+def __boot():
+ import sys, imp, os, os.path
+ PYTHONPATH = os.environ.get('PYTHONPATH')
+ if PYTHONPATH is None or (sys.platform=='win32' and not PYTHONPATH):
+ PYTHONPATH = []
+ else:
+ PYTHONPATH = PYTHONPATH.split(os.pathsep)
+
+ pic = getattr(sys,'path_importer_cache',{})
+ stdpath = sys.path[len(PYTHONPATH):]
+ mydir = os.path.dirname(__file__)
+ #print "searching",stdpath,sys.path
+
+ for item in stdpath:
+ if item==mydir or not item:
+ continue # skip if current dir. on Windows, or my own directory
+ importer = pic.get(item)
+ if importer is not None:
+ loader = importer.find_module('site')
+ if loader is not None:
+ # This should actually reload the current module
+ loader.load_module('site')
+ break
+ else:
+ try:
+ stream, path, descr = imp.find_module('site',[item])
+ except ImportError:
+ continue
+ if stream is None:
+ continue
+ try:
+ # This should actually reload the current module
+ imp.load_module('site',stream,path,descr)
+ finally:
+ stream.close()
+ break
+ else:
+ raise ImportError("Couldn't find the real 'site' module")
+
+ #print "loaded", __file__
+
+    known_paths = dict([(makepath(item)[1],1) for item in sys.path]) # Python 2.2 compat
+
+ oldpos = getattr(sys,'__egginsert',0) # save old insertion position
+ sys.__egginsert = 0 # and reset the current one
+
+ for item in PYTHONPATH:
+ addsitedir(item)
+
+ sys.__egginsert += oldpos # restore effective old position
+
+ d,nd = makepath(stdpath[0])
+ insert_at = None
+ new_path = []
+
+ for item in sys.path:
+ p,np = makepath(item)
+
+ if np==nd and insert_at is None:
+ # We've hit the first 'system' path entry, so added entries go here
+ insert_at = len(new_path)
+
+ if np in known_paths or insert_at is None:
+ new_path.append(item)
+ else:
+ # new path after the insert point, back-insert it
+ new_path.insert(insert_at, item)
+ insert_at += 1
+
+ sys.path[:] = new_path
+
+if __name__=='site':
+ __boot()
+ del __boot
diff --git a/Lib/setuptools/tests/__init__.py b/Lib/setuptools/tests/__init__.py
new file mode 100644
index 0000000..8a767dc
--- /dev/null
+++ b/Lib/setuptools/tests/__init__.py
@@ -0,0 +1,364 @@
+"""Tests for the 'setuptools' package"""
+
+from unittest import TestSuite, TestCase, makeSuite, defaultTestLoader
+import distutils.core, distutils.cmd
+from distutils.errors import DistutilsOptionError, DistutilsPlatformError
+from distutils.errors import DistutilsSetupError
+import setuptools, setuptools.dist
+from setuptools import Feature
+from distutils.core import Extension
+from setuptools.depends import extract_constant, get_module_constant
+from setuptools.depends import find_module, Require
+from distutils.version import StrictVersion, LooseVersion
+from distutils.util import convert_path
+import sys, os.path
+
+def additional_tests():
+ import doctest
+ return doctest.DocFileSuite(
+ 'api_tests.txt', optionflags=doctest.ELLIPSIS, package=__name__,
+ )
+
+
+def makeSetup(**args):
+ """Return distribution from 'setup(**args)', without executing commands"""
+
+ distutils.core._setup_stop_after = "commandline"
+
+ # Don't let system command line leak into tests!
+ args.setdefault('script_args',['install'])
+
+ try:
+ return setuptools.setup(**args)
+ finally:
+        distutils.core._setup_stop_after = None
+
+
+
+
+
+
+
+class DependsTests(TestCase):
+
+ def testExtractConst(self):
+
+ from setuptools.depends import extract_constant
+
+ def f1():
+ global x,y,z
+ x = "test"
+ y = z
+
+ # unrecognized name
+ self.assertEqual(extract_constant(f1.func_code,'q', -1), None)
+
+ # constant assigned
+ self.assertEqual(extract_constant(f1.func_code,'x', -1), "test")
+
+ # expression assigned
+ self.assertEqual(extract_constant(f1.func_code,'y', -1), -1)
+
+ # recognized name, not assigned
+ self.assertEqual(extract_constant(f1.func_code,'z', -1), None)
+
+
+ def testFindModule(self):
+ self.assertRaises(ImportError, find_module, 'no-such.-thing')
+ self.assertRaises(ImportError, find_module, 'setuptools.non-existent')
+ f,p,i = find_module('setuptools.tests'); f.close()
+
+ def testModuleExtract(self):
+ from distutils import __version__
+ self.assertEqual(
+ get_module_constant('distutils','__version__'), __version__
+ )
+ self.assertEqual(
+ get_module_constant('sys','version'), sys.version
+ )
+ self.assertEqual(
+ get_module_constant('setuptools.tests','__doc__'),__doc__
+ )
+
+ def testRequire(self):
+
+ req = Require('Distutils','1.0.3','distutils')
+
+ self.assertEqual(req.name, 'Distutils')
+ self.assertEqual(req.module, 'distutils')
+ self.assertEqual(req.requested_version, '1.0.3')
+ self.assertEqual(req.attribute, '__version__')
+ self.assertEqual(req.full_name(), 'Distutils-1.0.3')
+
+ from distutils import __version__
+ self.assertEqual(req.get_version(), __version__)
+ self.failUnless(req.version_ok('1.0.9'))
+ self.failIf(req.version_ok('0.9.1'))
+ self.failIf(req.version_ok('unknown'))
+
+ self.failUnless(req.is_present())
+ self.failUnless(req.is_current())
+
+ req = Require('Distutils 3000','03000','distutils',format=LooseVersion)
+ self.failUnless(req.is_present())
+ self.failIf(req.is_current())
+ self.failIf(req.version_ok('unknown'))
+
+ req = Require('Do-what-I-mean','1.0','d-w-i-m')
+ self.failIf(req.is_present())
+ self.failIf(req.is_current())
+
+ req = Require('Tests', None, 'tests', homepage="http://example.com")
+ self.assertEqual(req.format, None)
+ self.assertEqual(req.attribute, None)
+ self.assertEqual(req.requested_version, None)
+ self.assertEqual(req.full_name(), 'Tests')
+ self.assertEqual(req.homepage, 'http://example.com')
+
+ paths = [os.path.dirname(p) for p in __path__]
+ self.failUnless(req.is_present(paths))
+ self.failUnless(req.is_current(paths))
+
+
+
+class DistroTests(TestCase):
+
+ def setUp(self):
+ self.e1 = Extension('bar.ext',['bar.c'])
+ self.e2 = Extension('c.y', ['y.c'])
+
+ self.dist = makeSetup(
+ packages=['a', 'a.b', 'a.b.c', 'b', 'c'],
+ py_modules=['b.d','x'],
+ ext_modules = (self.e1, self.e2),
+ package_dir = {},
+ )
+
+
+ def testDistroType(self):
+ self.failUnless(isinstance(self.dist,setuptools.dist.Distribution))
+
+
+ def testExcludePackage(self):
+ self.dist.exclude_package('a')
+ self.assertEqual(self.dist.packages, ['b','c'])
+
+ self.dist.exclude_package('b')
+ self.assertEqual(self.dist.packages, ['c'])
+ self.assertEqual(self.dist.py_modules, ['x'])
+ self.assertEqual(self.dist.ext_modules, [self.e1, self.e2])
+
+ self.dist.exclude_package('c')
+ self.assertEqual(self.dist.packages, [])
+ self.assertEqual(self.dist.py_modules, ['x'])
+ self.assertEqual(self.dist.ext_modules, [self.e1])
+
+ # test removals from unspecified options
+ makeSetup().exclude_package('x')
+
+
+
+
+
+
+
+ def testIncludeExclude(self):
+ # remove an extension
+ self.dist.exclude(ext_modules=[self.e1])
+ self.assertEqual(self.dist.ext_modules, [self.e2])
+
+ # add it back in
+ self.dist.include(ext_modules=[self.e1])
+ self.assertEqual(self.dist.ext_modules, [self.e2, self.e1])
+
+ # should not add duplicate
+ self.dist.include(ext_modules=[self.e1])
+ self.assertEqual(self.dist.ext_modules, [self.e2, self.e1])
+
+ def testExcludePackages(self):
+ self.dist.exclude(packages=['c','b','a'])
+ self.assertEqual(self.dist.packages, [])
+ self.assertEqual(self.dist.py_modules, ['x'])
+ self.assertEqual(self.dist.ext_modules, [self.e1])
+
+ def testEmpty(self):
+ dist = makeSetup()
+ dist.include(packages=['a'], py_modules=['b'], ext_modules=[self.e2])
+ dist = makeSetup()
+ dist.exclude(packages=['a'], py_modules=['b'], ext_modules=[self.e2])
+
+ def testContents(self):
+ self.failUnless(self.dist.has_contents_for('a'))
+ self.dist.exclude_package('a')
+ self.failIf(self.dist.has_contents_for('a'))
+
+ self.failUnless(self.dist.has_contents_for('b'))
+ self.dist.exclude_package('b')
+ self.failIf(self.dist.has_contents_for('b'))
+
+ self.failUnless(self.dist.has_contents_for('c'))
+ self.dist.exclude_package('c')
+ self.failIf(self.dist.has_contents_for('c'))
+
+
+
+
+ def testInvalidIncludeExclude(self):
+ self.assertRaises(DistutilsSetupError,
+ self.dist.include, nonexistent_option='x'
+ )
+ self.assertRaises(DistutilsSetupError,
+ self.dist.exclude, nonexistent_option='x'
+ )
+ self.assertRaises(DistutilsSetupError,
+ self.dist.include, packages={'x':'y'}
+ )
+ self.assertRaises(DistutilsSetupError,
+ self.dist.exclude, packages={'x':'y'}
+ )
+ self.assertRaises(DistutilsSetupError,
+ self.dist.include, ext_modules={'x':'y'}
+ )
+ self.assertRaises(DistutilsSetupError,
+ self.dist.exclude, ext_modules={'x':'y'}
+ )
+
+ self.assertRaises(DistutilsSetupError,
+ self.dist.include, package_dir=['q']
+ )
+ self.assertRaises(DistutilsSetupError,
+ self.dist.exclude, package_dir=['q']
+ )
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+class FeatureTests(TestCase):
+
+ def setUp(self):
+ self.req = Require('Distutils','1.0.3','distutils')
+ self.dist = makeSetup(
+ features={
+ 'foo': Feature("foo",standard=True,require_features=['baz',self.req]),
+ 'bar': Feature("bar", standard=True, packages=['pkg.bar'],
+ py_modules=['bar_et'], remove=['bar.ext'],
+ ),
+ 'baz': Feature(
+ "baz", optional=False, packages=['pkg.baz'],
+ scripts = ['scripts/baz_it'],
+ libraries=[('libfoo','foo/foofoo.c')]
+ ),
+ 'dwim': Feature("DWIM", available=False, remove='bazish'),
+ },
+ script_args=['--without-bar', 'install'],
+ packages = ['pkg.bar', 'pkg.foo'],
+ py_modules = ['bar_et', 'bazish'],
+ ext_modules = [Extension('bar.ext',['bar.c'])]
+ )
+
+ def testDefaults(self):
+ self.failIf(
+ Feature(
+ "test",standard=True,remove='x',available=False
+ ).include_by_default()
+ )
+ self.failUnless(
+ Feature("test",standard=True,remove='x').include_by_default()
+ )
+ # Feature must have either kwargs, removes, or require_features
+ self.assertRaises(DistutilsSetupError, Feature, "test")
+
+ def testAvailability(self):
+ self.assertRaises(
+ DistutilsPlatformError,
+ self.dist.features['dwim'].include_in, self.dist
+ )
+
+ def testFeatureOptions(self):
+ dist = self.dist
+ self.failUnless(
+ ('with-dwim',None,'include DWIM') in dist.feature_options
+ )
+ self.failUnless(
+ ('without-dwim',None,'exclude DWIM (default)') in dist.feature_options
+ )
+ self.failUnless(
+ ('with-bar',None,'include bar (default)') in dist.feature_options
+ )
+ self.failUnless(
+ ('without-bar',None,'exclude bar') in dist.feature_options
+ )
+ self.assertEqual(dist.feature_negopt['without-foo'],'with-foo')
+ self.assertEqual(dist.feature_negopt['without-bar'],'with-bar')
+ self.assertEqual(dist.feature_negopt['without-dwim'],'with-dwim')
+ self.failIf('without-baz' in dist.feature_negopt)
+
+ def testUseFeatures(self):
+ dist = self.dist
+ self.assertEqual(dist.with_foo,1)
+ self.assertEqual(dist.with_bar,0)
+ self.assertEqual(dist.with_baz,1)
+ self.failIf('bar_et' in dist.py_modules)
+ self.failIf('pkg.bar' in dist.packages)
+ self.failUnless('pkg.baz' in dist.packages)
+ self.failUnless('scripts/baz_it' in dist.scripts)
+ self.failUnless(('libfoo','foo/foofoo.c') in dist.libraries)
+ self.assertEqual(dist.ext_modules,[])
+ self.assertEqual(dist.require_features, [self.req])
+
+ # If we ask for bar, it should fail because we explicitly disabled
+ # it on the command line
+ self.assertRaises(DistutilsOptionError, dist.include_feature, 'bar')
+
+ def testFeatureWithInvalidRemove(self):
+ self.assertRaises(
+ SystemExit, makeSetup, features = {'x':Feature('x', remove='y')}
+ )
+
+class TestCommandTests(TestCase):
+
+ def testTestIsCommand(self):
+ test_cmd = makeSetup().get_command_obj('test')
+ self.failUnless(isinstance(test_cmd, distutils.cmd.Command))
+
+ def testLongOptSuiteWNoDefault(self):
+ ts1 = makeSetup(script_args=['test','--test-suite=foo.tests.suite'])
+ ts1 = ts1.get_command_obj('test')
+ ts1.ensure_finalized()
+ self.assertEqual(ts1.test_suite, 'foo.tests.suite')
+
+ def testDefaultSuite(self):
+ ts2 = makeSetup(test_suite='bar.tests.suite').get_command_obj('test')
+ ts2.ensure_finalized()
+ self.assertEqual(ts2.test_suite, 'bar.tests.suite')
+
+ def testDefaultWModuleOnCmdLine(self):
+ ts3 = makeSetup(
+ test_suite='bar.tests',
+ script_args=['test','-m','foo.tests']
+ ).get_command_obj('test')
+ ts3.ensure_finalized()
+ self.assertEqual(ts3.test_module, 'foo.tests')
+ self.assertEqual(ts3.test_suite, 'foo.tests.test_suite')
+
+ def testConflictingOptions(self):
+ ts4 = makeSetup(
+ script_args=['test','-m','bar.tests', '-s','foo.tests.suite']
+ ).get_command_obj('test')
+ self.assertRaises(DistutilsOptionError, ts4.ensure_finalized)
+
+ def testNoSuite(self):
+ ts5 = makeSetup().get_command_obj('test')
+ ts5.ensure_finalized()
+ self.assertEqual(ts5.test_suite, None)
diff --git a/Lib/setuptools/tests/api_tests.txt b/Lib/setuptools/tests/api_tests.txt
new file mode 100755
index 0000000..735ad8d
--- /dev/null
+++ b/Lib/setuptools/tests/api_tests.txt
@@ -0,0 +1,330 @@
+Pluggable Distributions of Python Software
+==========================================
+
+Distributions
+-------------
+
+A "Distribution" is a collection of files that represent a "Release" of a
+"Project" as of a particular point in time, denoted by a
+"Version"::
+
+ >>> import sys, pkg_resources
+ >>> from pkg_resources import Distribution
+ >>> Distribution(project_name="Foo", version="1.2")
+ Foo 1.2
+
+Distributions have a location, which can be a filename, URL, or really anything
+else you care to use::
+
+ >>> dist = Distribution(
+ ... location="http://example.com/something",
+ ... project_name="Bar", version="0.9"
+ ... )
+
+ >>> dist
+ Bar 0.9 (http://example.com/something)
+
+
+Distributions have various introspectable attributes::
+
+ >>> dist.location
+ 'http://example.com/something'
+
+ >>> dist.project_name
+ 'Bar'
+
+ >>> dist.version
+ '0.9'
+
+ >>> dist.py_version == sys.version[:3]
+ True
+
+ >>> print dist.platform
+ None
+
+Including various computed attributes::
+
+ >>> from pkg_resources import parse_version
+ >>> dist.parsed_version == parse_version(dist.version)
+ True
+
+ >>> dist.key # case-insensitive form of the project name
+ 'bar'
+
+Distributions are compared (and hashed) by version first::
+
+ >>> Distribution(version='1.0') == Distribution(version='1.0')
+ True
+ >>> Distribution(version='1.0') == Distribution(version='1.1')
+ False
+ >>> Distribution(version='1.0') < Distribution(version='1.1')
+ True
+
+but also by project name (case-insensitive), platform, Python version,
+location, etc.::
+
+ >>> Distribution(project_name="Foo",version="1.0") == \
+ ... Distribution(project_name="Foo",version="1.0")
+ True
+
+ >>> Distribution(project_name="Foo",version="1.0") == \
+ ... Distribution(project_name="foo",version="1.0")
+ True
+
+ >>> Distribution(project_name="Foo",version="1.0") == \
+ ... Distribution(project_name="Foo",version="1.1")
+ False
+
+ >>> Distribution(project_name="Foo",py_version="2.3",version="1.0") == \
+ ... Distribution(project_name="Foo",py_version="2.4",version="1.0")
+ False
+
+ >>> Distribution(location="spam",version="1.0") == \
+ ... Distribution(location="spam",version="1.0")
+ True
+
+ >>> Distribution(location="spam",version="1.0") == \
+ ... Distribution(location="baz",version="1.0")
+ False
+
+
+
+(Still to be documented here: hashing and comparison of distributions by
+precedence and platform; getting the version from metadata; provider
+capabilities; ``egg_name()``; ``as_requirement()``; and ``from_location()`` /
+``from_filename()``, with path normalization.)
+
+Releases may have zero or more "Requirements", which indicate
+what releases of another project the release requires in order to
+function. A Requirement names the other project, expresses some criteria
+as to what releases of that project are acceptable, and lists any "Extras"
+that the requiring release may need from that project. (An Extra is an
+optional feature of a Release, that can only be used if its additional
+Requirements are satisfied.)
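+
+For example (a minimal illustration, reusing the ``Bar`` distribution
+created above)::
+
+    >>> from pkg_resources import Requirement
+    >>> req = Requirement.parse("Bar>=0.5")
+    >>> dist in req     # Bar 0.9 satisfies ">=0.5"
+    True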
+
+
+
+The Working Set
+---------------
+
+A collection of active distributions is called a Working Set. Note that a
+Working Set can contain any importable distribution, not just pluggable ones.
+For example, the Python standard library is an importable distribution that
+will usually be part of the Working Set, even though it is not pluggable.
+Similarly, when you are doing development work on a project, the files you are
+editing are also a Distribution. (And, with a little attention to the
+directory names used, and including some additional metadata, such a
+"development distribution" can be made pluggable as well.)
+
+ >>> from pkg_resources import WorkingSet
+
+A working set's entries are the sys.path entries that correspond to the active
+distributions. By default, the working set's entries are the items on
+``sys.path``::
+
+ >>> ws = WorkingSet()
+ >>> ws.entries == sys.path
+ True
+
+But you can also create an empty working set explicitly, and add distributions
+to it::
+
+ >>> ws = WorkingSet([])
+ >>> ws.add(dist)
+ >>> ws.entries
+ ['http://example.com/something']
+ >>> dist in ws
+ True
+ >>> Distribution('foo',version="") in ws
+ False
+
+And you can iterate over its distributions::
+
+ >>> list(ws)
+ [Bar 0.9 (http://example.com/something)]
+
+Adding the same distribution more than once is a no-op::
+
+ >>> ws.add(dist)
+ >>> list(ws)
+ [Bar 0.9 (http://example.com/something)]
+
+For that matter, adding multiple distributions for the same project also does
+nothing, because a working set can only hold one active distribution per
+project -- the first one added to it::
+
+ >>> ws.add(
+ ... Distribution(
+ ... 'http://example.com/something', project_name="Bar",
+ ... version="7.2"
+ ... )
+ ... )
+ >>> list(ws)
+ [Bar 0.9 (http://example.com/something)]
+
+You can append a path entry to a working set using ``add_entry()``::
+
+ >>> ws.entries
+ ['http://example.com/something']
+ >>> ws.add_entry(pkg_resources.__file__)
+ >>> ws.entries
+ ['http://example.com/something', '...pkg_resources.py...']
+
+Multiple additions result in multiple entries, even if the entry is already in
+the working set (because ``sys.path`` can contain the same entry more than
+once)::
+
+ >>> ws.add_entry(pkg_resources.__file__)
+ >>> ws.entries
+ ['...example.com...', '...pkg_resources...', '...pkg_resources...']
+
+And you can specify the path entry a distribution was found under, using the
+optional second parameter to ``add()``::
+
+ >>> ws = WorkingSet([])
+ >>> ws.add(dist,"foo")
+ >>> ws.entries
+ ['foo']
+
+But even if a distribution is found under multiple path entries, it still only
+shows up once when iterating the working set::
+
+ >>> ws.add_entry(ws.entries[0])
+ >>> list(ws)
+ [Bar 0.9 (http://example.com/something)]
+
+You can ask a WorkingSet to ``find()`` a distribution matching a requirement::
+
+ >>> from pkg_resources import Requirement
+ >>> print ws.find(Requirement.parse("Foo==1.0")) # no match, return None
+ None
+
+ >>> ws.find(Requirement.parse("Bar==0.9")) # match, return distribution
+ Bar 0.9 (http://example.com/something)
+
+Note that asking for a conflicting version of a distribution already in a
+working set triggers a ``pkg_resources.VersionConflict`` error::
+
+ >>> ws.find(Requirement.parse("Bar==1.0")) # doctest: +NORMALIZE_WHITESPACE
+ Traceback (most recent call last):
+ ...
+ VersionConflict: (Bar 0.9 (http://example.com/something),
+ Requirement.parse('Bar==1.0'))
+
+You can subscribe a callback function to receive notifications whenever a new
+distribution is added to a working set. The callback is immediately invoked
+once for each existing distribution in the working set, and then is called
+again for new distributions added thereafter::
+
+ >>> def added(dist): print "Added", dist
+ >>> ws.subscribe(added)
+ Added Bar 0.9
+ >>> foo12 = Distribution(project_name="Foo", version="1.2", location="f12")
+ >>> ws.add(foo12)
+ Added Foo 1.2
+
+Note, however, that only the first distribution added for a given project name
+will trigger a callback, even during the initial ``subscribe()`` callback::
+
+ >>> foo14 = Distribution(project_name="Foo", version="1.4", location="f14")
+ >>> ws.add(foo14) # no callback, because Foo 1.2 is already active
+
+ >>> ws = WorkingSet([])
+ >>> ws.add(foo12)
+ >>> ws.add(foo14)
+ >>> ws.subscribe(added)
+ Added Foo 1.2
+
+And adding a callback more than once has no effect, either::
+
+ >>> ws.subscribe(added) # no callbacks
+
+ # and no double-callbacks on subsequent additions, either
+ >>> just_a_test = Distribution(project_name="JustATest", version="0.99")
+ >>> ws.add(just_a_test)
+ Added JustATest 0.99
+
+
+Finding Plugins
+---------------
+
+``WorkingSet`` objects can be used to figure out what plugins in an
+``Environment`` can be loaded without any resolution errors::
+
+ >>> from pkg_resources import Environment
+
+ >>> plugins = Environment([]) # normally, a list of plugin directories
+ >>> plugins.add(foo12)
+ >>> plugins.add(foo14)
+ >>> plugins.add(just_a_test)
+
+In the simplest case, we just get the newest version of each distribution in
+the plugin environment::
+
+ >>> ws = WorkingSet([])
+ >>> ws.find_plugins(plugins)
+ ([JustATest 0.99, Foo 1.4 (f14)], {})
+
+But if there's a problem with a version conflict or missing requirements, the
+method falls back to older versions, and the error info dict will contain an
+exception instance for each unloadable plugin::
+
+ >>> ws.add(foo12) # this will conflict with Foo 1.4
+ >>> ws.find_plugins(plugins)
+ ([JustATest 0.99, Foo 1.2 (f12)], {Foo 1.4 (f14): VersionConflict(...)})
+
+But if you disallow fallbacks, the method simply skips the failed plugin
+instead of trying older versions::
+
+ >>> ws.find_plugins(plugins, fallback=False)
+ ([JustATest 0.99], {Foo 1.4 (f14): VersionConflict(...)})
+
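+For lower-level control, a working set's ``resolve()`` method takes a list of
+requirements and an ``Environment``, and returns the distributions that would
+have to be activated to satisfy them, raising ``DistributionNotFound`` or
+``VersionConflict`` when it can't. A minimal sketch, reusing the plugin
+environment from above::
+
+    >>> ws = WorkingSet([])
+    >>> ws.resolve([Requirement.parse("JustATest")], plugins)
+    [JustATest 0.99]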
+
+
+Platform Compatibility Rules
+----------------------------
+
+On the Mac, there are potential compatibility issues for modules compiled
+on newer versions of Mac OS X than what the user is running. Additionally,
+Mac OS X will soon have two platforms to contend with: Intel and PowerPC.
+
+Basic equality works as on other platforms::
+
+ >>> from pkg_resources import compatible_platforms as cp
+ >>> reqd = 'macosx-10.4-ppc'
+ >>> cp(reqd, reqd)
+ True
+ >>> cp("win32", reqd)
+ False
+
+Distributions made on other machine types are not compatible::
+
+ >>> cp("macosx-10.4-i386", reqd)
+ False
+
+Distributions made on earlier versions of the OS are compatible, as
+long as they are from the same top-level version. The patchlevel version
+number does not matter::
+
+ >>> cp("macosx-10.4-ppc", reqd)
+ True
+ >>> cp("macosx-10.3-ppc", reqd)
+ True
+ >>> cp("macosx-10.5-ppc", reqd)
+ False
+ >>> cp("macosx-9.5-ppc", reqd)
+ False
+
+Backwards compatibility for packages made via earlier versions of
+setuptools is provided as well::
+
+ >>> cp("darwin-8.2.0-Power_Macintosh", reqd)
+ True
+ >>> cp("darwin-7.2.0-Power_Macintosh", reqd)
+ True
+ >>> cp("darwin-8.2.0-Power_Macintosh", "macosx-10.3-ppc")
+ False
+
diff --git a/Lib/setuptools/tests/test_resources.py b/Lib/setuptools/tests/test_resources.py
new file mode 100644
index 0000000..f32c72e
--- /dev/null
+++ b/Lib/setuptools/tests/test_resources.py
@@ -0,0 +1,483 @@
+from unittest import TestCase, makeSuite
+from pkg_resources import *
+import pkg_resources, sys
+from sets import ImmutableSet
+
+class Metadata(EmptyProvider):
+ """Mock object to return metadata as if from an on-disk distribution"""
+
+ def __init__(self,*pairs):
+ self.metadata = dict(pairs)
+
+ def has_metadata(self,name):
+ return name in self.metadata
+
+ def get_metadata(self,name):
+ return self.metadata[name]
+
+ def get_metadata_lines(self,name):
+ return yield_lines(self.get_metadata(name))
+
+
+class DistroTests(TestCase):
+
+ def testCollection(self):
+ # empty path should produce no distributions
+ ad = Environment([], platform=None, python=None)
+ self.assertEqual(list(ad), [])
+ self.assertEqual(ad['FooPkg'],[])
+
+ ad.add(Distribution.from_filename("FooPkg-1.3_1.egg"))
+ ad.add(Distribution.from_filename("FooPkg-1.4-py2.4-win32.egg"))
+ ad.add(Distribution.from_filename("FooPkg-1.2-py2.4.egg"))
+
+ # Name is in there now
+ self.failUnless(ad['FooPkg'])
+
+ # But only 1 package
+ self.assertEqual(list(ad), ['foopkg'])
+
+
+
+ # Distributions sort by version
+ self.assertEqual(
+ [dist.version for dist in ad['FooPkg']], ['1.4','1.3-1','1.2']
+ )
+ # Removing a distribution leaves sequence alone
+ ad.remove(ad['FooPkg'][1])
+ self.assertEqual(
+ [dist.version for dist in ad['FooPkg']], ['1.4','1.2']
+ )
+ # And inserting adds them in order
+ ad.add(Distribution.from_filename("FooPkg-1.9.egg"))
+ self.assertEqual(
+ [dist.version for dist in ad['FooPkg']], ['1.9','1.4','1.2']
+ )
+
+ ws = WorkingSet([])
+ foo12 = Distribution.from_filename("FooPkg-1.2-py2.4.egg")
+ foo14 = Distribution.from_filename("FooPkg-1.4-py2.4-win32.egg")
+ req, = parse_requirements("FooPkg>=1.3")
+
+ # Nominal case: no distros on path, should yield all applicable
+ self.assertEqual(ad.best_match(req,ws).version, '1.9')
+ # If a matching distro is already installed, should return only that
+ ws.add(foo14); self.assertEqual(ad.best_match(req,ws).version, '1.4')
+
+ # If the first matching distro is unsuitable, it's a version conflict
+ ws = WorkingSet([]); ws.add(foo12); ws.add(foo14)
+ self.assertRaises(VersionConflict, ad.best_match, req, ws)
+
+ # If more than one match on the path, the first one takes precedence
+ ws = WorkingSet([]); ws.add(foo14); ws.add(foo12); ws.add(foo14);
+ self.assertEqual(ad.best_match(req,ws).version, '1.4')
+
+ def checkFooPkg(self,d):
+ self.assertEqual(d.project_name, "FooPkg")
+ self.assertEqual(d.key, "foopkg")
+ self.assertEqual(d.version, "1.3-1")
+ self.assertEqual(d.py_version, "2.4")
+ self.assertEqual(d.platform, "win32")
+ self.assertEqual(d.parsed_version, parse_version("1.3-1"))
+
+ def testDistroBasics(self):
+ d = Distribution(
+ "/some/path",
+ project_name="FooPkg",version="1.3-1",py_version="2.4",platform="win32"
+ )
+ self.checkFooPkg(d)
+
+ d = Distribution("/some/path")
+ self.assertEqual(d.py_version, sys.version[:3])
+ self.assertEqual(d.platform, None)
+
+ def testDistroParse(self):
+ d = Distribution.from_filename("FooPkg-1.3_1-py2.4-win32.egg")
+ self.checkFooPkg(d)
+ d = Distribution.from_filename("FooPkg-1.3_1-py2.4-win32.egg-info")
+ self.checkFooPkg(d)
+
+ def testDistroMetadata(self):
+ d = Distribution(
+ "/some/path", project_name="FooPkg", py_version="2.4", platform="win32",
+ metadata = Metadata(
+ ('PKG-INFO',"Metadata-Version: 1.0\nVersion: 1.3-1\n")
+ )
+ )
+ self.checkFooPkg(d)
+
+
+ def distRequires(self, txt):
+ return Distribution("/foo", metadata=Metadata(('depends.txt', txt)))
+
+ def checkRequires(self, dist, txt, extras=()):
+ self.assertEqual(
+ list(dist.requires(extras)),
+ list(parse_requirements(txt))
+ )
+
+ def testDistroDependsSimple(self):
+ for v in "Twisted>=1.5", "Twisted>=1.5\nZConfig>=2.0":
+ self.checkRequires(self.distRequires(v), v)
+
+
+ def testResolve(self):
+ ad = Environment([]); ws = WorkingSet([])
+ # Resolving no requirements -> nothing to install
+ self.assertEqual( list(ws.resolve([],ad)), [] )
+ # Request something not in the collection -> DistributionNotFound
+ self.assertRaises(
+ DistributionNotFound, ws.resolve, parse_requirements("Foo"), ad
+ )
+ Foo = Distribution.from_filename(
+ "/foo_dir/Foo-1.2.egg",
+ metadata=Metadata(('depends.txt', "[bar]\nBaz>=2.0"))
+ )
+ ad.add(Foo); ad.add(Distribution.from_filename("Foo-0.9.egg"))
+
+ # Request thing(s) that are available -> list to activate
+ for i in range(3):
+ targets = list(ws.resolve(parse_requirements("Foo"), ad))
+ self.assertEqual(targets, [Foo])
+ map(ws.add,targets)
+ self.assertRaises(VersionConflict, ws.resolve,
+ parse_requirements("Foo==0.9"), ad)
+ ws = WorkingSet([]) # reset
+
+ # Request an extra that causes an unresolved dependency for "Baz"
+ self.assertRaises(
+ DistributionNotFound, ws.resolve,parse_requirements("Foo[bar]"), ad
+ )
+ Baz = Distribution.from_filename(
+ "/foo_dir/Baz-2.1.egg", metadata=Metadata(('depends.txt', "Foo"))
+ )
+ ad.add(Baz)
+
+ # Activation list now includes resolved dependency
+ self.assertEqual(
+ list(ws.resolve(parse_requirements("Foo[bar]"), ad)), [Foo,Baz]
+ )
+ # Requests for conflicting versions produce VersionConflict
+ self.assertRaises( VersionConflict,
+ ws.resolve, parse_requirements("Foo==1.2\nFoo!=1.2"), ad
+ )
+
+ def testDistroDependsOptions(self):
+ d = self.distRequires("""
+ Twisted>=1.5
+ [docgen]
+ ZConfig>=2.0
+ docutils>=0.3
+ [fastcgi]
+ fcgiapp>=0.1""")
+ self.checkRequires(d,"Twisted>=1.5")
+ self.checkRequires(
+ d,"Twisted>=1.5 ZConfig>=2.0 docutils>=0.3".split(), ["docgen"]
+ )
+ self.checkRequires(
+ d,"Twisted>=1.5 fcgiapp>=0.1".split(), ["fastcgi"]
+ )
+ self.checkRequires(
+ d,"Twisted>=1.5 ZConfig>=2.0 docutils>=0.3 fcgiapp>=0.1".split(),
+ ["docgen","fastcgi"]
+ )
+ self.checkRequires(
+ d,"Twisted>=1.5 fcgiapp>=0.1 ZConfig>=2.0 docutils>=0.3".split(),
+ ["fastcgi", "docgen"]
+ )
+ self.assertRaises(UnknownExtra, d.requires, ["foo"])
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+class EntryPointTests(TestCase):
+
+ def assertfields(self, ep):
+ self.assertEqual(ep.name,"foo")
+ self.assertEqual(ep.module_name,"setuptools.tests.test_resources")
+ self.assertEqual(ep.attrs, ("EntryPointTests",))
+ self.assertEqual(ep.extras, ("x",))
+ self.failUnless(ep.load() is EntryPointTests)
+ self.assertEqual(
+ str(ep),
+ "foo = setuptools.tests.test_resources:EntryPointTests [x]"
+ )
+
+ def setUp(self):
+ self.dist = Distribution.from_filename(
+ "FooPkg-1.2-py2.4.egg", metadata=Metadata(('requires.txt','[x]')))
+
+ def testBasics(self):
+ ep = EntryPoint(
+ "foo", "setuptools.tests.test_resources", ["EntryPointTests"],
+ ["x"], self.dist
+ )
+ self.assertfields(ep)
+
+ def testParse(self):
+ s = "foo = setuptools.tests.test_resources:EntryPointTests [x]"
+ ep = EntryPoint.parse(s, self.dist)
+ self.assertfields(ep)
+
+ ep = EntryPoint.parse("bar baz= spammity[PING]")
+ self.assertEqual(ep.name,"bar baz")
+ self.assertEqual(ep.module_name,"spammity")
+ self.assertEqual(ep.attrs, ())
+ self.assertEqual(ep.extras, ("ping",))
+
+ ep = EntryPoint.parse(" fizzly = wocka:foo")
+ self.assertEqual(ep.name,"fizzly")
+ self.assertEqual(ep.module_name,"wocka")
+ self.assertEqual(ep.attrs, ("foo",))
+ self.assertEqual(ep.extras, ())
+
+ def testRejects(self):
+ for ep in [
+ "foo", "x=1=2", "x=a:b:c", "q=x/na", "fez=pish:tush-z", "x=f[a]>2",
+ ]:
+ try: EntryPoint.parse(ep)
+ except ValueError: pass
+ else: raise AssertionError("Should've been bad", ep)
+
+ def checkSubMap(self, m):
+ self.assertEqual(str(m),
+ "{"
+ "'feature2': EntryPoint.parse("
+ "'feature2 = another.module:SomeClass [extra1,extra2]'), "
+ "'feature1': EntryPoint.parse("
+ "'feature1 = somemodule:somefunction')"
+ "}"
+ )
+
+ submap_str = """
+ # define features for blah blah
+ feature1 = somemodule:somefunction
+ feature2 = another.module:SomeClass [extra1,extra2]
+ """
+
+ def testParseList(self):
+ self.checkSubMap(EntryPoint.parse_group("xyz", self.submap_str))
+ self.assertRaises(ValueError, EntryPoint.parse_group, "x a", "foo=bar")
+ self.assertRaises(ValueError, EntryPoint.parse_group, "x",
+ ["foo=baz", "foo=bar"])
+
+ def testParseMap(self):
+ m = EntryPoint.parse_map({'xyz':self.submap_str})
+ self.checkSubMap(m['xyz'])
+ self.assertEqual(m.keys(),['xyz'])
+ m = EntryPoint.parse_map("[xyz]\n"+self.submap_str)
+ self.checkSubMap(m['xyz'])
+ self.assertEqual(m.keys(),['xyz'])
+ self.assertRaises(ValueError, EntryPoint.parse_map, ["[xyz]", "[xyz]"])
+ self.assertRaises(ValueError, EntryPoint.parse_map, self.submap_str)
+
+
+class RequirementsTests(TestCase):
+
+ def testBasics(self):
+ r = Requirement.parse("Twisted>=1.2")
+ self.assertEqual(str(r),"Twisted>=1.2")
+ self.assertEqual(repr(r),"Requirement.parse('Twisted>=1.2')")
+ self.assertEqual(r, Requirement("Twisted", [('>=','1.2')], ()))
+ self.assertEqual(r, Requirement("twisTed", [('>=','1.2')], ()))
+ self.assertNotEqual(r, Requirement("Twisted", [('>=','2.0')], ()))
+ self.assertNotEqual(r, Requirement("Zope", [('>=','1.2')], ()))
+ self.assertNotEqual(r, Requirement("Zope", [('>=','3.0')], ()))
+ self.assertNotEqual(r, Requirement.parse("Twisted[extras]>=1.2"))
+
+ def testOrdering(self):
+ r1 = Requirement("Twisted", [('==','1.2c1'),('>=','1.2')], ())
+ r2 = Requirement("Twisted", [('>=','1.2'),('==','1.2c1')], ())
+ self.assertEqual(r1,r2)
+ self.assertEqual(str(r1),str(r2))
+ self.assertEqual(str(r2),"Twisted==1.2c1,>=1.2")
+
+ def testBasicContains(self):
+ r = Requirement("Twisted", [('>=','1.2')], ())
+ foo_dist = Distribution.from_filename("FooPkg-1.3_1.egg")
+ twist11 = Distribution.from_filename("Twisted-1.1.egg")
+ twist12 = Distribution.from_filename("Twisted-1.2.egg")
+ self.failUnless(parse_version('1.2') in r)
+ self.failUnless(parse_version('1.1') not in r)
+ self.failUnless('1.2' in r)
+ self.failUnless('1.1' not in r)
+ self.failUnless(foo_dist not in r)
+ self.failUnless(twist11 not in r)
+ self.failUnless(twist12 in r)
+
+ def testAdvancedContains(self):
+ r, = parse_requirements("Foo>=1.2,<=1.3,==1.9,>2.0,!=2.5,<3.0,==4.5")
+ for v in ('1.2','1.2.2','1.3','1.9','2.0.1','2.3','2.6','3.0c1','4.5'):
+ self.failUnless(v in r, (v,r))
+ for v in ('1.2c1','1.3.1','1.5','1.9.1','2.0','2.5','3.0','4.0'):
+ self.failUnless(v not in r, (v,r))
+
+
+ def testOptionsAndHashing(self):
+ r1 = Requirement.parse("Twisted[foo,bar]>=1.2")
+ r2 = Requirement.parse("Twisted[bar,FOO]>=1.2")
+ r3 = Requirement.parse("Twisted[BAR,FOO]>=1.2.0")
+ self.assertEqual(r1,r2)
+ self.assertEqual(r1,r3)
+ self.assertEqual(r1.extras, ("foo","bar"))
+ self.assertEqual(r2.extras, ("bar","foo")) # extras are normalized
+ self.assertEqual(hash(r1), hash(r2))
+ self.assertEqual(
+ hash(r1), hash(("twisted", ((">=",parse_version("1.2")),),
+ ImmutableSet(["foo","bar"])))
+ )
+
+ def testVersionEquality(self):
+ r1 = Requirement.parse("setuptools==0.3a2")
+ r2 = Requirement.parse("setuptools!=0.3a4")
+ d = Distribution.from_filename
+
+ self.failIf(d("setuptools-0.3a4.egg") in r1)
+ self.failIf(d("setuptools-0.3a1.egg") in r1)
+ self.failIf(d("setuptools-0.3a4.egg") in r2)
+
+ self.failUnless(d("setuptools-0.3a2.egg") in r1)
+ self.failUnless(d("setuptools-0.3a2.egg") in r2)
+ self.failUnless(d("setuptools-0.3a3.egg") in r2)
+ self.failUnless(d("setuptools-0.3a5.egg") in r2)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+class ParseTests(TestCase):
+
+ def testEmptyParse(self):
+ self.assertEqual(list(parse_requirements('')), [])
+
+ def testYielding(self):
+ for inp,out in [
+ ([], []), ('x',['x']), ([[]],[]), (' x\n y', ['x','y']),
+ (['x\n\n','y'], ['x','y']),
+ ]:
+ self.assertEqual(list(pkg_resources.yield_lines(inp)),out)
+
+ def testSplitting(self):
+ self.assertEqual(
+ list(
+ pkg_resources.split_sections("""
+ x
+ [Y]
+ z
+
+ a
+ [b ]
+ # foo
+ c
+ [ d]
+ [q]
+ v
+ """
+ )
+ ),
+ [(None,["x"]), ("Y",["z","a"]), ("b",["c"]), ("d",[]), ("q",["v"])]
+ )
+ self.assertRaises(ValueError,list,pkg_resources.split_sections("[foo"))
+
+ def testSafeName(self):
+ self.assertEqual(safe_name("adns-python"), "adns-python")
+ self.assertEqual(safe_name("WSGI Utils"), "WSGI-Utils")
+ self.assertEqual(safe_name("WSGI Utils"), "WSGI-Utils")
+ self.assertEqual(safe_name("Money$$$Maker"), "Money-Maker")
+ self.assertNotEqual(safe_name("peak.web"), "peak-web")
+
+ def testSafeVersion(self):
+ self.assertEqual(safe_version("1.2-1"), "1.2-1")
+ self.assertEqual(safe_version("1.2 alpha"), "1.2.alpha")
+ self.assertEqual(safe_version("2.3.4 20050521"), "2.3.4.20050521")
+ self.assertEqual(safe_version("Money$$$Maker"), "Money-Maker")
+ self.assertEqual(safe_version("peak.web"), "peak.web")
+
+ def testSimpleRequirements(self):
+ self.assertEqual(
+ list(parse_requirements('Twis-Ted>=1.2-1')),
+ [Requirement('Twis-Ted',[('>=','1.2-1')], ())]
+ )
+ self.assertEqual(
+ list(parse_requirements('Twisted >=1.2, \ # more\n<2.0')),
+ [Requirement('Twisted',[('>=','1.2'),('<','2.0')], ())]
+ )
+ self.assertEqual(
+ Requirement.parse("FooBar==1.99a3"),
+ Requirement("FooBar", [('==','1.99a3')], ())
+ )
+ self.assertRaises(ValueError,Requirement.parse,">=2.3")
+ self.assertRaises(ValueError,Requirement.parse,"x\\")
+ self.assertRaises(ValueError,Requirement.parse,"x==2 q")
+ self.assertRaises(ValueError,Requirement.parse,"X==1\nY==2")
+ self.assertRaises(ValueError,Requirement.parse,"#")
+
+ def testVersionEquality(self):
+ def c(s1,s2):
+ p1, p2 = parse_version(s1),parse_version(s2)
+ self.assertEqual(p1,p2, (s1,s2,p1,p2))
+
+ c('1.2-rc1', '1.2rc1')
+ c('0.4', '0.4.0')
+ c('0.4.0.0', '0.4.0')
+ c('0.4.0-0', '0.4-0')
+ c('0pl1', '0.0pl1')
+ c('0pre1', '0.0c1')
+ c('0.0.0preview1', '0c1')
+ c('0.0c1', '0-rc1')
+ c('1.2a1', '1.2.a.1'); c('1.2...a', '1.2a')
+
+ def testVersionOrdering(self):
+ def c(s1,s2):
+ p1, p2 = parse_version(s1),parse_version(s2)
+ self.failUnless(p1<p2, (s1,s2,p1,p2))
+
+ c('2.1','2.1.1')
+ c('2a1','2b0')
+ c('2a1','2.1')
+ c('2.3a1', '2.3')
+ c('2.1-1', '2.1-2')
+ c('2.1-1', '2.1.1')
+ c('2.1', '2.1pl4')
+ c('2.1a0-20040501', '2.1')
+ c('1.1', '02.1')
+ c('A56','B27')
+ c('3.2', '3.2.pl0')
+ c('3.2-1', '3.2pl1')
+ c('3.2pl1', '3.2pl1-1')
+ c('0.4', '4.0')
+ c('0.0.4', '0.4.0')
+ c('0pl1', '0.4pl1')
+ c('2.1.0-rc1','2.1.0')
+
+ torture ="""
+ 0.80.1-3 0.80.1-2 0.80.1-1 0.79.9999+0.80.0pre4-1
+ 0.79.9999+0.80.0pre2-3 0.79.9999+0.80.0pre2-2
+ 0.77.2-1 0.77.1-1 0.77.0-1
+ """.split()
+
+ for p,v1 in enumerate(torture):
+ for v2 in torture[p+1:]:
+ c(v2,v1)
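
A quick sanity sketch of the ordering rules the torture test above depends on
(an illustration, not part of the patch)::

    from pkg_resources import parse_version

    assert parse_version('1.2rc1') < parse_version('1.2')    # pre-releases sort first
    assert parse_version('2.1pl4') > parse_version('2.1')    # patchlevels sort after
    assert parse_version('0.4') == parse_version('0.4.0')    # trailing zeros are ignored
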
diff --git a/Lib/sgmllib.py b/Lib/sgmllib.py
index 08e365b..3e85a91 100644
--- a/Lib/sgmllib.py
+++ b/Lib/sgmllib.py
@@ -269,9 +269,37 @@ class SGMLParser(markupbase.ParserBase):
attrname, rest, attrvalue = match.group(1, 2, 3)
if not rest:
attrvalue = attrname
- elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
- attrvalue[:1] == '"' == attrvalue[-1:]:
- attrvalue = attrvalue[1:-1]
+ else:
+ if (attrvalue[:1] == "'" == attrvalue[-1:] or
+ attrvalue[:1] == '"' == attrvalue[-1:]):
+ # strip quotes
+ attrvalue = attrvalue[1:-1]
+ l = 0
+ new_attrvalue = ''
+ while l < len(attrvalue):
+ av_match = entityref.match(attrvalue, l)
+ if (av_match and av_match.group(1) in self.entitydefs and
+ attrvalue[av_match.end(1)] == ';'):
+ # only substitute entityrefs ending in ';' since
+ # otherwise we may break <a href='?p=x&q=y'>
+ # which is very common
+ new_attrvalue += self.entitydefs[av_match.group(1)]
+ l = av_match.end(0)
+ continue
+ ch_match = charref.match(attrvalue, l)
+ if ch_match:
+ try:
+ char = chr(int(ch_match.group(1)))
+ new_attrvalue += char
+ l = ch_match.end(0)
+ continue
+ except ValueError:
+ # invalid character reference, don't substitute
+ pass
+ # all other cases
+ new_attrvalue += attrvalue[l]
+ l += 1
+ attrvalue = new_attrvalue
attrs.append((attrname.lower(), attrvalue))
k = match.end(0)
if rawdata[j] == '>':
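
The practical effect on attribute values, sketched with a throwaway parser
subclass (the markup string is hypothetical)::

    import sgmllib

    class AttrEcho(sgmllib.SGMLParser):
        def unknown_starttag(self, tag, attrs):
            print attrs

    p = AttrEcho()
    p.feed('<a href="?p=x&q=y" title="a&amp;b &#65;">')
    # '&q' has no trailing ';' and is left alone, while '&amp;' and '&#65;'
    # are substituted: [('href', '?p=x&q=y'), ('title', 'a&b A')]
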
diff --git a/Lib/site.py b/Lib/site.py
index 5e7ff7b..47eda24 100644
--- a/Lib/site.py
+++ b/Lib/site.py
@@ -69,6 +69,8 @@ def makepath(*paths):
def abs__file__():
"""Set all module' __file__ attribute to an absolute path"""
for m in sys.modules.values():
+ if hasattr(m, '__loader__'):
+ continue # don't mess with a PEP 302-supplied __file__
try:
m.__file__ = os.path.abspath(m.__file__)
except AttributeError:
diff --git a/Lib/smtplib.py b/Lib/smtplib.py
index 71d25fd..07916cc 100755
--- a/Lib/smtplib.py
+++ b/Lib/smtplib.py
@@ -255,7 +255,11 @@ class SMTP:
self.local_hostname = fqdn
else:
# We can't find an fqdn hostname, so use a domain literal
- addr = socket.gethostbyname(socket.gethostname())
+ addr = '127.0.0.1'
+ try:
+ addr = socket.gethostbyname(socket.gethostname())
+ except socket.gaierror:
+ pass
self.local_hostname = '[%s]' % addr
def set_debuglevel(self, debuglevel):
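
With the fallback in place, constructing an SMTP object on a machine whose
hostname doesn't resolve now degrades to a '[127.0.0.1]' domain literal instead
of raising socket.gaierror. Passing local_hostname explicitly (a hypothetical
name below) sidesteps the lookup entirely::

    import smtplib

    # no host argument, so nothing is contacted; the greeting name is just set
    server = smtplib.SMTP(local_hostname='client.example.com')
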
diff --git a/Lib/socket.py b/Lib/socket.py
index ee2457f..32a92b4 100644
--- a/Lib/socket.py
+++ b/Lib/socket.py
@@ -121,14 +121,6 @@ def getfqdn(name=''):
return name
-#
-# These classes are used by the socket() defined on Windows and BeOS
-# platforms to provide a best-effort implementation of the cleanup
-# semantics needed when sockets can't be dup()ed.
-#
-# These are not actually used on other platforms.
-#
-
_socketmethods = (
'bind', 'connect', 'connect_ex', 'fileno', 'listen',
'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
@@ -183,6 +175,10 @@ class _socketobject(object):
and bufsize arguments are as for the built-in open() function."""
return _fileobject(self._sock, mode, bufsize)
+ family = property(lambda self: self._sock.family, doc="the socket family")
+ type = property(lambda self: self._sock.type, doc="the socket type")
+ proto = property(lambda self: self._sock.proto, doc="the socket protocol")
+
_s = ("def %s(self, *args): return self._sock.%s(*args)\n\n"
"%s.__doc__ = _realsocket.%s.__doc__\n")
for _m in _socketmethods:
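
A quick check of the new read-only properties on the wrapper object::

    import socket

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    print s.family == socket.AF_INET    # True
    print s.type == socket.SOCK_STREAM  # True
    print s.proto                       # 0 unless a specific protocol was requested
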
diff --git a/Lib/sqlite3/__init__.py b/Lib/sqlite3/__init__.py
new file mode 100644
index 0000000..41ef2b7
--- /dev/null
+++ b/Lib/sqlite3/__init__.py
@@ -0,0 +1,24 @@
+#-*- coding: ISO-8859-1 -*-
+# pysqlite2/__init__.py: the pysqlite2 package.
+#
+# Copyright (C) 2005 Gerhard Häring <gh@ghaering.de>
+#
+# This file is part of pysqlite.
+#
+# This software is provided 'as-is', without any express or implied
+# warranty. In no event will the authors be held liable for any damages
+# arising from the use of this software.
+#
+# Permission is granted to anyone to use this software for any purpose,
+# including commercial applications, and to alter it and redistribute it
+# freely, subject to the following restrictions:
+#
+# 1. The origin of this software must not be misrepresented; you must not
+# claim that you wrote the original software. If you use this software
+# in a product, an acknowledgment in the product documentation would be
+# appreciated but is not required.
+# 2. Altered source versions must be plainly marked as such, and must not be
+# misrepresented as being the original software.
+# 3. This notice may not be removed or altered from any source distribution.
+
+from dbapi2 import *
diff --git a/Lib/sqlite3/dbapi2.py b/Lib/sqlite3/dbapi2.py
new file mode 100644
index 0000000..e0c8a84
--- /dev/null
+++ b/Lib/sqlite3/dbapi2.py
@@ -0,0 +1,84 @@
+#-*- coding: ISO-8859-1 -*-
+# pysqlite2/dbapi2.py: the DB-API 2.0 interface
+#
+# Copyright (C) 2004-2005 Gerhard Häring <gh@ghaering.de>
+#
+# This file is part of pysqlite.
+#
+# This software is provided 'as-is', without any express or implied
+# warranty. In no event will the authors be held liable for any damages
+# arising from the use of this software.
+#
+# Permission is granted to anyone to use this software for any purpose,
+# including commercial applications, and to alter it and redistribute it
+# freely, subject to the following restrictions:
+#
+# 1. The origin of this software must not be misrepresented; you must not
+# claim that you wrote the original software. If you use this software
+# in a product, an acknowledgment in the product documentation would be
+# appreciated but is not required.
+# 2. Altered source versions must be plainly marked as such, and must not be
+# misrepresented as being the original software.
+# 3. This notice may not be removed or altered from any source distribution.
+
+import datetime
+
+paramstyle = "qmark"
+
+threadsafety = 1
+
+apilevel = "2.0"
+
+from _sqlite3 import *
+
+import time
+
+Date = datetime.date
+
+Time = datetime.time
+
+Timestamp = datetime.datetime
+
+def DateFromTicks(ticks):
+    return Date(*time.localtime(ticks)[:3])
+
+def TimeFromTicks(ticks):
+    return Time(*time.localtime(ticks)[3:6])
+
+def TimestampFromTicks(ticks):
+    return Timestamp(*time.localtime(ticks)[:6])
+
+_major, _minor, _micro = version.split(".")
+version_info = (int(_major), int(_minor), _micro)
+_major, _minor, _micro = sqlite_version.split(".")
+sqlite_version_info = (int(_major), int(_minor), _micro)
+
+Binary = buffer
+
+def adapt_date(val):
+ return val.isoformat()
+
+def adapt_datetime(val):
+ return val.isoformat(" ")
+
+def convert_date(val):
+ return datetime.date(*map(int, val.split("-")))
+
+def convert_timestamp(val):
+ datepart, timepart = val.split(" ")
+ year, month, day = map(int, datepart.split("-"))
+ timepart_full = timepart.split(".")
+ hours, minutes, seconds = map(int, timepart_full[0].split(":"))
+ if len(timepart_full) == 2:
+ microseconds = int(float("0." + timepart_full[1]) * 1000000)
+ else:
+ microseconds = 0
+
+ val = datetime.datetime(year, month, day, hours, minutes, seconds, microseconds)
+ return val
+
+
+register_adapter(datetime.date, adapt_date)
+register_adapter(datetime.datetime, adapt_datetime)
+register_converter("date", convert_date)
+register_converter("timestamp", convert_timestamp)
diff --git a/Lib/sqlite3/test/__init__.py b/Lib/sqlite3/test/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/Lib/sqlite3/test/__init__.py
diff --git a/Lib/sqlite3/test/dbapi.py b/Lib/sqlite3/test/dbapi.py
new file mode 100644
index 0000000..b08da9c
--- /dev/null
+++ b/Lib/sqlite3/test/dbapi.py
@@ -0,0 +1,732 @@
+#-*- coding: ISO-8859-1 -*-
+# pysqlite2/test/dbapi.py: tests for DB-API compliance
+#
+# Copyright (C) 2004-2005 Gerhard Häring <gh@ghaering.de>
+#
+# This file is part of pysqlite.
+#
+# This software is provided 'as-is', without any express or implied
+# warranty. In no event will the authors be held liable for any damages
+# arising from the use of this software.
+#
+# Permission is granted to anyone to use this software for any purpose,
+# including commercial applications, and to alter it and redistribute it
+# freely, subject to the following restrictions:
+#
+# 1. The origin of this software must not be misrepresented; you must not
+# claim that you wrote the original software. If you use this software
+# in a product, an acknowledgment in the product documentation would be
+# appreciated but is not required.
+# 2. Altered source versions must be plainly marked as such, and must not be
+# misrepresented as being the original software.
+# 3. This notice may not be removed or altered from any source distribution.
+
+import unittest
+import threading
+import sqlite3 as sqlite
+
+class ModuleTests(unittest.TestCase):
+ def CheckAPILevel(self):
+ self.assertEqual(sqlite.apilevel, "2.0",
+ "apilevel is %s, should be 2.0" % sqlite.apilevel)
+
+ def CheckThreadSafety(self):
+ self.assertEqual(sqlite.threadsafety, 1,
+ "threadsafety is %d, should be 1" % sqlite.threadsafety)
+
+ def CheckParamStyle(self):
+ self.assertEqual(sqlite.paramstyle, "qmark",
+ "paramstyle is '%s', should be 'qmark'" %
+ sqlite.paramstyle)
+
+ def CheckWarning(self):
+ self.assert_(issubclass(sqlite.Warning, StandardError),
+ "Warning is not a subclass of StandardError")
+
+ def CheckError(self):
+ self.failUnless(issubclass(sqlite.Error, StandardError),
+ "Error is not a subclass of StandardError")
+
+ def CheckInterfaceError(self):
+ self.failUnless(issubclass(sqlite.InterfaceError, sqlite.Error),
+ "InterfaceError is not a subclass of Error")
+
+ def CheckDatabaseError(self):
+ self.failUnless(issubclass(sqlite.DatabaseError, sqlite.Error),
+ "DatabaseError is not a subclass of Error")
+
+ def CheckDataError(self):
+ self.failUnless(issubclass(sqlite.DataError, sqlite.DatabaseError),
+ "DataError is not a subclass of DatabaseError")
+
+ def CheckOperationalError(self):
+ self.failUnless(issubclass(sqlite.OperationalError, sqlite.DatabaseError),
+ "OperationalError is not a subclass of DatabaseError")
+
+ def CheckIntegrityError(self):
+ self.failUnless(issubclass(sqlite.IntegrityError, sqlite.DatabaseError),
+ "IntegrityError is not a subclass of DatabaseError")
+
+ def CheckInternalError(self):
+ self.failUnless(issubclass(sqlite.InternalError, sqlite.DatabaseError),
+ "InternalError is not a subclass of DatabaseError")
+
+ def CheckProgrammingError(self):
+ self.failUnless(issubclass(sqlite.ProgrammingError, sqlite.DatabaseError),
+ "ProgrammingError is not a subclass of DatabaseError")
+
+ def CheckNotSupportedError(self):
+ self.failUnless(issubclass(sqlite.NotSupportedError,
+ sqlite.DatabaseError),
+ "NotSupportedError is not a subclass of DatabaseError")
+
+class ConnectionTests(unittest.TestCase):
+ def setUp(self):
+ self.cx = sqlite.connect(":memory:")
+ cu = self.cx.cursor()
+ cu.execute("create table test(id integer primary key, name text)")
+ cu.execute("insert into test(name) values (?)", ("foo",))
+
+ def tearDown(self):
+ self.cx.close()
+
+ def CheckCommit(self):
+ self.cx.commit()
+
+ def CheckCommitAfterNoChanges(self):
+ """
+ A commit should also work when no changes were made to the database.
+ """
+ self.cx.commit()
+ self.cx.commit()
+
+ def CheckRollback(self):
+ self.cx.rollback()
+
+ def CheckRollbackAfterNoChanges(self):
+ """
+ A rollback should also work when no changes were made to the database.
+ """
+ self.cx.rollback()
+ self.cx.rollback()
+
+ def CheckCursor(self):
+ cu = self.cx.cursor()
+
+ def CheckFailedOpen(self):
+ YOU_CANNOT_OPEN_THIS = "/foo/bar/bla/23534/mydb.db"
+ try:
+ con = sqlite.connect(YOU_CANNOT_OPEN_THIS)
+ except sqlite.OperationalError:
+ return
+ self.fail("should have raised an OperationalError")
+
+ def CheckClose(self):
+ self.cx.close()
+
+ def CheckExceptions(self):
+ # Optional DB-API extension.
+ self.failUnlessEqual(self.cx.Warning, sqlite.Warning)
+ self.failUnlessEqual(self.cx.Error, sqlite.Error)
+ self.failUnlessEqual(self.cx.InterfaceError, sqlite.InterfaceError)
+ self.failUnlessEqual(self.cx.DatabaseError, sqlite.DatabaseError)
+ self.failUnlessEqual(self.cx.DataError, sqlite.DataError)
+ self.failUnlessEqual(self.cx.OperationalError, sqlite.OperationalError)
+ self.failUnlessEqual(self.cx.IntegrityError, sqlite.IntegrityError)
+ self.failUnlessEqual(self.cx.InternalError, sqlite.InternalError)
+ self.failUnlessEqual(self.cx.ProgrammingError, sqlite.ProgrammingError)
+ self.failUnlessEqual(self.cx.NotSupportedError, sqlite.NotSupportedError)
+
+class CursorTests(unittest.TestCase):
+ def setUp(self):
+ self.cx = sqlite.connect(":memory:")
+ self.cu = self.cx.cursor()
+ self.cu.execute("create table test(id integer primary key, name text, income number)")
+ self.cu.execute("insert into test(name) values (?)", ("foo",))
+
+ def tearDown(self):
+ self.cu.close()
+ self.cx.close()
+
+ def CheckExecuteNoArgs(self):
+ self.cu.execute("delete from test")
+
+ def CheckExecuteIllegalSql(self):
+ try:
+ self.cu.execute("select asdf")
+ self.fail("should have raised an OperationalError")
+ except sqlite.OperationalError:
+ return
+ except:
+ self.fail("raised wrong exception")
+
+ def CheckExecuteTooMuchSql(self):
+ try:
+ self.cu.execute("select 5+4; select 4+5")
+ self.fail("should have raised a Warning")
+ except sqlite.Warning:
+ return
+ except:
+ self.fail("raised wrong exception")
+
+ def CheckExecuteTooMuchSql2(self):
+ self.cu.execute("select 5+4; -- foo bar")
+
+ def CheckExecuteTooMuchSql3(self):
+ self.cu.execute("""
+ select 5+4;
+
+ /*
+ foo
+ */
+ """)
+
+ def CheckExecuteWrongSqlArg(self):
+ try:
+ self.cu.execute(42)
+ self.fail("should have raised a ValueError")
+ except ValueError:
+ return
+ except:
+ self.fail("raised wrong exception.")
+
+ def CheckExecuteArgInt(self):
+ self.cu.execute("insert into test(id) values (?)", (42,))
+
+ def CheckExecuteArgFloat(self):
+ self.cu.execute("insert into test(income) values (?)", (2500.32,))
+
+ def CheckExecuteArgString(self):
+ self.cu.execute("insert into test(name) values (?)", ("Hugo",))
+
+ def CheckExecuteWrongNoOfArgs1(self):
+ # too many parameters
+ try:
+ self.cu.execute("insert into test(id) values (?)", (17, "Egon"))
+ self.fail("should have raised ProgrammingError")
+ except sqlite.ProgrammingError:
+ pass
+
+    def CheckExecuteWrongNoOfArgs2(self):
+        # too few parameters
+        try:
+            self.cu.execute("insert into test(id, name) values (?, ?)", (17,))
+            self.fail("should have raised ProgrammingError")
+        except sqlite.ProgrammingError:
+            pass
+
+ def CheckExecuteWrongNoOfArgs3(self):
+ # no parameters, parameters are needed
+ try:
+ self.cu.execute("insert into test(id) values (?)")
+ self.fail("should have raised ProgrammingError")
+ except sqlite.ProgrammingError:
+ pass
+
+ def CheckExecuteDictMapping(self):
+ self.cu.execute("insert into test(name) values ('foo')")
+ self.cu.execute("select name from test where name=:name", {"name": "foo"})
+ row = self.cu.fetchone()
+ self.failUnlessEqual(row[0], "foo")
+
+ def CheckExecuteDictMappingTooLittleArgs(self):
+ self.cu.execute("insert into test(name) values ('foo')")
+ try:
+ self.cu.execute("select name from test where name=:name and id=:id", {"name": "foo"})
+ self.fail("should have raised ProgrammingError")
+ except sqlite.ProgrammingError:
+ pass
+
+ def CheckExecuteDictMappingNoArgs(self):
+ self.cu.execute("insert into test(name) values ('foo')")
+ try:
+ self.cu.execute("select name from test where name=:name")
+ self.fail("should have raised ProgrammingError")
+ except sqlite.ProgrammingError:
+ pass
+
+ def CheckExecuteDictMappingUnnamed(self):
+ self.cu.execute("insert into test(name) values ('foo')")
+ try:
+ self.cu.execute("select name from test where name=?", {"name": "foo"})
+ self.fail("should have raised ProgrammingError")
+ except sqlite.ProgrammingError:
+ pass
+
+ def CheckClose(self):
+ self.cu.close()
+
+ def CheckRowcountExecute(self):
+ self.cu.execute("delete from test")
+ self.cu.execute("insert into test(name) values ('foo')")
+ self.cu.execute("insert into test(name) values ('foo')")
+ self.cu.execute("update test set name='bar'")
+ self.failUnlessEqual(self.cu.rowcount, 2)
+
+ def CheckRowcountExecutemany(self):
+ self.cu.execute("delete from test")
+ self.cu.executemany("insert into test(name) values (?)", [(1,), (2,), (3,)])
+ self.failUnlessEqual(self.cu.rowcount, 3)
+
+ def CheckTotalChanges(self):
+ self.cu.execute("insert into test(name) values ('foo')")
+ self.cu.execute("insert into test(name) values ('foo')")
+ if self.cx.total_changes < 2:
+ self.fail("total changes reported wrong value")
+
+ # Checks for executemany:
+    # Sequences are required by the DB-API; accepting iterators and
+    # generators as well is a pysqlite enhancement.
+
+ def CheckExecuteManySequence(self):
+ self.cu.executemany("insert into test(income) values (?)", [(x,) for x in range(100, 110)])
+
+ def CheckExecuteManyIterator(self):
+ class MyIter:
+ def __init__(self):
+ self.value = 5
+
+ def next(self):
+ if self.value == 10:
+ raise StopIteration
+ else:
+ self.value += 1
+ return (self.value,)
+
+ self.cu.executemany("insert into test(income) values (?)", MyIter())
+
+ def CheckExecuteManyGenerator(self):
+ def mygen():
+ for i in range(5):
+ yield (i,)
+
+ self.cu.executemany("insert into test(income) values (?)", mygen())
+
+ def CheckExecuteManyWrongSqlArg(self):
+ try:
+ self.cu.executemany(42, [(3,)])
+ self.fail("should have raised a ValueError")
+ except ValueError:
+ return
+ except:
+ self.fail("raised wrong exception.")
+
+ def CheckExecuteManySelect(self):
+ try:
+ self.cu.executemany("select ?", [(3,)])
+ self.fail("should have raised a ProgrammingError")
+ except sqlite.ProgrammingError:
+ return
+ except:
+ self.fail("raised wrong exception.")
+
+ def CheckExecuteManyNotIterable(self):
+ try:
+ self.cu.executemany("insert into test(income) values (?)", 42)
+ self.fail("should have raised a TypeError")
+ except TypeError:
+ return
+ except Exception, e:
+ print "raised", e.__class__
+ self.fail("raised wrong exception.")
+
+ def CheckFetchIter(self):
+ # Optional DB-API extension.
+ self.cu.execute("delete from test")
+ self.cu.execute("insert into test(id) values (?)", (5,))
+ self.cu.execute("insert into test(id) values (?)", (6,))
+ self.cu.execute("select id from test order by id")
+ lst = []
+ for row in self.cu:
+ lst.append(row[0])
+ self.failUnlessEqual(lst[0], 5)
+ self.failUnlessEqual(lst[1], 6)
+
+ def CheckFetchone(self):
+ self.cu.execute("select name from test")
+ row = self.cu.fetchone()
+ self.failUnlessEqual(row[0], "foo")
+ row = self.cu.fetchone()
+ self.failUnlessEqual(row, None)
+
+ def CheckFetchoneNoStatement(self):
+ cur = self.cx.cursor()
+ row = cur.fetchone()
+ self.failUnlessEqual(row, None)
+
+ def CheckArraySize(self):
+        # must default to 1
+ self.failUnlessEqual(self.cu.arraysize, 1)
+
+ # now set to 2
+ self.cu.arraysize = 2
+
+ # now make the query return 3 rows
+ self.cu.execute("delete from test")
+ self.cu.execute("insert into test(name) values ('A')")
+ self.cu.execute("insert into test(name) values ('B')")
+ self.cu.execute("insert into test(name) values ('C')")
+ self.cu.execute("select name from test")
+ res = self.cu.fetchmany()
+
+ self.failUnlessEqual(len(res), 2)
+
+ def CheckFetchmany(self):
+ self.cu.execute("select name from test")
+ res = self.cu.fetchmany(100)
+ self.failUnlessEqual(len(res), 1)
+ res = self.cu.fetchmany(100)
+ self.failUnlessEqual(res, [])
+
+ def CheckFetchall(self):
+ self.cu.execute("select name from test")
+ res = self.cu.fetchall()
+ self.failUnlessEqual(len(res), 1)
+ res = self.cu.fetchall()
+ self.failUnlessEqual(res, [])
+
+ def CheckSetinputsizes(self):
+ self.cu.setinputsizes([3, 4, 5])
+
+ def CheckSetoutputsize(self):
+ self.cu.setoutputsize(5, 0)
+
+ def CheckSetoutputsizeNoColumn(self):
+ self.cu.setoutputsize(42)
+
+ def CheckCursorConnection(self):
+ # Optional DB-API extension.
+ self.failUnlessEqual(self.cu.connection, self.cx)
+
+ def CheckWrongCursorCallable(self):
+ try:
+ def f(): pass
+ cur = self.cx.cursor(f)
+ self.fail("should have raised a TypeError")
+ except TypeError:
+ return
+ self.fail("should have raised a ValueError")
+
+ def CheckCursorWrongClass(self):
+ class Foo: pass
+ foo = Foo()
+ try:
+ cur = sqlite.Cursor(foo)
+ self.fail("should have raised a ValueError")
+ except TypeError:
+ pass
+
+class ThreadTests(unittest.TestCase):
+ def setUp(self):
+ self.con = sqlite.connect(":memory:")
+ self.cur = self.con.cursor()
+ self.cur.execute("create table test(id integer primary key, name text, bin binary, ratio number, ts timestamp)")
+
+ def tearDown(self):
+ self.cur.close()
+ self.con.close()
+
+ def CheckConCursor(self):
+ def run(con, errors):
+ try:
+ cur = con.cursor()
+ errors.append("did not raise ProgrammingError")
+ return
+ except sqlite.ProgrammingError:
+ return
+ except:
+ errors.append("raised wrong exception")
+
+ errors = []
+ t = threading.Thread(target=run, kwargs={"con": self.con, "errors": errors})
+ t.start()
+ t.join()
+ if len(errors) > 0:
+ self.fail("\n".join(errors))
+
+ def CheckConCommit(self):
+ def run(con, errors):
+ try:
+ con.commit()
+ errors.append("did not raise ProgrammingError")
+ return
+ except sqlite.ProgrammingError:
+ return
+ except:
+ errors.append("raised wrong exception")
+
+ errors = []
+ t = threading.Thread(target=run, kwargs={"con": self.con, "errors": errors})
+ t.start()
+ t.join()
+ if len(errors) > 0:
+ self.fail("\n".join(errors))
+
+ def CheckConRollback(self):
+ def run(con, errors):
+ try:
+ con.rollback()
+ errors.append("did not raise ProgrammingError")
+ return
+ except sqlite.ProgrammingError:
+ return
+ except:
+ errors.append("raised wrong exception")
+
+ errors = []
+ t = threading.Thread(target=run, kwargs={"con": self.con, "errors": errors})
+ t.start()
+ t.join()
+ if len(errors) > 0:
+ self.fail("\n".join(errors))
+
+ def CheckConClose(self):
+ def run(con, errors):
+ try:
+ con.close()
+ errors.append("did not raise ProgrammingError")
+ return
+ except sqlite.ProgrammingError:
+ return
+ except:
+ errors.append("raised wrong exception")
+
+ errors = []
+ t = threading.Thread(target=run, kwargs={"con": self.con, "errors": errors})
+ t.start()
+ t.join()
+ if len(errors) > 0:
+ self.fail("\n".join(errors))
+
+ def CheckCurImplicitBegin(self):
+ def run(cur, errors):
+ try:
+ cur.execute("insert into test(name) values ('a')")
+ errors.append("did not raise ProgrammingError")
+ return
+ except sqlite.ProgrammingError:
+ return
+ except:
+ errors.append("raised wrong exception")
+
+ errors = []
+ t = threading.Thread(target=run, kwargs={"cur": self.cur, "errors": errors})
+ t.start()
+ t.join()
+ if len(errors) > 0:
+ self.fail("\n".join(errors))
+
+ def CheckCurClose(self):
+ def run(cur, errors):
+ try:
+ cur.close()
+ errors.append("did not raise ProgrammingError")
+ return
+ except sqlite.ProgrammingError:
+ return
+ except:
+ errors.append("raised wrong exception")
+
+ errors = []
+ t = threading.Thread(target=run, kwargs={"cur": self.cur, "errors": errors})
+ t.start()
+ t.join()
+ if len(errors) > 0:
+ self.fail("\n".join(errors))
+
+ def CheckCurExecute(self):
+ def run(cur, errors):
+ try:
+ cur.execute("select name from test")
+ errors.append("did not raise ProgrammingError")
+ return
+ except sqlite.ProgrammingError:
+ return
+ except:
+ errors.append("raised wrong exception")
+
+ errors = []
+ self.cur.execute("insert into test(name) values ('a')")
+ t = threading.Thread(target=run, kwargs={"cur": self.cur, "errors": errors})
+ t.start()
+ t.join()
+ if len(errors) > 0:
+ self.fail("\n".join(errors))
+
+ def CheckCurIterNext(self):
+ def run(cur, errors):
+ try:
+ row = cur.fetchone()
+ errors.append("did not raise ProgrammingError")
+ return
+ except sqlite.ProgrammingError:
+ return
+ except:
+ errors.append("raised wrong exception")
+
+ errors = []
+ self.cur.execute("insert into test(name) values ('a')")
+ self.cur.execute("select name from test")
+ t = threading.Thread(target=run, kwargs={"cur": self.cur, "errors": errors})
+ t.start()
+ t.join()
+ if len(errors) > 0:
+ self.fail("\n".join(errors))
+
+class ConstructorTests(unittest.TestCase):
+ def CheckDate(self):
+ d = sqlite.Date(2004, 10, 28)
+
+ def CheckTime(self):
+ t = sqlite.Time(12, 39, 35)
+
+ def CheckTimestamp(self):
+ ts = sqlite.Timestamp(2004, 10, 28, 12, 39, 35)
+
+ def CheckDateFromTicks(self):
+ d = sqlite.DateFromTicks(42)
+
+ def CheckTimeFromTicks(self):
+ t = sqlite.TimeFromTicks(42)
+
+ def CheckTimestampFromTicks(self):
+ ts = sqlite.TimestampFromTicks(42)
+
+ def CheckBinary(self):
+ b = sqlite.Binary(chr(0) + "'")
+
+class ExtensionTests(unittest.TestCase):
+ def CheckScriptStringSql(self):
+ con = sqlite.connect(":memory:")
+ cur = con.cursor()
+ cur.executescript("""
+ -- bla bla
+ /* a stupid comment */
+ create table a(i);
+ insert into a(i) values (5);
+ """)
+ cur.execute("select i from a")
+ res = cur.fetchone()[0]
+ self.failUnlessEqual(res, 5)
+
+ def CheckScriptStringUnicode(self):
+ con = sqlite.connect(":memory:")
+ cur = con.cursor()
+ cur.executescript(u"""
+ create table a(i);
+ insert into a(i) values (5);
+ select i from a;
+ delete from a;
+ insert into a(i) values (6);
+ """)
+ cur.execute("select i from a")
+ res = cur.fetchone()[0]
+ self.failUnlessEqual(res, 6)
+
+ def CheckScriptErrorIncomplete(self):
+ con = sqlite.connect(":memory:")
+ cur = con.cursor()
+ raised = False
+ try:
+ cur.executescript("create table test(sadfsadfdsa")
+ except sqlite.ProgrammingError:
+ raised = True
+ self.failUnlessEqual(raised, True, "should have raised an exception")
+
+ def CheckScriptErrorNormal(self):
+ con = sqlite.connect(":memory:")
+ cur = con.cursor()
+ raised = False
+ try:
+ cur.executescript("create table test(sadfsadfdsa); select foo from hurz;")
+ except sqlite.OperationalError:
+ raised = True
+ self.failUnlessEqual(raised, True, "should have raised an exception")
+
+ def CheckConnectionExecute(self):
+ con = sqlite.connect(":memory:")
+ result = con.execute("select 5").fetchone()[0]
+ self.failUnlessEqual(result, 5, "Basic test of Connection.execute")
+
+ def CheckConnectionExecutemany(self):
+ con = sqlite.connect(":memory:")
+ con.execute("create table test(foo)")
+ con.executemany("insert into test(foo) values (?)", [(3,), (4,)])
+ result = con.execute("select foo from test order by foo").fetchall()
+ self.failUnlessEqual(result[0][0], 3, "Basic test of Connection.executemany")
+ self.failUnlessEqual(result[1][0], 4, "Basic test of Connection.executemany")
+
+ def CheckConnectionExecutescript(self):
+ con = sqlite.connect(":memory:")
+ con.executescript("create table test(foo); insert into test(foo) values (5);")
+ result = con.execute("select foo from test").fetchone()[0]
+ self.failUnlessEqual(result, 5, "Basic test of Connection.executescript")
+
+class ClosedTests(unittest.TestCase):
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def CheckClosedConCursor(self):
+ con = sqlite.connect(":memory:")
+ con.close()
+ try:
+ cur = con.cursor()
+ self.fail("Should have raised a ProgrammingError")
+ except sqlite.ProgrammingError:
+ pass
+ except:
+ self.fail("Should have raised a ProgrammingError")
+
+ def CheckClosedConCommit(self):
+ con = sqlite.connect(":memory:")
+ con.close()
+ try:
+ con.commit()
+ self.fail("Should have raised a ProgrammingError")
+ except sqlite.ProgrammingError:
+ pass
+ except:
+ self.fail("Should have raised a ProgrammingError")
+
+ def CheckClosedConRollback(self):
+ con = sqlite.connect(":memory:")
+ con.close()
+ try:
+ con.rollback()
+ self.fail("Should have raised a ProgrammingError")
+ except sqlite.ProgrammingError:
+ pass
+ except:
+ self.fail("Should have raised a ProgrammingError")
+
+ def CheckClosedCurExecute(self):
+ con = sqlite.connect(":memory:")
+ cur = con.cursor()
+ con.close()
+ try:
+ cur.execute("select 4")
+ self.fail("Should have raised a ProgrammingError")
+ except sqlite.ProgrammingError:
+ pass
+ except:
+ self.fail("Should have raised a ProgrammingError")
+
+def suite():
+ module_suite = unittest.makeSuite(ModuleTests, "Check")
+ connection_suite = unittest.makeSuite(ConnectionTests, "Check")
+ cursor_suite = unittest.makeSuite(CursorTests, "Check")
+ thread_suite = unittest.makeSuite(ThreadTests, "Check")
+ constructor_suite = unittest.makeSuite(ConstructorTests, "Check")
+ ext_suite = unittest.makeSuite(ExtensionTests, "Check")
+ closed_suite = unittest.makeSuite(ClosedTests, "Check")
+ return unittest.TestSuite((module_suite, connection_suite, cursor_suite, thread_suite, constructor_suite, ext_suite, closed_suite))
+
+def test():
+ runner = unittest.TextTestRunner()
+ runner.run(suite())
+
+if __name__ == "__main__":
+ test()
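
As the iterator and generator tests above show, executemany() accepts any
iterable of parameter tuples, not just sequences; a compact usage sketch::

    import sqlite3

    con = sqlite3.connect(":memory:")
    con.execute("create table test(income number)")
    con.executemany("insert into test(income) values (?)",
                    ((x,) for x in range(100, 105)))
    print con.execute("select count(*) from test").fetchone()[0]   # 5
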
diff --git a/Lib/sqlite3/test/factory.py b/Lib/sqlite3/test/factory.py
new file mode 100644
index 0000000..8778056
--- /dev/null
+++ b/Lib/sqlite3/test/factory.py
@@ -0,0 +1,164 @@
+#-*- coding: ISO-8859-1 -*-
+# pysqlite2/test/factory.py: tests for the various factories in pysqlite
+#
+# Copyright (C) 2005 Gerhard Häring <gh@ghaering.de>
+#
+# This file is part of pysqlite.
+#
+# This software is provided 'as-is', without any express or implied
+# warranty. In no event will the authors be held liable for any damages
+# arising from the use of this software.
+#
+# Permission is granted to anyone to use this software for any purpose,
+# including commercial applications, and to alter it and redistribute it
+# freely, subject to the following restrictions:
+#
+# 1. The origin of this software must not be misrepresented; you must not
+# claim that you wrote the original software. If you use this software
+# in a product, an acknowledgment in the product documentation would be
+# appreciated but is not required.
+# 2. Altered source versions must be plainly marked as such, and must not be
+# misrepresented as being the original software.
+# 3. This notice may not be removed or altered from any source distribution.
+
+import unittest
+import sqlite3 as sqlite
+
+class MyConnection(sqlite.Connection):
+ def __init__(self, *args, **kwargs):
+ sqlite.Connection.__init__(self, *args, **kwargs)
+
+def dict_factory(cursor, row):
+ d = {}
+ for idx, col in enumerate(cursor.description):
+ d[col[0]] = row[idx]
+ return d
+
+class MyCursor(sqlite.Cursor):
+ def __init__(self, *args, **kwargs):
+ sqlite.Cursor.__init__(self, *args, **kwargs)
+ self.row_factory = dict_factory
+
+class ConnectionFactoryTests(unittest.TestCase):
+ def setUp(self):
+ self.con = sqlite.connect(":memory:", factory=MyConnection)
+
+ def tearDown(self):
+ self.con.close()
+
+ def CheckIsInstance(self):
+ self.failUnless(isinstance(self.con,
+ MyConnection),
+ "connection is not instance of MyConnection")
+
+class CursorFactoryTests(unittest.TestCase):
+ def setUp(self):
+ self.con = sqlite.connect(":memory:")
+
+ def tearDown(self):
+ self.con.close()
+
+ def CheckIsInstance(self):
+ cur = self.con.cursor(factory=MyCursor)
+ self.failUnless(isinstance(cur,
+ MyCursor),
+ "cursor is not instance of MyCursor")
+
+class RowFactoryTestsBackwardsCompat(unittest.TestCase):
+ def setUp(self):
+ self.con = sqlite.connect(":memory:")
+
+ def CheckIsProducedByFactory(self):
+ cur = self.con.cursor(factory=MyCursor)
+ cur.execute("select 4+5 as foo")
+ row = cur.fetchone()
+ self.failUnless(isinstance(row,
+ dict),
+ "row is not instance of dict")
+ cur.close()
+
+ def tearDown(self):
+ self.con.close()
+
+class RowFactoryTests(unittest.TestCase):
+ def setUp(self):
+ self.con = sqlite.connect(":memory:")
+
+ def CheckCustomFactory(self):
+ self.con.row_factory = lambda cur, row: list(row)
+ row = self.con.execute("select 1, 2").fetchone()
+ self.failUnless(isinstance(row,
+ list),
+ "row is not instance of list")
+
+ def CheckSqliteRow(self):
+ self.con.row_factory = sqlite.Row
+ row = self.con.execute("select 1 as a, 2 as b").fetchone()
+ self.failUnless(isinstance(row,
+ sqlite.Row),
+ "row is not instance of sqlite.Row")
+
+ col1, col2 = row["a"], row["b"]
+ self.failUnless(col1 == 1, "by name: wrong result for column 'a'")
+        self.failUnless(col2 == 2, "by name: wrong result for column 'b'")
+
+ col1, col2 = row["A"], row["B"]
+ self.failUnless(col1 == 1, "by name: wrong result for column 'A'")
+ self.failUnless(col2 == 2, "by name: wrong result for column 'B'")
+
+ col1, col2 = row[0], row[1]
+ self.failUnless(col1 == 1, "by index: wrong result for column 0")
+ self.failUnless(col2 == 2, "by index: wrong result for column 1")
+
+ def tearDown(self):
+ self.con.close()
+
+class TextFactoryTests(unittest.TestCase):
+ def setUp(self):
+ self.con = sqlite.connect(":memory:")
+
+ def CheckUnicode(self):
+ austria = unicode("Österreich", "latin1")
+ row = self.con.execute("select ?", (austria,)).fetchone()
+ self.failUnless(type(row[0]) == unicode, "type of row[0] must be unicode")
+
+ def CheckString(self):
+ self.con.text_factory = str
+ austria = unicode("Österreich", "latin1")
+ row = self.con.execute("select ?", (austria,)).fetchone()
+ self.failUnless(type(row[0]) == str, "type of row[0] must be str")
+ self.failUnless(row[0] == austria.encode("utf-8"), "column must equal original data in UTF-8")
+
+ def CheckCustom(self):
+ self.con.text_factory = lambda x: unicode(x, "utf-8", "ignore")
+ austria = unicode("Österreich", "latin1")
+ row = self.con.execute("select ?", (austria.encode("latin1"),)).fetchone()
+ self.failUnless(type(row[0]) == unicode, "type of row[0] must be unicode")
+ self.failUnless(row[0].endswith(u"reich"), "column must contain original data")
+
+ def CheckOptimizedUnicode(self):
+ self.con.text_factory = sqlite.OptimizedUnicode
+ austria = unicode("Österreich", "latin1")
+        germany = unicode("Deutschland")
+ a_row = self.con.execute("select ?", (austria,)).fetchone()
+ d_row = self.con.execute("select ?", (germany,)).fetchone()
+ self.failUnless(type(a_row[0]) == unicode, "type of non-ASCII row must be unicode")
+ self.failUnless(type(d_row[0]) == str, "type of ASCII-only row must be str")
+
+ def tearDown(self):
+ self.con.close()
+
+def suite():
+ connection_suite = unittest.makeSuite(ConnectionFactoryTests, "Check")
+ cursor_suite = unittest.makeSuite(CursorFactoryTests, "Check")
+ row_suite_compat = unittest.makeSuite(RowFactoryTestsBackwardsCompat, "Check")
+ row_suite = unittest.makeSuite(RowFactoryTests, "Check")
+ text_suite = unittest.makeSuite(TextFactoryTests, "Check")
+ return unittest.TestSuite((connection_suite, cursor_suite, row_suite_compat, row_suite, text_suite))
+
+def test():
+ runner = unittest.TextTestRunner()
+ runner.run(suite())
+
+if __name__ == "__main__":
+ test()
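
The dict_factory pattern from the tests also works when hung directly on a
connection, so every cursor it hands out returns mapping-style rows::

    import sqlite3

    def dict_factory(cursor, row):
        d = {}
        for idx, col in enumerate(cursor.description):
            d[col[0]] = row[idx]
        return d

    con = sqlite3.connect(":memory:")
    con.row_factory = dict_factory
    print con.execute("select 1 as a, 2 as b").fetchone()   # {'a': 1, 'b': 2}
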
diff --git a/Lib/sqlite3/test/hooks.py b/Lib/sqlite3/test/hooks.py
new file mode 100644
index 0000000..21f7b88
--- /dev/null
+++ b/Lib/sqlite3/test/hooks.py
@@ -0,0 +1,115 @@
+#-*- coding: ISO-8859-1 -*-
+# pysqlite2/test/hooks.py: tests for various SQLite-specific hooks
+#
+# Copyright (C) 2006 Gerhard Häring <gh@ghaering.de>
+#
+# This file is part of pysqlite.
+#
+# This software is provided 'as-is', without any express or implied
+# warranty. In no event will the authors be held liable for any damages
+# arising from the use of this software.
+#
+# Permission is granted to anyone to use this software for any purpose,
+# including commercial applications, and to alter it and redistribute it
+# freely, subject to the following restrictions:
+#
+# 1. The origin of this software must not be misrepresented; you must not
+# claim that you wrote the original software. If you use this software
+# in a product, an acknowledgment in the product documentation would be
+# appreciated but is not required.
+# 2. Altered source versions must be plainly marked as such, and must not be
+# misrepresented as being the original software.
+# 3. This notice may not be removed or altered from any source distribution.
+
+import os, unittest
+import sqlite3 as sqlite
+
+class CollationTests(unittest.TestCase):
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def CheckCreateCollationNotCallable(self):
+ con = sqlite.connect(":memory:")
+ try:
+ con.create_collation("X", 42)
+ self.fail("should have raised a TypeError")
+ except TypeError, e:
+ self.failUnlessEqual(e.args[0], "parameter must be callable")
+
+ def CheckCreateCollationNotAscii(self):
+ con = sqlite.connect(":memory:")
+ try:
+ con.create_collation("collä", cmp)
+ self.fail("should have raised a ProgrammingError")
+ except sqlite.ProgrammingError, e:
+ pass
+
+ def CheckCollationIsUsed(self):
+ def mycoll(x, y):
+ # reverse order
+ return -cmp(x, y)
+
+ con = sqlite.connect(":memory:")
+ con.create_collation("mycoll", mycoll)
+ sql = """
+ select x from (
+ select 'a' as x
+ union
+ select 'b' as x
+ union
+ select 'c' as x
+ ) order by x collate mycoll
+ """
+ result = con.execute(sql).fetchall()
+ if result[0][0] != "c" or result[1][0] != "b" or result[2][0] != "a":
+ self.fail("the expected order was not returned")
+
+ con.create_collation("mycoll", None)
+ try:
+ result = con.execute(sql).fetchall()
+ self.fail("should have raised an OperationalError")
+ except sqlite.OperationalError, e:
+ self.failUnlessEqual(e.args[0], "no such collation sequence: mycoll")
+
+ def CheckCollationRegisterTwice(self):
+ """
+ Register two different collation functions under the same name.
+ Verify that the last one is actually used.
+ """
+ con = sqlite.connect(":memory:")
+ con.create_collation("mycoll", cmp)
+ con.create_collation("mycoll", lambda x, y: -cmp(x, y))
+ result = con.execute("""
+ select x from (select 'a' as x union select 'b' as x) order by x collate mycoll
+ """).fetchall()
+ if result[0][0] != 'b' or result[1][0] != 'a':
+ self.fail("wrong collation function is used")
+
+ def CheckDeregisterCollation(self):
+ """
+ Register a collation, then deregister it. Make sure an error is raised if we try
+ to use it.
+ """
+ con = sqlite.connect(":memory:")
+ con.create_collation("mycoll", cmp)
+ con.create_collation("mycoll", None)
+ try:
+ con.execute("select 'a' as x union select 'b' as x order by x collate mycoll")
+ self.fail("should have raised an OperationalError")
+ except sqlite.OperationalError, e:
+ if not e.args[0].startswith("no such collation sequence"):
+ self.fail("wrong OperationalError raised")
+
+def suite():
+ collation_suite = unittest.makeSuite(CollationTests, "Check")
+ return unittest.TestSuite((collation_suite,))
+
+def test():
+ runner = unittest.TextTestRunner()
+ runner.run(suite())
+
+if __name__ == "__main__":
+ test()
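
Collations in a nutshell: register a comparison function under a name, then
name it in an ORDER BY clause, as the tests above do. A condensed sketch::

    import sqlite3

    con = sqlite3.connect(":memory:")
    con.create_collation("reverse", lambda x, y: -cmp(x, y))
    rows = con.execute("select x from (select 'a' as x union select 'b' as x) "
                       "order by x collate reverse").fetchall()
    print rows   # [(u'b',), (u'a',)]
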
diff --git a/Lib/sqlite3/test/regression.py b/Lib/sqlite3/test/regression.py
new file mode 100644
index 0000000..648ada5
--- /dev/null
+++ b/Lib/sqlite3/test/regression.py
@@ -0,0 +1,48 @@
+#-*- coding: ISO-8859-1 -*-
+# pysqlite2/test/regression.py: pysqlite regression tests
+#
+# Copyright (C) 2006 Gerhard Häring <gh@ghaering.de>
+#
+# This file is part of pysqlite.
+#
+# This software is provided 'as-is', without any express or implied
+# warranty. In no event will the authors be held liable for any damages
+# arising from the use of this software.
+#
+# Permission is granted to anyone to use this software for any purpose,
+# including commercial applications, and to alter it and redistribute it
+# freely, subject to the following restrictions:
+#
+# 1. The origin of this software must not be misrepresented; you must not
+# claim that you wrote the original software. If you use this software
+# in a product, an acknowledgment in the product documentation would be
+# appreciated but is not required.
+# 2. Altered source versions must be plainly marked as such, and must not be
+# misrepresented as being the original software.
+# 3. This notice may not be removed or altered from any source distribution.
+
+import unittest
+import sqlite3 as sqlite
+
+class RegressionTests(unittest.TestCase):
+ def setUp(self):
+ self.con = sqlite.connect(":memory:")
+
+ def tearDown(self):
+ self.con.close()
+
+ def CheckPragmaUserVersion(self):
+ # This used to crash pysqlite because this pragma command returns NULL for the column name
+ cur = self.con.cursor()
+ cur.execute("pragma user_version")
+
+def suite():
+ regression_suite = unittest.makeSuite(RegressionTests, "Check")
+ return unittest.TestSuite((regression_suite,))
+
+def test():
+ runner = unittest.TextTestRunner()
+ runner.run(suite())
+
+if __name__ == "__main__":
+ test()
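
The crash noted in CheckPragmaUserVersion came from SQLite supplying no declared column name; a quick sketch of the situation the fixed code must tolerate (nothing here beyond stock sqlite3 calls):

    import sqlite3
    cur = sqlite3.connect(":memory:").cursor()
    cur.execute("pragma user_version")
    # cursor.description has to stay usable even when the backend
    # reports no column name for the result set
    print cur.description, cur.fetchone()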
diff --git a/Lib/sqlite3/test/transactions.py b/Lib/sqlite3/test/transactions.py
new file mode 100644
index 0000000..1f0b19a
--- /dev/null
+++ b/Lib/sqlite3/test/transactions.py
@@ -0,0 +1,156 @@
+#-*- coding: ISO-8859-1 -*-
+# pysqlite2/test/transactions.py: tests transactions
+#
+# Copyright (C) 2005 Gerhard Häring <gh@ghaering.de>
+#
+# This file is part of pysqlite.
+#
+# This software is provided 'as-is', without any express or implied
+# warranty. In no event will the authors be held liable for any damages
+# arising from the use of this software.
+#
+# Permission is granted to anyone to use this software for any purpose,
+# including commercial applications, and to alter it and redistribute it
+# freely, subject to the following restrictions:
+#
+# 1. The origin of this software must not be misrepresented; you must not
+# claim that you wrote the original software. If you use this software
+# in a product, an acknowledgment in the product documentation would be
+# appreciated but is not required.
+# 2. Altered source versions must be plainly marked as such, and must not be
+# misrepresented as being the original software.
+# 3. This notice may not be removed or altered from any source distribution.
+
+import os, unittest
+import sqlite3 as sqlite
+
+def get_db_path():
+ return "sqlite_testdb"
+
+class TransactionTests(unittest.TestCase):
+ def setUp(self):
+ try:
+ os.remove(get_db_path())
+ except:
+ pass
+
+ self.con1 = sqlite.connect(get_db_path(), timeout=0.1)
+ self.cur1 = self.con1.cursor()
+
+ self.con2 = sqlite.connect(get_db_path(), timeout=0.1)
+ self.cur2 = self.con2.cursor()
+
+ def tearDown(self):
+ self.cur1.close()
+ self.con1.close()
+
+ self.cur2.close()
+ self.con2.close()
+
+ os.unlink(get_db_path())
+
+ def CheckDMLdoesAutoCommitBefore(self):
+ self.cur1.execute("create table test(i)")
+ self.cur1.execute("insert into test(i) values (5)")
+ self.cur1.execute("create table test2(j)")
+ self.cur2.execute("select i from test")
+ res = self.cur2.fetchall()
+ self.failUnlessEqual(len(res), 1)
+
+ def CheckInsertStartsTransaction(self):
+ self.cur1.execute("create table test(i)")
+ self.cur1.execute("insert into test(i) values (5)")
+ self.cur2.execute("select i from test")
+ res = self.cur2.fetchall()
+ self.failUnlessEqual(len(res), 0)
+
+ def CheckUpdateStartsTransaction(self):
+ self.cur1.execute("create table test(i)")
+ self.cur1.execute("insert into test(i) values (5)")
+ self.con1.commit()
+ self.cur1.execute("update test set i=6")
+ self.cur2.execute("select i from test")
+ res = self.cur2.fetchone()[0]
+ self.failUnlessEqual(res, 5)
+
+ def CheckDeleteStartsTransaction(self):
+ self.cur1.execute("create table test(i)")
+ self.cur1.execute("insert into test(i) values (5)")
+ self.con1.commit()
+ self.cur1.execute("delete from test")
+ self.cur2.execute("select i from test")
+ res = self.cur2.fetchall()
+ self.failUnlessEqual(len(res), 1)
+
+ def CheckReplaceStartsTransaction(self):
+ self.cur1.execute("create table test(i)")
+ self.cur1.execute("insert into test(i) values (5)")
+ self.con1.commit()
+ self.cur1.execute("replace into test(i) values (6)")
+ self.cur2.execute("select i from test")
+ res = self.cur2.fetchall()
+ self.failUnlessEqual(len(res), 1)
+ self.failUnlessEqual(res[0][0], 5)
+
+ def CheckToggleAutoCommit(self):
+ self.cur1.execute("create table test(i)")
+ self.cur1.execute("insert into test(i) values (5)")
+ self.con1.isolation_level = None
+ self.failUnlessEqual(self.con1.isolation_level, None)
+ self.cur2.execute("select i from test")
+ res = self.cur2.fetchall()
+ self.failUnlessEqual(len(res), 1)
+
+ self.con1.isolation_level = "DEFERRED"
+ self.failUnlessEqual(self.con1.isolation_level , "DEFERRED")
+ self.cur1.execute("insert into test(i) values (5)")
+ self.cur2.execute("select i from test")
+ res = self.cur2.fetchall()
+ self.failUnlessEqual(len(res), 1)
+
+ def CheckRaiseTimeout(self):
+ self.cur1.execute("create table test(i)")
+ self.cur1.execute("insert into test(i) values (5)")
+ try:
+ self.cur2.execute("insert into test(i) values (5)")
+ self.fail("should have raised an OperationalError")
+ except sqlite.OperationalError:
+ pass
+ except:
+ self.fail("should have raised an OperationalError")
+
+class SpecialCommandTests(unittest.TestCase):
+ def setUp(self):
+ self.con = sqlite.connect(":memory:")
+ self.cur = self.con.cursor()
+
+ def CheckVacuum(self):
+ self.cur.execute("create table test(i)")
+ self.cur.execute("insert into test(i) values (5)")
+ self.cur.execute("vacuum")
+
+ def CheckDropTable(self):
+ self.cur.execute("create table test(i)")
+ self.cur.execute("insert into test(i) values (5)")
+ self.cur.execute("drop table test")
+
+ def CheckPragma(self):
+ self.cur.execute("create table test(i)")
+ self.cur.execute("insert into test(i) values (5)")
+ self.cur.execute("pragma count_changes=1")
+
+ def tearDown(self):
+ self.cur.close()
+ self.con.close()
+
+def suite():
+ default_suite = unittest.makeSuite(TransactionTests, "Check")
+ special_command_suite = unittest.makeSuite(SpecialCommandTests, "Check")
+ return unittest.TestSuite((default_suite, special_command_suite))
+
+def test():
+ runner = unittest.TextTestRunner()
+ runner.run(suite())
+
+if __name__ == "__main__":
+ test()
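
CheckToggleAutoCommit above leans on the documented isolation_level attribute; roughly:

    import sqlite3
    con = sqlite3.connect(":memory:")
    con.isolation_level = None         # autocommit: statements take effect at once
    con.execute("create table t(i)")
    con.isolation_level = "DEFERRED"   # back to implicit transactions
    con.execute("insert into t(i) values (1)")
    con.commit()                       # only now are the rows visible to other connections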
diff --git a/Lib/sqlite3/test/types.py b/Lib/sqlite3/test/types.py
new file mode 100644
index 0000000..e49f7dd
--- /dev/null
+++ b/Lib/sqlite3/test/types.py
@@ -0,0 +1,339 @@
+#-*- coding: ISO-8859-1 -*-
+# pysqlite2/test/types.py: tests for type conversion and detection
+#
+# Copyright (C) 2005 Gerhard Häring <gh@ghaering.de>
+#
+# This file is part of pysqlite.
+#
+# This software is provided 'as-is', without any express or implied
+# warranty. In no event will the authors be held liable for any damages
+# arising from the use of this software.
+#
+# Permission is granted to anyone to use this software for any purpose,
+# including commercial applications, and to alter it and redistribute it
+# freely, subject to the following restrictions:
+#
+# 1. The origin of this software must not be misrepresented; you must not
+# claim that you wrote the original software. If you use this software
+# in a product, an acknowledgment in the product documentation would be
+# appreciated but is not required.
+# 2. Altered source versions must be plainly marked as such, and must not be
+# misrepresented as being the original software.
+# 3. This notice may not be removed or altered from any source distribution.
+
+import datetime
+import unittest
+import sqlite3 as sqlite
+
+class SqliteTypeTests(unittest.TestCase):
+ def setUp(self):
+ self.con = sqlite.connect(":memory:")
+ self.cur = self.con.cursor()
+ self.cur.execute("create table test(i integer, s varchar, f number, b blob)")
+
+ def tearDown(self):
+ self.cur.close()
+ self.con.close()
+
+ def CheckString(self):
+ self.cur.execute("insert into test(s) values (?)", (u"Österreich",))
+ self.cur.execute("select s from test")
+ row = self.cur.fetchone()
+ self.failUnlessEqual(row[0], u"Österreich")
+
+ def CheckSmallInt(self):
+ self.cur.execute("insert into test(i) values (?)", (42,))
+ self.cur.execute("select i from test")
+ row = self.cur.fetchone()
+ self.failUnlessEqual(row[0], 42)
+
+ def CheckLargeInt(self):
+ num = 2**40
+ self.cur.execute("insert into test(i) values (?)", (num,))
+ self.cur.execute("select i from test")
+ row = self.cur.fetchone()
+ self.failUnlessEqual(row[0], num)
+
+ def CheckFloat(self):
+ val = 3.14
+ self.cur.execute("insert into test(f) values (?)", (val,))
+ self.cur.execute("select f from test")
+ row = self.cur.fetchone()
+ self.failUnlessEqual(row[0], val)
+
+ def CheckBlob(self):
+ val = buffer("Guglhupf")
+ self.cur.execute("insert into test(b) values (?)", (val,))
+ self.cur.execute("select b from test")
+ row = self.cur.fetchone()
+ self.failUnlessEqual(row[0], val)
+
+ def CheckUnicodeExecute(self):
+ self.cur.execute(u"select 'Österreich'")
+ row = self.cur.fetchone()
+ self.failUnlessEqual(row[0], u"Österreich")
+
+class DeclTypesTests(unittest.TestCase):
+ class Foo:
+ def __init__(self, _val):
+ self.val = _val
+
+ def __cmp__(self, other):
+ if not isinstance(other, DeclTypesTests.Foo):
+ raise ValueError
+ if self.val == other.val:
+ return 0
+ else:
+ return 1
+
+ def __conform__(self, protocol):
+ if protocol is sqlite.PrepareProtocol:
+ return self.val
+ else:
+ return None
+
+ def __str__(self):
+ return "<%s>" % self.val
+
+ def setUp(self):
+ self.con = sqlite.connect(":memory:", detect_types=sqlite.PARSE_DECLTYPES)
+ self.cur = self.con.cursor()
+ self.cur.execute("create table test(i int, s str, f float, b bool, u unicode, foo foo, bin blob)")
+
+        # override float, so the converter always returns the same number
+ sqlite.converters["float"] = lambda x: 47.2
+
+ # and implement two custom ones
+ sqlite.converters["bool"] = lambda x: bool(int(x))
+ sqlite.converters["foo"] = DeclTypesTests.Foo
+
+ def tearDown(self):
+ del sqlite.converters["float"]
+ del sqlite.converters["bool"]
+ del sqlite.converters["foo"]
+ self.cur.close()
+ self.con.close()
+
+ def CheckString(self):
+ # default
+ self.cur.execute("insert into test(s) values (?)", ("foo",))
+ self.cur.execute("select s from test")
+ row = self.cur.fetchone()
+ self.failUnlessEqual(row[0], "foo")
+
+ def CheckSmallInt(self):
+ # default
+ self.cur.execute("insert into test(i) values (?)", (42,))
+ self.cur.execute("select i from test")
+ row = self.cur.fetchone()
+ self.failUnlessEqual(row[0], 42)
+
+ def CheckLargeInt(self):
+ # default
+ num = 2**40
+ self.cur.execute("insert into test(i) values (?)", (num,))
+ self.cur.execute("select i from test")
+ row = self.cur.fetchone()
+ self.failUnlessEqual(row[0], num)
+
+ def CheckFloat(self):
+ # custom
+ val = 3.14
+ self.cur.execute("insert into test(f) values (?)", (val,))
+ self.cur.execute("select f from test")
+ row = self.cur.fetchone()
+ self.failUnlessEqual(row[0], 47.2)
+
+ def CheckBool(self):
+ # custom
+ self.cur.execute("insert into test(b) values (?)", (False,))
+ self.cur.execute("select b from test")
+ row = self.cur.fetchone()
+ self.failUnlessEqual(row[0], False)
+
+ self.cur.execute("delete from test")
+ self.cur.execute("insert into test(b) values (?)", (True,))
+ self.cur.execute("select b from test")
+ row = self.cur.fetchone()
+ self.failUnlessEqual(row[0], True)
+
+ def CheckUnicode(self):
+ # default
+ val = u"\xd6sterreich"
+ self.cur.execute("insert into test(u) values (?)", (val,))
+ self.cur.execute("select u from test")
+ row = self.cur.fetchone()
+ self.failUnlessEqual(row[0], val)
+
+ def CheckFoo(self):
+ val = DeclTypesTests.Foo("bla")
+ self.cur.execute("insert into test(foo) values (?)", (val,))
+ self.cur.execute("select foo from test")
+ row = self.cur.fetchone()
+ self.failUnlessEqual(row[0], val)
+
+ def CheckUnsupportedSeq(self):
+ class Bar: pass
+ val = Bar()
+ try:
+ self.cur.execute("insert into test(f) values (?)", (val,))
+ self.fail("should have raised an InterfaceError")
+ except sqlite.InterfaceError:
+ pass
+ except:
+ self.fail("should have raised an InterfaceError")
+
+ def CheckUnsupportedDict(self):
+ class Bar: pass
+ val = Bar()
+ try:
+ self.cur.execute("insert into test(f) values (:val)", {"val": val})
+ self.fail("should have raised an InterfaceError")
+ except sqlite.InterfaceError:
+ pass
+ except:
+ self.fail("should have raised an InterfaceError")
+
+ def CheckBlob(self):
+ # default
+ val = buffer("Guglhupf")
+ self.cur.execute("insert into test(bin) values (?)", (val,))
+ self.cur.execute("select bin from test")
+ row = self.cur.fetchone()
+ self.failUnlessEqual(row[0], val)
+
+class ColNamesTests(unittest.TestCase):
+ def setUp(self):
+ self.con = sqlite.connect(":memory:", detect_types=sqlite.PARSE_COLNAMES|sqlite.PARSE_DECLTYPES)
+ self.cur = self.con.cursor()
+ self.cur.execute("create table test(x foo)")
+
+ sqlite.converters["foo"] = lambda x: "[%s]" % x
+ sqlite.converters["bar"] = lambda x: "<%s>" % x
+ sqlite.converters["exc"] = lambda x: 5/0
+
+ def tearDown(self):
+ del sqlite.converters["foo"]
+ del sqlite.converters["bar"]
+ del sqlite.converters["exc"]
+ self.cur.close()
+ self.con.close()
+
+ def CheckDeclType(self):
+ self.cur.execute("insert into test(x) values (?)", ("xxx",))
+ self.cur.execute("select x from test")
+ val = self.cur.fetchone()[0]
+ self.failUnlessEqual(val, "[xxx]")
+
+ def CheckNone(self):
+ self.cur.execute("insert into test(x) values (?)", (None,))
+ self.cur.execute("select x from test")
+ val = self.cur.fetchone()[0]
+ self.failUnlessEqual(val, None)
+
+ def CheckExc(self):
+ # Exceptions in type converters result in returned Nones
+ self.cur.execute('select 5 as "x [exc]"')
+ val = self.cur.fetchone()[0]
+ self.failUnlessEqual(val, None)
+
+ def CheckColName(self):
+ self.cur.execute("insert into test(x) values (?)", ("xxx",))
+ self.cur.execute('select x as "x [bar]" from test')
+ val = self.cur.fetchone()[0]
+ self.failUnlessEqual(val, "<xxx>")
+
+ # Check if the stripping of colnames works. Everything after the first
+ # whitespace should be stripped.
+ self.failUnlessEqual(self.cur.description[0][0], "x")
+
+ def CheckCursorDescriptionNoRow(self):
+ """
+ cursor.description should at least provide the column name(s), even if
+        no row is returned.
+ """
+ self.cur.execute("select * from test where 0 = 1")
+ self.assert_(self.cur.description[0][0] == "x")
+
+class ObjectAdaptationTests(unittest.TestCase):
+ def cast(obj):
+ return float(obj)
+ cast = staticmethod(cast)
+
+ def setUp(self):
+ self.con = sqlite.connect(":memory:")
+ try:
+ del sqlite.adapters[int]
+ except:
+ pass
+ sqlite.register_adapter(int, ObjectAdaptationTests.cast)
+ self.cur = self.con.cursor()
+
+ def tearDown(self):
+ del sqlite.adapters[(int, sqlite.PrepareProtocol)]
+ self.cur.close()
+ self.con.close()
+
+ def CheckCasterIsUsed(self):
+ self.cur.execute("select ?", (4,))
+ val = self.cur.fetchone()[0]
+ self.failUnlessEqual(type(val), float)
+
+class DateTimeTests(unittest.TestCase):
+ def setUp(self):
+ self.con = sqlite.connect(":memory:", detect_types=sqlite.PARSE_DECLTYPES)
+ self.cur = self.con.cursor()
+ self.cur.execute("create table test(d date, ts timestamp)")
+
+ def tearDown(self):
+ self.cur.close()
+ self.con.close()
+
+ def CheckSqliteDate(self):
+ d = sqlite.Date(2004, 2, 14)
+ self.cur.execute("insert into test(d) values (?)", (d,))
+ self.cur.execute("select d from test")
+ d2 = self.cur.fetchone()[0]
+ self.failUnlessEqual(d, d2)
+
+ def CheckSqliteTimestamp(self):
+ ts = sqlite.Timestamp(2004, 2, 14, 7, 15, 0)
+ self.cur.execute("insert into test(ts) values (?)", (ts,))
+ self.cur.execute("select ts from test")
+ ts2 = self.cur.fetchone()[0]
+ self.failUnlessEqual(ts, ts2)
+
+ def CheckSqlTimestamp(self):
+ # The date functions are only available in SQLite version 3.1 or later
+ if sqlite.sqlite_version_info < (3, 1):
+ return
+
+ # SQLite's current_timestamp uses UTC time, while datetime.datetime.now() uses local time.
+ now = datetime.datetime.now()
+ self.cur.execute("insert into test(ts) values (current_timestamp)")
+ self.cur.execute("select ts from test")
+ ts = self.cur.fetchone()[0]
+ self.failUnlessEqual(type(ts), datetime.datetime)
+ self.failUnlessEqual(ts.year, now.year)
+
+ def CheckDateTimeSubSeconds(self):
+ ts = sqlite.Timestamp(2004, 2, 14, 7, 15, 0, 500000)
+ self.cur.execute("insert into test(ts) values (?)", (ts,))
+ self.cur.execute("select ts from test")
+ ts2 = self.cur.fetchone()[0]
+ self.failUnlessEqual(ts, ts2)
+
+def suite():
+ sqlite_type_suite = unittest.makeSuite(SqliteTypeTests, "Check")
+ decltypes_type_suite = unittest.makeSuite(DeclTypesTests, "Check")
+ colnames_type_suite = unittest.makeSuite(ColNamesTests, "Check")
+ adaptation_suite = unittest.makeSuite(ObjectAdaptationTests, "Check")
+ date_suite = unittest.makeSuite(DateTimeTests, "Check")
+ return unittest.TestSuite((sqlite_type_suite, decltypes_type_suite, colnames_type_suite, adaptation_suite, date_suite))
+
+def test():
+ runner = unittest.TextTestRunner()
+ runner.run(suite())
+
+if __name__ == "__main__":
+ test()
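
The DeclTypes and ColNames tests above revolve around two registries, the converter table and the adapter table; a condensed sketch of the public entry points (the "bool" type name is illustrative):

    import sqlite3
    sqlite3.register_adapter(bool, int)       # Python value -> SQLite value
    sqlite3.register_converter("bool",        # SQLite value -> Python value
                               lambda v: bool(int(v)))
    con = sqlite3.connect(":memory:", detect_types=sqlite3.PARSE_DECLTYPES)
    con.execute("create table t(b bool)")
    con.execute("insert into t(b) values (?)", (True,))
    print con.execute("select b from t").fetchone()[0]    # True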
diff --git a/Lib/sqlite3/test/userfunctions.py b/Lib/sqlite3/test/userfunctions.py
new file mode 100644
index 0000000..ff7db9c
--- /dev/null
+++ b/Lib/sqlite3/test/userfunctions.py
@@ -0,0 +1,330 @@
+#-*- coding: ISO-8859-1 -*-
+# pysqlite2/test/userfunctions.py: tests for user-defined functions and
+# aggregates.
+#
+# Copyright (C) 2005 Gerhard Häring <gh@ghaering.de>
+#
+# This file is part of pysqlite.
+#
+# This software is provided 'as-is', without any express or implied
+# warranty. In no event will the authors be held liable for any damages
+# arising from the use of this software.
+#
+# Permission is granted to anyone to use this software for any purpose,
+# including commercial applications, and to alter it and redistribute it
+# freely, subject to the following restrictions:
+#
+# 1. The origin of this software must not be misrepresented; you must not
+# claim that you wrote the original software. If you use this software
+# in a product, an acknowledgment in the product documentation would be
+# appreciated but is not required.
+# 2. Altered source versions must be plainly marked as such, and must not be
+# misrepresented as being the original software.
+# 3. This notice may not be removed or altered from any source distribution.
+
+import unittest
+import sqlite3 as sqlite
+
+def func_returntext():
+ return "foo"
+def func_returnunicode():
+ return u"bar"
+def func_returnint():
+ return 42
+def func_returnfloat():
+ return 3.14
+def func_returnnull():
+ return None
+def func_returnblob():
+ return buffer("blob")
+def func_raiseexception():
+ 5/0
+
+def func_isstring(v):
+ return type(v) is unicode
+def func_isint(v):
+ return type(v) is int
+def func_isfloat(v):
+ return type(v) is float
+def func_isnone(v):
+ return type(v) is type(None)
+def func_isblob(v):
+ return type(v) is buffer
+
+class AggrNoStep:
+ def __init__(self):
+ pass
+
+class AggrNoFinalize:
+ def __init__(self):
+ pass
+
+ def step(self, x):
+ pass
+
+class AggrExceptionInInit:
+ def __init__(self):
+ 5/0
+
+ def step(self, x):
+ pass
+
+ def finalize(self):
+ pass
+
+class AggrExceptionInStep:
+ def __init__(self):
+ pass
+
+ def step(self, x):
+ 5/0
+
+ def finalize(self):
+ return 42
+
+class AggrExceptionInFinalize:
+ def __init__(self):
+ pass
+
+ def step(self, x):
+ pass
+
+ def finalize(self):
+ 5/0
+
+class AggrCheckType:
+ def __init__(self):
+ self.val = None
+
+ def step(self, whichType, val):
+ theType = {"str": unicode, "int": int, "float": float, "None": type(None), "blob": buffer}
+ self.val = int(theType[whichType] is type(val))
+
+ def finalize(self):
+ return self.val
+
+class AggrSum:
+ def __init__(self):
+ self.val = 0.0
+
+ def step(self, val):
+ self.val += val
+
+ def finalize(self):
+ return self.val
+
+class FunctionTests(unittest.TestCase):
+ def setUp(self):
+ self.con = sqlite.connect(":memory:")
+
+ self.con.create_function("returntext", 0, func_returntext)
+ self.con.create_function("returnunicode", 0, func_returnunicode)
+ self.con.create_function("returnint", 0, func_returnint)
+ self.con.create_function("returnfloat", 0, func_returnfloat)
+ self.con.create_function("returnnull", 0, func_returnnull)
+ self.con.create_function("returnblob", 0, func_returnblob)
+ self.con.create_function("raiseexception", 0, func_raiseexception)
+
+ self.con.create_function("isstring", 1, func_isstring)
+ self.con.create_function("isint", 1, func_isint)
+ self.con.create_function("isfloat", 1, func_isfloat)
+ self.con.create_function("isnone", 1, func_isnone)
+ self.con.create_function("isblob", 1, func_isblob)
+
+ def tearDown(self):
+ self.con.close()
+
+ def CheckFuncRefCount(self):
+ def getfunc():
+ def f():
+ return val
+ return f
+ self.con.create_function("reftest", 0, getfunc())
+ cur = self.con.cursor()
+ cur.execute("select reftest()")
+
+ def CheckFuncReturnText(self):
+ cur = self.con.cursor()
+ cur.execute("select returntext()")
+ val = cur.fetchone()[0]
+ self.failUnlessEqual(type(val), unicode)
+ self.failUnlessEqual(val, "foo")
+
+ def CheckFuncReturnUnicode(self):
+ cur = self.con.cursor()
+ cur.execute("select returnunicode()")
+ val = cur.fetchone()[0]
+ self.failUnlessEqual(type(val), unicode)
+ self.failUnlessEqual(val, u"bar")
+
+ def CheckFuncReturnInt(self):
+ cur = self.con.cursor()
+ cur.execute("select returnint()")
+ val = cur.fetchone()[0]
+ self.failUnlessEqual(type(val), int)
+ self.failUnlessEqual(val, 42)
+
+ def CheckFuncReturnFloat(self):
+ cur = self.con.cursor()
+ cur.execute("select returnfloat()")
+ val = cur.fetchone()[0]
+ self.failUnlessEqual(type(val), float)
+ if val < 3.139 or val > 3.141:
+ self.fail("wrong value")
+
+ def CheckFuncReturnNull(self):
+ cur = self.con.cursor()
+ cur.execute("select returnnull()")
+ val = cur.fetchone()[0]
+ self.failUnlessEqual(type(val), type(None))
+ self.failUnlessEqual(val, None)
+
+ def CheckFuncReturnBlob(self):
+ cur = self.con.cursor()
+ cur.execute("select returnblob()")
+ val = cur.fetchone()[0]
+ self.failUnlessEqual(type(val), buffer)
+ self.failUnlessEqual(val, buffer("blob"))
+
+ def CheckFuncException(self):
+ cur = self.con.cursor()
+ cur.execute("select raiseexception()")
+ val = cur.fetchone()[0]
+ self.failUnlessEqual(val, None)
+
+ def CheckParamString(self):
+ cur = self.con.cursor()
+ cur.execute("select isstring(?)", ("foo",))
+ val = cur.fetchone()[0]
+ self.failUnlessEqual(val, 1)
+
+ def CheckParamInt(self):
+ cur = self.con.cursor()
+ cur.execute("select isint(?)", (42,))
+ val = cur.fetchone()[0]
+ self.failUnlessEqual(val, 1)
+
+ def CheckParamFloat(self):
+ cur = self.con.cursor()
+ cur.execute("select isfloat(?)", (3.14,))
+ val = cur.fetchone()[0]
+ self.failUnlessEqual(val, 1)
+
+ def CheckParamNone(self):
+ cur = self.con.cursor()
+ cur.execute("select isnone(?)", (None,))
+ val = cur.fetchone()[0]
+ self.failUnlessEqual(val, 1)
+
+ def CheckParamBlob(self):
+ cur = self.con.cursor()
+ cur.execute("select isblob(?)", (buffer("blob"),))
+ val = cur.fetchone()[0]
+ self.failUnlessEqual(val, 1)
+
+class AggregateTests(unittest.TestCase):
+ def setUp(self):
+ self.con = sqlite.connect(":memory:")
+ cur = self.con.cursor()
+ cur.execute("""
+ create table test(
+ t text,
+ i integer,
+ f float,
+ n,
+ b blob
+ )
+ """)
+ cur.execute("insert into test(t, i, f, n, b) values (?, ?, ?, ?, ?)",
+ ("foo", 5, 3.14, None, buffer("blob"),))
+
+ self.con.create_aggregate("nostep", 1, AggrNoStep)
+ self.con.create_aggregate("nofinalize", 1, AggrNoFinalize)
+ self.con.create_aggregate("excInit", 1, AggrExceptionInInit)
+ self.con.create_aggregate("excStep", 1, AggrExceptionInStep)
+ self.con.create_aggregate("excFinalize", 1, AggrExceptionInFinalize)
+ self.con.create_aggregate("checkType", 2, AggrCheckType)
+ self.con.create_aggregate("mysum", 1, AggrSum)
+
+ def tearDown(self):
+ #self.cur.close()
+ #self.con.close()
+ pass
+
+ def CheckAggrNoStep(self):
+ cur = self.con.cursor()
+ cur.execute("select nostep(t) from test")
+
+ def CheckAggrNoFinalize(self):
+ cur = self.con.cursor()
+ cur.execute("select nofinalize(t) from test")
+ val = cur.fetchone()[0]
+ self.failUnlessEqual(val, None)
+
+ def CheckAggrExceptionInInit(self):
+ cur = self.con.cursor()
+ cur.execute("select excInit(t) from test")
+ val = cur.fetchone()[0]
+ self.failUnlessEqual(val, None)
+
+ def CheckAggrExceptionInStep(self):
+ cur = self.con.cursor()
+ cur.execute("select excStep(t) from test")
+ val = cur.fetchone()[0]
+ self.failUnlessEqual(val, 42)
+
+ def CheckAggrExceptionInFinalize(self):
+ cur = self.con.cursor()
+ cur.execute("select excFinalize(t) from test")
+ val = cur.fetchone()[0]
+ self.failUnlessEqual(val, None)
+
+ def CheckAggrCheckParamStr(self):
+ cur = self.con.cursor()
+ cur.execute("select checkType('str', ?)", ("foo",))
+ val = cur.fetchone()[0]
+ self.failUnlessEqual(val, 1)
+
+ def CheckAggrCheckParamInt(self):
+ cur = self.con.cursor()
+ cur.execute("select checkType('int', ?)", (42,))
+ val = cur.fetchone()[0]
+ self.failUnlessEqual(val, 1)
+
+ def CheckAggrCheckParamFloat(self):
+ cur = self.con.cursor()
+ cur.execute("select checkType('float', ?)", (3.14,))
+ val = cur.fetchone()[0]
+ self.failUnlessEqual(val, 1)
+
+ def CheckAggrCheckParamNone(self):
+ cur = self.con.cursor()
+ cur.execute("select checkType('None', ?)", (None,))
+ val = cur.fetchone()[0]
+ self.failUnlessEqual(val, 1)
+
+ def CheckAggrCheckParamBlob(self):
+ cur = self.con.cursor()
+ cur.execute("select checkType('blob', ?)", (buffer("blob"),))
+ val = cur.fetchone()[0]
+ self.failUnlessEqual(val, 1)
+
+ def CheckAggrCheckAggrSum(self):
+ cur = self.con.cursor()
+ cur.execute("delete from test")
+ cur.executemany("insert into test(i) values (?)", [(10,), (20,), (30,)])
+ cur.execute("select mysum(i) from test")
+ val = cur.fetchone()[0]
+ self.failUnlessEqual(val, 60)
+
+def suite():
+ function_suite = unittest.makeSuite(FunctionTests, "Check")
+ aggregate_suite = unittest.makeSuite(AggregateTests, "Check")
+ return unittest.TestSuite((function_suite, aggregate_suite))
+
+def test():
+ runner = unittest.TextTestRunner()
+ runner.run(suite())
+
+if __name__ == "__main__":
+ test()
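
A compact sketch of the two registration calls exercised above (function and table names are illustrative):

    import sqlite3

    class Sum:
        # an aggregate is any class providing step() and finalize()
        def __init__(self):
            self.total = 0
        def step(self, value):
            self.total += value
        def finalize(self):
            return self.total

    con = sqlite3.connect(":memory:")
    con.create_function("twice", 1, lambda v: v * 2)   # name, n_args, callable
    con.create_aggregate("mysum", 1, Sum)              # name, n_args, class
    con.execute("create table t(i)")
    con.executemany("insert into t(i) values (?)", [(1,), (2,), (3,)])
    print con.execute("select twice(mysum(i)) from t").fetchone()[0]   # 12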
diff --git a/Lib/sre.py b/Lib/sre.py
new file mode 100644
index 0000000..390094a
--- /dev/null
+++ b/Lib/sre.py
@@ -0,0 +1,10 @@
+"""This file is only retained for backwards compatibility.
+It will be removed in the future. sre was moved to re in version 2.5.
+"""
+
+import warnings
+warnings.warn("The sre module is deprecated, please import re.",
+ DeprecationWarning, 2)
+
+from re import *
+from re import __all__
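
The stacklevel argument of 2 makes the warning point at the module doing the import rather than at sre.py itself; for instance:

    import warnings
    warnings.simplefilter("always")
    import sre    # the DeprecationWarning is reported against this line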
diff --git a/Lib/subprocess.py b/Lib/subprocess.py
index 6827244..3cd0df5 100644
--- a/Lib/subprocess.py
+++ b/Lib/subprocess.py
@@ -414,7 +414,13 @@ _active = []
def _cleanup():
for inst in _active[:]:
- inst.poll()
+ if inst.poll(_deadstate=sys.maxint) >= 0:
+ try:
+ _active.remove(inst)
+ except ValueError:
+ # This can happen if two threads create a new Popen instance.
+ # It's harmless that it was already removed, so ignore.
+ pass
PIPE = -1
STDOUT = -2
@@ -527,6 +533,7 @@ class Popen(object):
"""Create new Popen instance."""
_cleanup()
+ self._child_created = False
if not isinstance(bufsize, (int, long)):
raise TypeError("bufsize must be an integer")
@@ -592,14 +599,24 @@ class Popen(object):
else:
self.stderr = os.fdopen(errread, 'rb', bufsize)
- _active.append(self)
-
def _translate_newlines(self, data):
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
return data
+
+ def __del__(self):
+ if not self._child_created:
+ # We didn't get to successfully create a child process.
+ return
+ # In case the child hasn't been waited on, check if it's done.
+ self.poll(_deadstate=sys.maxint)
+ if self.returncode is None:
+ # Child is still running, keep us alive until we can wait on it.
+ _active.append(self)
+
+
def communicate(self, input=None):
"""Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
@@ -777,6 +794,7 @@ class Popen(object):
raise WindowsError(*e.args)
# Retain the process handle, but close the thread handle
+ self._child_created = True
self._handle = hp
self.pid = pid
ht.Close()
@@ -795,13 +813,12 @@ class Popen(object):
errwrite.Close()
- def poll(self):
+ def poll(self, _deadstate=None):
"""Check if child process has terminated. Returns returncode
attribute."""
if self.returncode is None:
if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0:
self.returncode = GetExitCodeProcess(self._handle)
- _active.remove(self)
return self.returncode
@@ -811,7 +828,6 @@ class Popen(object):
if self.returncode is None:
obj = WaitForSingleObject(self._handle, INFINITE)
self.returncode = GetExitCodeProcess(self._handle)
- _active.remove(self)
return self.returncode
@@ -958,6 +974,7 @@ class Popen(object):
self._set_cloexec_flag(errpipe_write)
self.pid = os.fork()
+ self._child_created = True
if self.pid == 0:
# Child
try:
@@ -1042,10 +1059,8 @@ class Popen(object):
# Should never happen
raise RuntimeError("Unknown child exit status!")
- _active.remove(self)
-
- def poll(self):
+ def poll(self, _deadstate=None):
"""Check if child process has terminated. Returns returncode
attribute."""
if self.returncode is None:
@@ -1054,7 +1069,8 @@ class Popen(object):
if pid == self.pid:
self._handle_exitstatus(sts)
except os.error:
- pass
+ if _deadstate is not None:
+ self.returncode = _deadstate
return self.returncode
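
The hunks above add a deferred-reaping scheme: __del__ must not block, so a still-running child is parked on the module-level _active list and re-polled on the next Popen construction, with _deadstate standing in for a real status when the child has already been reaped elsewhere. The idea in isolation, with illustrative names rather than the module's internals:

    import os

    _unreaped = []                  # children whose exit status is still owed

    def _cleanup():
        # run whenever a new child is spawned: reap whatever has exited,
        # leave the rest for a later pass
        for child in _unreaped[:]:
            if child.poll(deadstate=-1) is not None:
                try:
                    _unreaped.remove(child)
                except ValueError:
                    pass            # another thread got there first; harmless

    class Child(object):
        def __init__(self, pid):
            self.pid = pid
            self.returncode = None

        def poll(self, deadstate=None):
            if self.returncode is None:
                try:
                    pid, status = os.waitpid(self.pid, os.WNOHANG)
                    if pid == self.pid:
                        self.returncode = status   # raw wait status
                except os.error:
                    if deadstate is not None:
                        self.returncode = deadstate   # child already reaped elsewhere
            return self.returncode

        def __del__(self):
            # a destructor must not block, so a live child is parked
            # for the next _cleanup() pass instead of being waited on
            if self.poll(deadstate=-1) is None:
                _unreaped.append(self)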
diff --git a/Lib/telnetlib.py b/Lib/telnetlib.py
index 8a2513b..3523037 100644
--- a/Lib/telnetlib.py
+++ b/Lib/telnetlib.py
@@ -438,7 +438,7 @@ class Telnet:
else:
self.iacseq += c
elif len(self.iacseq) == 1:
- 'IAC: IAC CMD [OPTION only for WILL/WONT/DO/DONT]'
+ # 'IAC: IAC CMD [OPTION only for WILL/WONT/DO/DONT]'
if c in (DO, DONT, WILL, WONT):
self.iacseq += c
continue
diff --git a/Lib/test/check_soundcard.vbs b/Lib/test/check_soundcard.vbs
new file mode 100644
index 0000000..8c21852
--- /dev/null
+++ b/Lib/test/check_soundcard.vbs
@@ -0,0 +1,13 @@
+rem Check for a working sound-card - exit with 0 if OK, 1 otherwise.
+set wmi = GetObject("winmgmts:")
+set scs = wmi.InstancesOf("win32_sounddevice")
+for each sc in scs
+ set status = sc.Properties_("Status")
+ wscript.Echo(sc.Properties_("Name") + "/" + status)
+ if status = "OK" then
+        wscript.Quit 0 : rem normal exit
+ end if
+next
+rem No sound card found - exit with status code of 1
+wscript.Quit 1
+
diff --git a/Lib/test/crashers/README b/Lib/test/crashers/README
index 9369282..070c3f1 100644
--- a/Lib/test/crashers/README
+++ b/Lib/test/crashers/README
@@ -13,3 +13,8 @@ Each test should have a link to the bug report:
Put as much info into a docstring or comments to help determine
the cause of the failure. Particularly note if the cause is
system or environment dependent and what the variables are.
+
+Once the crash is fixed, the test case should be moved into an appropriate
+test (even if it was originally from the test suite). This ensures the
+regression doesn't happen again. And if it does, it should be easier
+to track down.
diff --git a/Lib/test/crashers/dictresize_attack.py b/Lib/test/crashers/dictresize_attack.py
new file mode 100644
index 0000000..1895791
--- /dev/null
+++ b/Lib/test/crashers/dictresize_attack.py
@@ -0,0 +1,32 @@
+# http://www.python.org/sf/1456209
+
+# A dictresize() attack. If oldtable == mp->ma_smalltable then pure
+# Python code can tamper with mp->ma_smalltable while it is being walked
+# over.
+
+class X(object):
+
+ def __hash__(self):
+ return 5
+
+ def __eq__(self, other):
+ if resizing:
+ d.clear()
+ return False
+
+
+d = {}
+
+resizing = False
+
+d[X()] = 1
+d[X()] = 2
+d[X()] = 3
+d[X()] = 4
+d[X()] = 5
+
+# now trigger a resize
+resizing = True
+d[9] = 6
+
+# ^^^ I get Segmentation fault or Illegal instruction here.
diff --git a/Lib/test/crashers/nasty_eq_vs_dict.py b/Lib/test/crashers/nasty_eq_vs_dict.py
new file mode 100644
index 0000000..3f3083d
--- /dev/null
+++ b/Lib/test/crashers/nasty_eq_vs_dict.py
@@ -0,0 +1,47 @@
+# from http://mail.python.org/pipermail/python-dev/2001-June/015239.html
+
+# if you keep changing a dictionary while looking up a key, you can
+# provoke an infinite recursion in C
+
+# At the time neither Tim nor Michael could be bothered to think of a
+# way to fix it.
+
+class Yuck:
+ def __init__(self):
+ self.i = 0
+
+ def make_dangerous(self):
+ self.i = 1
+
+ def __hash__(self):
+ # direct to slot 4 in table of size 8; slot 12 when size 16
+ return 4 + 8
+
+ def __eq__(self, other):
+ if self.i == 0:
+ # leave dict alone
+ pass
+ elif self.i == 1:
+ # fiddle to 16 slots
+ self.__fill_dict(6)
+ self.i = 2
+ else:
+ # fiddle to 8 slots
+ self.__fill_dict(4)
+ self.i = 1
+
+ return 1
+
+ def __fill_dict(self, n):
+ self.i = 0
+ dict.clear()
+ for i in range(n):
+ dict[i] = i
+ dict[self] = "OK!"
+
+y = Yuck()
+dict = {y: "OK!"}
+
+z = Yuck()
+y.make_dangerous()
+print dict[z]
diff --git a/Lib/test/empty.vbs b/Lib/test/empty.vbs
new file mode 100644
index 0000000..f35f076
--- /dev/null
+++ b/Lib/test/empty.vbs
@@ -0,0 +1 @@
+'Empty VBS file, does nothing. Helper for Lib\test\test_startfile.py. \ No newline at end of file
diff --git a/Lib/test/fork_wait.py b/Lib/test/fork_wait.py
new file mode 100644
index 0000000..5600bdb
--- /dev/null
+++ b/Lib/test/fork_wait.py
@@ -0,0 +1,71 @@
+"""This test case provides support for checking forking and wait behavior.
+
+To test different wait behavior, override the wait_impl method.
+
+We want fork1() semantics -- only the forking thread survives in the
+child after a fork().
+
+On some systems (e.g. Solaris without posix threads) we find that all
+active threads survive in the child after a fork(); this is an error.
+
+While BeOS doesn't officially support fork and native threading in
+the same application, the present example should work just fine. DC
+"""
+
+import os, sys, time, thread, unittest
+from test.test_support import TestSkipped
+
+LONGSLEEP = 2
+SHORTSLEEP = 0.5
+NUM_THREADS = 4
+
+class ForkWait(unittest.TestCase):
+
+ def setUp(self):
+ self.alive = {}
+ self.stop = 0
+
+ def f(self, id):
+ while not self.stop:
+ self.alive[id] = os.getpid()
+ try:
+ time.sleep(SHORTSLEEP)
+ except IOError:
+ pass
+
+ def wait_impl(self, cpid):
+ spid, status = os.waitpid(cpid, 0)
+ self.assertEquals(spid, cpid)
+ self.assertEquals(status, 0, "cause = %d, exit = %d" % (status&0xff, status>>8))
+
+ def test_wait(self):
+ for i in range(NUM_THREADS):
+ thread.start_new(self.f, (i,))
+
+ time.sleep(LONGSLEEP)
+
+ a = self.alive.keys()
+ a.sort()
+ self.assertEquals(a, range(NUM_THREADS))
+
+ prefork_lives = self.alive.copy()
+
+ if sys.platform in ['unixware7']:
+ cpid = os.fork1()
+ else:
+ cpid = os.fork()
+
+ if cpid == 0:
+ # Child
+ time.sleep(LONGSLEEP)
+ n = 0
+ for key in self.alive:
+ if self.alive[key] != prefork_lives[key]:
+ n += 1
+ os._exit(n)
+ else:
+ # Parent
+ self.wait_impl(cpid)
+ # Tell threads to die
+ self.stop = 1
+ time.sleep(2*SHORTSLEEP) # Wait for threads to die
diff --git a/Lib/test/leakers/README.txt b/Lib/test/leakers/README.txt
index 69ee35a..beeee0e 100644
--- a/Lib/test/leakers/README.txt
+++ b/Lib/test/leakers/README.txt
@@ -5,6 +5,15 @@ the interpreter was built in debug mode. If the total ref count
doesn't increase, the bug has been fixed and the file should be removed
from the repository.
+Note: be careful to check for cyclic garbage. Sometimes it may be helpful
+to define the leak function like:
+
+def leak():
+ def inner_leak():
+ # this is the function that leaks, but also creates cycles
+ inner_leak()
+ gc.collect() ; gc.collect() ; gc.collect()
+
Here's an example interpreter session for test_gestalt which still leaks:
>>> from test.leakers.test_gestalt import leak
@@ -17,3 +26,7 @@ Here's an example interpreter session for test_gestalt which still leaks:
[28940 refs]
>>>
+Once the leak is fixed, the test case should be moved into an appropriate
+test (even if it was originally from the test suite). This ensures the
+regression doesn't happen again. And if it does, it should be easier
+to track down.
diff --git a/Lib/test/leakers/test_ctypes.py b/Lib/test/leakers/test_ctypes.py
new file mode 100644
index 0000000..0f9a2cd
--- /dev/null
+++ b/Lib/test/leakers/test_ctypes.py
@@ -0,0 +1,16 @@
+
+# Taken from Lib/ctypes/test/test_keeprefs.py, PointerToStructure.test().
+# When this leak is fixed, remember to remove from Misc/build.sh LEAKY_TESTS.
+
+from ctypes import Structure, c_int, POINTER
+import gc
+
+def leak_inner():
+ class POINT(Structure):
+ _fields_ = [("x", c_int)]
+ class RECT(Structure):
+ _fields_ = [("a", POINTER(POINT))]
+
+def leak():
+ leak_inner()
+ gc.collect()
diff --git a/Lib/test/leakers/test_selftype.py b/Lib/test/leakers/test_selftype.py
new file mode 100644
index 0000000..4207c32
--- /dev/null
+++ b/Lib/test/leakers/test_selftype.py
@@ -0,0 +1,13 @@
+# Reference cycles involving only the ob_type field are rather uncommon
+# but possible. Inspired by SF bug 1469629.
+
+import gc
+
+def leak():
+ class T(type):
+ pass
+ class U(type):
+ __metaclass__ = T
+ U.__class__ = U
+ del U
+ gc.collect(); gc.collect(); gc.collect()
diff --git a/Lib/test/leakers/test_tee.py b/Lib/test/leakers/test_tee.py
deleted file mode 100644
index 4ce24ca..0000000
--- a/Lib/test/leakers/test_tee.py
+++ /dev/null
@@ -1,19 +0,0 @@
-
-# Test case taken from test_itertools
-# See http://mail.python.org/pipermail/python-dev/2005-November/058339.html
-
-from itertools import tee
-
-def leak():
- def fib():
- def yield_identity_forever(g):
- while 1:
- yield g
- def _fib():
- for i in yield_identity_forever(head):
- yield i
- head, tail, result = tee(_fib(), 3)
- return result
-
- x = fib()
- x.next()
diff --git a/Lib/test/output/test_augassign b/Lib/test/output/test_augassign
deleted file mode 100644
index b66b7e5..0000000
--- a/Lib/test/output/test_augassign
+++ /dev/null
@@ -1,54 +0,0 @@
-test_augassign
-6.0
-6
-[6.0]
-6
-6.0
-6
-[1, 2, 3, 4, 1, 2, 3, 4]
-[1, 2, 1, 2, 3]
-True
-True
-True
-11
-True
-12
-True
-True
-13
-__add__ called
-__radd__ called
-__iadd__ called
-__sub__ called
-__rsub__ called
-__isub__ called
-__mul__ called
-__rmul__ called
-__imul__ called
-__truediv__ called
-__rtruediv__ called
-__itruediv__ called
-__floordiv__ called
-__rfloordiv__ called
-__ifloordiv__ called
-__mod__ called
-__rmod__ called
-__imod__ called
-__pow__ called
-__rpow__ called
-__ipow__ called
-__or__ called
-__ror__ called
-__ior__ called
-__and__ called
-__rand__ called
-__iand__ called
-__xor__ called
-__rxor__ called
-__ixor__ called
-__rshift__ called
-__rrshift__ called
-__irshift__ called
-__lshift__ called
-__rlshift__ called
-__ilshift__ called
diff --git a/Lib/test/output/test_coercion b/Lib/test/output/test_coercion
deleted file mode 100644
index ad35b60..0000000
--- a/Lib/test/output/test_coercion
+++ /dev/null
@@ -1,1054 +0,0 @@
-test_coercion
-2 + 2 = 4
-2 += 2 => 4
-2 - 2 = 0
-2 -= 2 => 0
-2 * 2 = 4
-2 *= 2 => 4
-2 / 2 = 1
-2 /= 2 => 1
-2 ** 2 = 4
-2 **= 2 => 4
-2 % 2 = 0
-2 %= 2 => 0
-2 + 4.0 = 6.0
-2 += 4.0 => 6.0
-2 - 4.0 = -2.0
-2 -= 4.0 => -2.0
-2 * 4.0 = 8.0
-2 *= 4.0 => 8.0
-2 / 4.0 = 0.5
-2 /= 4.0 => 0.5
-2 ** 4.0 = 16.0
-2 **= 4.0 => 16.0
-2 % 4.0 = 2.0
-2 %= 4.0 => 2.0
-2 + 2 = 4
-2 += 2 => 4
-2 - 2 = 0
-2 -= 2 => 0
-2 * 2 = 4
-2 *= 2 => 4
-2 / 2 = 1
-2 /= 2 => 1
-2 ** 2 = 4
-2 **= 2 => 4
-2 % 2 = 0
-2 %= 2 => 0
-2 + (2+0j) = (4.0 + 0.0j)
-2 += (2+0j) => (4.0 + 0.0j)
-2 - (2+0j) = (0.0 + 0.0j)
-2 -= (2+0j) => (0.0 + 0.0j)
-2 * (2+0j) = (4.0 + 0.0j)
-2 *= (2+0j) => (4.0 + 0.0j)
-2 / (2+0j) = (1.0 + 0.0j)
-2 /= (2+0j) => (1.0 + 0.0j)
-2 ** (2+0j) = (4.0 + 0.0j)
-2 **= (2+0j) => (4.0 + 0.0j)
-2 % (2+0j) = (0.0 + 0.0j)
-2 %= (2+0j) => (0.0 + 0.0j)
-2 + [1] ... exceptions.TypeError
-2 += [1] ... exceptions.TypeError
-2 - [1] ... exceptions.TypeError
-2 -= [1] ... exceptions.TypeError
-2 * [1] = [1, 1]
-2 *= [1] => [1, 1]
-2 / [1] ... exceptions.TypeError
-2 /= [1] ... exceptions.TypeError
-2 ** [1] ... exceptions.TypeError
-2 **= [1] ... exceptions.TypeError
-2 % [1] ... exceptions.TypeError
-2 %= [1] ... exceptions.TypeError
-2 + (2,) ... exceptions.TypeError
-2 += (2,) ... exceptions.TypeError
-2 - (2,) ... exceptions.TypeError
-2 -= (2,) ... exceptions.TypeError
-2 * (2,) = (2, 2)
-2 *= (2,) => (2, 2)
-2 / (2,) ... exceptions.TypeError
-2 /= (2,) ... exceptions.TypeError
-2 ** (2,) ... exceptions.TypeError
-2 **= (2,) ... exceptions.TypeError
-2 % (2,) ... exceptions.TypeError
-2 %= (2,) ... exceptions.TypeError
-2 + None ... exceptions.TypeError
-2 += None ... exceptions.TypeError
-2 - None ... exceptions.TypeError
-2 -= None ... exceptions.TypeError
-2 * None ... exceptions.TypeError
-2 *= None ... exceptions.TypeError
-2 / None ... exceptions.TypeError
-2 /= None ... exceptions.TypeError
-2 ** None ... exceptions.TypeError
-2 **= None ... exceptions.TypeError
-2 % None ... exceptions.TypeError
-2 %= None ... exceptions.TypeError
-2 + <MethodNumber 2> = 4
-2 += <MethodNumber 2> => 4
-2 - <MethodNumber 2> = 0
-2 -= <MethodNumber 2> => 0
-2 * <MethodNumber 2> = 4
-2 *= <MethodNumber 2> => 4
-2 / <MethodNumber 2> = 1
-2 /= <MethodNumber 2> => 1
-2 ** <MethodNumber 2> = 4
-2 **= <MethodNumber 2> => 4
-2 % <MethodNumber 2> = 0
-2 %= <MethodNumber 2> => 0
-2 + <CoerceNumber 2> = 4
-2 += <CoerceNumber 2> => 4
-2 - <CoerceNumber 2> = 0
-2 -= <CoerceNumber 2> => 0
-2 * <CoerceNumber 2> = 4
-2 *= <CoerceNumber 2> => 4
-2 / <CoerceNumber 2> = 1
-2 /= <CoerceNumber 2> => 1
-2 ** <CoerceNumber 2> = 4
-2 **= <CoerceNumber 2> => 4
-2 % <CoerceNumber 2> = 0
-2 %= <CoerceNumber 2> => 0
-4.0 + 2 = 6.0
-4.0 += 2 => 6.0
-4.0 - 2 = 2.0
-4.0 -= 2 => 2.0
-4.0 * 2 = 8.0
-4.0 *= 2 => 8.0
-4.0 / 2 = 2.0
-4.0 /= 2 => 2.0
-4.0 ** 2 = 16.0
-4.0 **= 2 => 16.0
-4.0 % 2 = 0.0
-4.0 %= 2 => 0.0
-4.0 + 4.0 = 8.0
-4.0 += 4.0 => 8.0
-4.0 - 4.0 = 0.0
-4.0 -= 4.0 => 0.0
-4.0 * 4.0 = 16.0
-4.0 *= 4.0 => 16.0
-4.0 / 4.0 = 1.0
-4.0 /= 4.0 => 1.0
-4.0 ** 4.0 = 256.0
-4.0 **= 4.0 => 256.0
-4.0 % 4.0 = 0.0
-4.0 %= 4.0 => 0.0
-4.0 + 2 = 6.0
-4.0 += 2 => 6.0
-4.0 - 2 = 2.0
-4.0 -= 2 => 2.0
-4.0 * 2 = 8.0
-4.0 *= 2 => 8.0
-4.0 / 2 = 2.0
-4.0 /= 2 => 2.0
-4.0 ** 2 = 16.0
-4.0 **= 2 => 16.0
-4.0 % 2 = 0.0
-4.0 %= 2 => 0.0
-4.0 + (2+0j) = (6.0 + 0.0j)
-4.0 += (2+0j) => (6.0 + 0.0j)
-4.0 - (2+0j) = (2.0 + 0.0j)
-4.0 -= (2+0j) => (2.0 + 0.0j)
-4.0 * (2+0j) = (8.0 + 0.0j)
-4.0 *= (2+0j) => (8.0 + 0.0j)
-4.0 / (2+0j) = (2.0 + 0.0j)
-4.0 /= (2+0j) => (2.0 + 0.0j)
-4.0 ** (2+0j) = (16.0 + 0.0j)
-4.0 **= (2+0j) => (16.0 + 0.0j)
-4.0 % (2+0j) = (0.0 + 0.0j)
-4.0 %= (2+0j) => (0.0 + 0.0j)
-4.0 + [1] ... exceptions.TypeError
-4.0 += [1] ... exceptions.TypeError
-4.0 - [1] ... exceptions.TypeError
-4.0 -= [1] ... exceptions.TypeError
-4.0 * [1] ... exceptions.TypeError
-4.0 *= [1] ... exceptions.TypeError
-4.0 / [1] ... exceptions.TypeError
-4.0 /= [1] ... exceptions.TypeError
-4.0 ** [1] ... exceptions.TypeError
-4.0 **= [1] ... exceptions.TypeError
-4.0 % [1] ... exceptions.TypeError
-4.0 %= [1] ... exceptions.TypeError
-4.0 + (2,) ... exceptions.TypeError
-4.0 += (2,) ... exceptions.TypeError
-4.0 - (2,) ... exceptions.TypeError
-4.0 -= (2,) ... exceptions.TypeError
-4.0 * (2,) ... exceptions.TypeError
-4.0 *= (2,) ... exceptions.TypeError
-4.0 / (2,) ... exceptions.TypeError
-4.0 /= (2,) ... exceptions.TypeError
-4.0 ** (2,) ... exceptions.TypeError
-4.0 **= (2,) ... exceptions.TypeError
-4.0 % (2,) ... exceptions.TypeError
-4.0 %= (2,) ... exceptions.TypeError
-4.0 + None ... exceptions.TypeError
-4.0 += None ... exceptions.TypeError
-4.0 - None ... exceptions.TypeError
-4.0 -= None ... exceptions.TypeError
-4.0 * None ... exceptions.TypeError
-4.0 *= None ... exceptions.TypeError
-4.0 / None ... exceptions.TypeError
-4.0 /= None ... exceptions.TypeError
-4.0 ** None ... exceptions.TypeError
-4.0 **= None ... exceptions.TypeError
-4.0 % None ... exceptions.TypeError
-4.0 %= None ... exceptions.TypeError
-4.0 + <MethodNumber 2> = 6.0
-4.0 += <MethodNumber 2> => 6.0
-4.0 - <MethodNumber 2> = 2.0
-4.0 -= <MethodNumber 2> => 2.0
-4.0 * <MethodNumber 2> = 8.0
-4.0 *= <MethodNumber 2> => 8.0
-4.0 / <MethodNumber 2> = 2.0
-4.0 /= <MethodNumber 2> => 2.0
-4.0 ** <MethodNumber 2> = 16.0
-4.0 **= <MethodNumber 2> => 16.0
-4.0 % <MethodNumber 2> = 0.0
-4.0 %= <MethodNumber 2> => 0.0
-4.0 + <CoerceNumber 2> = 6.0
-4.0 += <CoerceNumber 2> => 6.0
-4.0 - <CoerceNumber 2> = 2.0
-4.0 -= <CoerceNumber 2> => 2.0
-4.0 * <CoerceNumber 2> = 8.0
-4.0 *= <CoerceNumber 2> => 8.0
-4.0 / <CoerceNumber 2> = 2.0
-4.0 /= <CoerceNumber 2> => 2.0
-4.0 ** <CoerceNumber 2> = 16.0
-4.0 **= <CoerceNumber 2> => 16.0
-4.0 % <CoerceNumber 2> = 0.0
-4.0 %= <CoerceNumber 2> => 0.0
-2 + 2 = 4
-2 += 2 => 4
-2 - 2 = 0
-2 -= 2 => 0
-2 * 2 = 4
-2 *= 2 => 4
-2 / 2 = 1
-2 /= 2 => 1
-2 ** 2 = 4
-2 **= 2 => 4
-2 % 2 = 0
-2 %= 2 => 0
-2 + 4.0 = 6.0
-2 += 4.0 => 6.0
-2 - 4.0 = -2.0
-2 -= 4.0 => -2.0
-2 * 4.0 = 8.0
-2 *= 4.0 => 8.0
-2 / 4.0 = 0.5
-2 /= 4.0 => 0.5
-2 ** 4.0 = 16.0
-2 **= 4.0 => 16.0
-2 % 4.0 = 2.0
-2 %= 4.0 => 2.0
-2 + 2 = 4
-2 += 2 => 4
-2 - 2 = 0
-2 -= 2 => 0
-2 * 2 = 4
-2 *= 2 => 4
-2 / 2 = 1
-2 /= 2 => 1
-2 ** 2 = 4
-2 **= 2 => 4
-2 % 2 = 0
-2 %= 2 => 0
-2 + (2+0j) = (4.0 + 0.0j)
-2 += (2+0j) => (4.0 + 0.0j)
-2 - (2+0j) = (0.0 + 0.0j)
-2 -= (2+0j) => (0.0 + 0.0j)
-2 * (2+0j) = (4.0 + 0.0j)
-2 *= (2+0j) => (4.0 + 0.0j)
-2 / (2+0j) = (1.0 + 0.0j)
-2 /= (2+0j) => (1.0 + 0.0j)
-2 ** (2+0j) = (4.0 + 0.0j)
-2 **= (2+0j) => (4.0 + 0.0j)
-2 % (2+0j) = (0.0 + 0.0j)
-2 %= (2+0j) => (0.0 + 0.0j)
-2 + [1] ... exceptions.TypeError
-2 += [1] ... exceptions.TypeError
-2 - [1] ... exceptions.TypeError
-2 -= [1] ... exceptions.TypeError
-2 * [1] = [1, 1]
-2 *= [1] => [1, 1]
-2 / [1] ... exceptions.TypeError
-2 /= [1] ... exceptions.TypeError
-2 ** [1] ... exceptions.TypeError
-2 **= [1] ... exceptions.TypeError
-2 % [1] ... exceptions.TypeError
-2 %= [1] ... exceptions.TypeError
-2 + (2,) ... exceptions.TypeError
-2 += (2,) ... exceptions.TypeError
-2 - (2,) ... exceptions.TypeError
-2 -= (2,) ... exceptions.TypeError
-2 * (2,) = (2, 2)
-2 *= (2,) => (2, 2)
-2 / (2,) ... exceptions.TypeError
-2 /= (2,) ... exceptions.TypeError
-2 ** (2,) ... exceptions.TypeError
-2 **= (2,) ... exceptions.TypeError
-2 % (2,) ... exceptions.TypeError
-2 %= (2,) ... exceptions.TypeError
-2 + None ... exceptions.TypeError
-2 += None ... exceptions.TypeError
-2 - None ... exceptions.TypeError
-2 -= None ... exceptions.TypeError
-2 * None ... exceptions.TypeError
-2 *= None ... exceptions.TypeError
-2 / None ... exceptions.TypeError
-2 /= None ... exceptions.TypeError
-2 ** None ... exceptions.TypeError
-2 **= None ... exceptions.TypeError
-2 % None ... exceptions.TypeError
-2 %= None ... exceptions.TypeError
-2 + <MethodNumber 2> = 4
-2 += <MethodNumber 2> => 4
-2 - <MethodNumber 2> = 0
-2 -= <MethodNumber 2> => 0
-2 * <MethodNumber 2> = 4
-2 *= <MethodNumber 2> => 4
-2 / <MethodNumber 2> = 1
-2 /= <MethodNumber 2> => 1
-2 ** <MethodNumber 2> = 4
-2 **= <MethodNumber 2> => 4
-2 % <MethodNumber 2> = 0
-2 %= <MethodNumber 2> => 0
-2 + <CoerceNumber 2> = 4
-2 += <CoerceNumber 2> => 4
-2 - <CoerceNumber 2> = 0
-2 -= <CoerceNumber 2> => 0
-2 * <CoerceNumber 2> = 4
-2 *= <CoerceNumber 2> => 4
-2 / <CoerceNumber 2> = 1
-2 /= <CoerceNumber 2> => 1
-2 ** <CoerceNumber 2> = 4
-2 **= <CoerceNumber 2> => 4
-2 % <CoerceNumber 2> = 0
-2 %= <CoerceNumber 2> => 0
-(2+0j) + 2 = (4.0 + 0.0j)
-(2+0j) += 2 => (4.0 + 0.0j)
-(2+0j) - 2 = (0.0 + 0.0j)
-(2+0j) -= 2 => (0.0 + 0.0j)
-(2+0j) * 2 = (4.0 + 0.0j)
-(2+0j) *= 2 => (4.0 + 0.0j)
-(2+0j) / 2 = (1.0 + 0.0j)
-(2+0j) /= 2 => (1.0 + 0.0j)
-(2+0j) ** 2 = (4.0 + 0.0j)
-(2+0j) **= 2 => (4.0 + 0.0j)
-(2+0j) % 2 = (0.0 + 0.0j)
-(2+0j) %= 2 => (0.0 + 0.0j)
-(2+0j) + 4.0 = (6.0 + 0.0j)
-(2+0j) += 4.0 => (6.0 + 0.0j)
-(2+0j) - 4.0 = (-2.0 + 0.0j)
-(2+0j) -= 4.0 => (-2.0 + 0.0j)
-(2+0j) * 4.0 = (8.0 + 0.0j)
-(2+0j) *= 4.0 => (8.0 + 0.0j)
-(2+0j) / 4.0 = (0.5 + 0.0j)
-(2+0j) /= 4.0 => (0.5 + 0.0j)
-(2+0j) ** 4.0 = (16.0 + 0.0j)
-(2+0j) **= 4.0 => (16.0 + 0.0j)
-(2+0j) % 4.0 = (2.0 + 0.0j)
-(2+0j) %= 4.0 => (2.0 + 0.0j)
-(2+0j) + 2 = (4.0 + 0.0j)
-(2+0j) += 2 => (4.0 + 0.0j)
-(2+0j) - 2 = (0.0 + 0.0j)
-(2+0j) -= 2 => (0.0 + 0.0j)
-(2+0j) * 2 = (4.0 + 0.0j)
-(2+0j) *= 2 => (4.0 + 0.0j)
-(2+0j) / 2 = (1.0 + 0.0j)
-(2+0j) /= 2 => (1.0 + 0.0j)
-(2+0j) ** 2 = (4.0 + 0.0j)
-(2+0j) **= 2 => (4.0 + 0.0j)
-(2+0j) % 2 = (0.0 + 0.0j)
-(2+0j) %= 2 => (0.0 + 0.0j)
-(2+0j) + (2+0j) = (4.0 + 0.0j)
-(2+0j) += (2+0j) => (4.0 + 0.0j)
-(2+0j) - (2+0j) = (0.0 + 0.0j)
-(2+0j) -= (2+0j) => (0.0 + 0.0j)
-(2+0j) * (2+0j) = (4.0 + 0.0j)
-(2+0j) *= (2+0j) => (4.0 + 0.0j)
-(2+0j) / (2+0j) = (1.0 + 0.0j)
-(2+0j) /= (2+0j) => (1.0 + 0.0j)
-(2+0j) ** (2+0j) = (4.0 + 0.0j)
-(2+0j) **= (2+0j) => (4.0 + 0.0j)
-(2+0j) % (2+0j) = (0.0 + 0.0j)
-(2+0j) %= (2+0j) => (0.0 + 0.0j)
-(2+0j) + [1] ... exceptions.TypeError
-(2+0j) += [1] ... exceptions.TypeError
-(2+0j) - [1] ... exceptions.TypeError
-(2+0j) -= [1] ... exceptions.TypeError
-(2+0j) * [1] ... exceptions.TypeError
-(2+0j) *= [1] ... exceptions.TypeError
-(2+0j) / [1] ... exceptions.TypeError
-(2+0j) /= [1] ... exceptions.TypeError
-(2+0j) ** [1] ... exceptions.TypeError
-(2+0j) **= [1] ... exceptions.TypeError
-(2+0j) % [1] ... exceptions.TypeError
-(2+0j) %= [1] ... exceptions.TypeError
-(2+0j) + (2,) ... exceptions.TypeError
-(2+0j) += (2,) ... exceptions.TypeError
-(2+0j) - (2,) ... exceptions.TypeError
-(2+0j) -= (2,) ... exceptions.TypeError
-(2+0j) * (2,) ... exceptions.TypeError
-(2+0j) *= (2,) ... exceptions.TypeError
-(2+0j) / (2,) ... exceptions.TypeError
-(2+0j) /= (2,) ... exceptions.TypeError
-(2+0j) ** (2,) ... exceptions.TypeError
-(2+0j) **= (2,) ... exceptions.TypeError
-(2+0j) % (2,) ... exceptions.TypeError
-(2+0j) %= (2,) ... exceptions.TypeError
-(2+0j) + None ... exceptions.TypeError
-(2+0j) += None ... exceptions.TypeError
-(2+0j) - None ... exceptions.TypeError
-(2+0j) -= None ... exceptions.TypeError
-(2+0j) * None ... exceptions.TypeError
-(2+0j) *= None ... exceptions.TypeError
-(2+0j) / None ... exceptions.TypeError
-(2+0j) /= None ... exceptions.TypeError
-(2+0j) ** None ... exceptions.TypeError
-(2+0j) **= None ... exceptions.TypeError
-(2+0j) % None ... exceptions.TypeError
-(2+0j) %= None ... exceptions.TypeError
-(2+0j) + <MethodNumber 2> = (4.0 + 0.0j)
-(2+0j) += <MethodNumber 2> => (4.0 + 0.0j)
-(2+0j) - <MethodNumber 2> = (0.0 + 0.0j)
-(2+0j) -= <MethodNumber 2> => (0.0 + 0.0j)
-(2+0j) * <MethodNumber 2> = (4.0 + 0.0j)
-(2+0j) *= <MethodNumber 2> => (4.0 + 0.0j)
-(2+0j) / <MethodNumber 2> = (1.0 + 0.0j)
-(2+0j) /= <MethodNumber 2> => (1.0 + 0.0j)
-(2+0j) ** <MethodNumber 2> = (4.0 + 0.0j)
-(2+0j) **= <MethodNumber 2> => (4.0 + 0.0j)
-(2+0j) % <MethodNumber 2> = (0.0 + 0.0j)
-(2+0j) %= <MethodNumber 2> => (0.0 + 0.0j)
-(2+0j) + <CoerceNumber 2> = (4.0 + 0.0j)
-(2+0j) += <CoerceNumber 2> => (4.0 + 0.0j)
-(2+0j) - <CoerceNumber 2> = (0.0 + 0.0j)
-(2+0j) -= <CoerceNumber 2> => (0.0 + 0.0j)
-(2+0j) * <CoerceNumber 2> = (4.0 + 0.0j)
-(2+0j) *= <CoerceNumber 2> => (4.0 + 0.0j)
-(2+0j) / <CoerceNumber 2> = (1.0 + 0.0j)
-(2+0j) /= <CoerceNumber 2> => (1.0 + 0.0j)
-(2+0j) ** <CoerceNumber 2> = (4.0 + 0.0j)
-(2+0j) **= <CoerceNumber 2> => (4.0 + 0.0j)
-(2+0j) % <CoerceNumber 2> = (0.0 + 0.0j)
-(2+0j) %= <CoerceNumber 2> => (0.0 + 0.0j)
-[1] + 2 ... exceptions.TypeError
-[1] += 2 ... exceptions.TypeError
-[1] - 2 ... exceptions.TypeError
-[1] -= 2 ... exceptions.TypeError
-[1] * 2 = [1, 1]
-[1] *= 2 => [1, 1]
-[1] / 2 ... exceptions.TypeError
-[1] /= 2 ... exceptions.TypeError
-[1] ** 2 ... exceptions.TypeError
-[1] **= 2 ... exceptions.TypeError
-[1] % 2 ... exceptions.TypeError
-[1] %= 2 ... exceptions.TypeError
-[1] + 4.0 ... exceptions.TypeError
-[1] += 4.0 ... exceptions.TypeError
-[1] - 4.0 ... exceptions.TypeError
-[1] -= 4.0 ... exceptions.TypeError
-[1] * 4.0 ... exceptions.TypeError
-[1] *= 4.0 ... exceptions.TypeError
-[1] / 4.0 ... exceptions.TypeError
-[1] /= 4.0 ... exceptions.TypeError
-[1] ** 4.0 ... exceptions.TypeError
-[1] **= 4.0 ... exceptions.TypeError
-[1] % 4.0 ... exceptions.TypeError
-[1] %= 4.0 ... exceptions.TypeError
-[1] + 2 ... exceptions.TypeError
-[1] += 2 ... exceptions.TypeError
-[1] - 2 ... exceptions.TypeError
-[1] -= 2 ... exceptions.TypeError
-[1] * 2 = [1, 1]
-[1] *= 2 => [1, 1]
-[1] / 2 ... exceptions.TypeError
-[1] /= 2 ... exceptions.TypeError
-[1] ** 2 ... exceptions.TypeError
-[1] **= 2 ... exceptions.TypeError
-[1] % 2 ... exceptions.TypeError
-[1] %= 2 ... exceptions.TypeError
-[1] + (2+0j) ... exceptions.TypeError
-[1] += (2+0j) ... exceptions.TypeError
-[1] - (2+0j) ... exceptions.TypeError
-[1] -= (2+0j) ... exceptions.TypeError
-[1] * (2+0j) ... exceptions.TypeError
-[1] *= (2+0j) ... exceptions.TypeError
-[1] / (2+0j) ... exceptions.TypeError
-[1] /= (2+0j) ... exceptions.TypeError
-[1] ** (2+0j) ... exceptions.TypeError
-[1] **= (2+0j) ... exceptions.TypeError
-[1] % (2+0j) ... exceptions.TypeError
-[1] %= (2+0j) ... exceptions.TypeError
-[1] + [1] = [1, 1]
-[1] += [1] => [1, 1]
-[1] - [1] ... exceptions.TypeError
-[1] -= [1] ... exceptions.TypeError
-[1] * [1] ... exceptions.TypeError
-[1] *= [1] ... exceptions.TypeError
-[1] / [1] ... exceptions.TypeError
-[1] /= [1] ... exceptions.TypeError
-[1] ** [1] ... exceptions.TypeError
-[1] **= [1] ... exceptions.TypeError
-[1] % [1] ... exceptions.TypeError
-[1] %= [1] ... exceptions.TypeError
-[1] + (2,) ... exceptions.TypeError
-[1] += (2,) => [1, 2]
-[1] - (2,) ... exceptions.TypeError
-[1] -= (2,) ... exceptions.TypeError
-[1] * (2,) ... exceptions.TypeError
-[1] *= (2,) ... exceptions.TypeError
-[1] / (2,) ... exceptions.TypeError
-[1] /= (2,) ... exceptions.TypeError
-[1] ** (2,) ... exceptions.TypeError
-[1] **= (2,) ... exceptions.TypeError
-[1] % (2,) ... exceptions.TypeError
-[1] %= (2,) ... exceptions.TypeError
-[1] + None ... exceptions.TypeError
-[1] += None ... exceptions.TypeError
-[1] - None ... exceptions.TypeError
-[1] -= None ... exceptions.TypeError
-[1] * None ... exceptions.TypeError
-[1] *= None ... exceptions.TypeError
-[1] / None ... exceptions.TypeError
-[1] /= None ... exceptions.TypeError
-[1] ** None ... exceptions.TypeError
-[1] **= None ... exceptions.TypeError
-[1] % None ... exceptions.TypeError
-[1] %= None ... exceptions.TypeError
-[1] + <MethodNumber 2> ... exceptions.TypeError
-[1] += <MethodNumber 2> ... exceptions.TypeError
-[1] - <MethodNumber 2> ... exceptions.TypeError
-[1] -= <MethodNumber 2> ... exceptions.TypeError
-[1] * <MethodNumber 2> = [1, 1]
-[1] *= <MethodNumber 2> => [1, 1]
-[1] / <MethodNumber 2> ... exceptions.TypeError
-[1] /= <MethodNumber 2> ... exceptions.TypeError
-[1] ** <MethodNumber 2> ... exceptions.TypeError
-[1] **= <MethodNumber 2> ... exceptions.TypeError
-[1] % <MethodNumber 2> ... exceptions.TypeError
-[1] %= <MethodNumber 2> ... exceptions.TypeError
-[1] + <CoerceNumber 2> ... exceptions.TypeError
-[1] += <CoerceNumber 2> ... exceptions.TypeError
-[1] - <CoerceNumber 2> ... exceptions.TypeError
-[1] -= <CoerceNumber 2> ... exceptions.TypeError
-[1] * <CoerceNumber 2> = [1, 1]
-[1] *= <CoerceNumber 2> => [1, 1]
-[1] / <CoerceNumber 2> ... exceptions.TypeError
-[1] /= <CoerceNumber 2> ... exceptions.TypeError
-[1] ** <CoerceNumber 2> ... exceptions.TypeError
-[1] **= <CoerceNumber 2> ... exceptions.TypeError
-[1] % <CoerceNumber 2> ... exceptions.TypeError
-[1] %= <CoerceNumber 2> ... exceptions.TypeError
-(2,) + 2 ... exceptions.TypeError
-(2,) += 2 ... exceptions.TypeError
-(2,) - 2 ... exceptions.TypeError
-(2,) -= 2 ... exceptions.TypeError
-(2,) * 2 = (2, 2)
-(2,) *= 2 => (2, 2)
-(2,) / 2 ... exceptions.TypeError
-(2,) /= 2 ... exceptions.TypeError
-(2,) ** 2 ... exceptions.TypeError
-(2,) **= 2 ... exceptions.TypeError
-(2,) % 2 ... exceptions.TypeError
-(2,) %= 2 ... exceptions.TypeError
-(2,) + 4.0 ... exceptions.TypeError
-(2,) += 4.0 ... exceptions.TypeError
-(2,) - 4.0 ... exceptions.TypeError
-(2,) -= 4.0 ... exceptions.TypeError
-(2,) * 4.0 ... exceptions.TypeError
-(2,) *= 4.0 ... exceptions.TypeError
-(2,) / 4.0 ... exceptions.TypeError
-(2,) /= 4.0 ... exceptions.TypeError
-(2,) ** 4.0 ... exceptions.TypeError
-(2,) **= 4.0 ... exceptions.TypeError
-(2,) % 4.0 ... exceptions.TypeError
-(2,) %= 4.0 ... exceptions.TypeError
-(2,) + 2 ... exceptions.TypeError
-(2,) += 2 ... exceptions.TypeError
-(2,) - 2 ... exceptions.TypeError
-(2,) -= 2 ... exceptions.TypeError
-(2,) * 2 = (2, 2)
-(2,) *= 2 => (2, 2)
-(2,) / 2 ... exceptions.TypeError
-(2,) /= 2 ... exceptions.TypeError
-(2,) ** 2 ... exceptions.TypeError
-(2,) **= 2 ... exceptions.TypeError
-(2,) % 2 ... exceptions.TypeError
-(2,) %= 2 ... exceptions.TypeError
-(2,) + (2+0j) ... exceptions.TypeError
-(2,) += (2+0j) ... exceptions.TypeError
-(2,) - (2+0j) ... exceptions.TypeError
-(2,) -= (2+0j) ... exceptions.TypeError
-(2,) * (2+0j) ... exceptions.TypeError
-(2,) *= (2+0j) ... exceptions.TypeError
-(2,) / (2+0j) ... exceptions.TypeError
-(2,) /= (2+0j) ... exceptions.TypeError
-(2,) ** (2+0j) ... exceptions.TypeError
-(2,) **= (2+0j) ... exceptions.TypeError
-(2,) % (2+0j) ... exceptions.TypeError
-(2,) %= (2+0j) ... exceptions.TypeError
-(2,) + [1] ... exceptions.TypeError
-(2,) += [1] ... exceptions.TypeError
-(2,) - [1] ... exceptions.TypeError
-(2,) -= [1] ... exceptions.TypeError
-(2,) * [1] ... exceptions.TypeError
-(2,) *= [1] ... exceptions.TypeError
-(2,) / [1] ... exceptions.TypeError
-(2,) /= [1] ... exceptions.TypeError
-(2,) ** [1] ... exceptions.TypeError
-(2,) **= [1] ... exceptions.TypeError
-(2,) % [1] ... exceptions.TypeError
-(2,) %= [1] ... exceptions.TypeError
-(2,) + (2,) = (2, 2)
-(2,) += (2,) => (2, 2)
-(2,) - (2,) ... exceptions.TypeError
-(2,) -= (2,) ... exceptions.TypeError
-(2,) * (2,) ... exceptions.TypeError
-(2,) *= (2,) ... exceptions.TypeError
-(2,) / (2,) ... exceptions.TypeError
-(2,) /= (2,) ... exceptions.TypeError
-(2,) ** (2,) ... exceptions.TypeError
-(2,) **= (2,) ... exceptions.TypeError
-(2,) % (2,) ... exceptions.TypeError
-(2,) %= (2,) ... exceptions.TypeError
-(2,) + None ... exceptions.TypeError
-(2,) += None ... exceptions.TypeError
-(2,) - None ... exceptions.TypeError
-(2,) -= None ... exceptions.TypeError
-(2,) * None ... exceptions.TypeError
-(2,) *= None ... exceptions.TypeError
-(2,) / None ... exceptions.TypeError
-(2,) /= None ... exceptions.TypeError
-(2,) ** None ... exceptions.TypeError
-(2,) **= None ... exceptions.TypeError
-(2,) % None ... exceptions.TypeError
-(2,) %= None ... exceptions.TypeError
-(2,) + <MethodNumber 2> ... exceptions.TypeError
-(2,) += <MethodNumber 2> ... exceptions.TypeError
-(2,) - <MethodNumber 2> ... exceptions.TypeError
-(2,) -= <MethodNumber 2> ... exceptions.TypeError
-(2,) * <MethodNumber 2> = (2, 2)
-(2,) *= <MethodNumber 2> => (2, 2)
-(2,) / <MethodNumber 2> ... exceptions.TypeError
-(2,) /= <MethodNumber 2> ... exceptions.TypeError
-(2,) ** <MethodNumber 2> ... exceptions.TypeError
-(2,) **= <MethodNumber 2> ... exceptions.TypeError
-(2,) % <MethodNumber 2> ... exceptions.TypeError
-(2,) %= <MethodNumber 2> ... exceptions.TypeError
-(2,) + <CoerceNumber 2> ... exceptions.TypeError
-(2,) += <CoerceNumber 2> ... exceptions.TypeError
-(2,) - <CoerceNumber 2> ... exceptions.TypeError
-(2,) -= <CoerceNumber 2> ... exceptions.TypeError
-(2,) * <CoerceNumber 2> = (2, 2)
-(2,) *= <CoerceNumber 2> => (2, 2)
-(2,) / <CoerceNumber 2> ... exceptions.TypeError
-(2,) /= <CoerceNumber 2> ... exceptions.TypeError
-(2,) ** <CoerceNumber 2> ... exceptions.TypeError
-(2,) **= <CoerceNumber 2> ... exceptions.TypeError
-(2,) % <CoerceNumber 2> ... exceptions.TypeError
-(2,) %= <CoerceNumber 2> ... exceptions.TypeError
-None + 2 ... exceptions.TypeError
-None += 2 ... exceptions.TypeError
-None - 2 ... exceptions.TypeError
-None -= 2 ... exceptions.TypeError
-None * 2 ... exceptions.TypeError
-None *= 2 ... exceptions.TypeError
-None / 2 ... exceptions.TypeError
-None /= 2 ... exceptions.TypeError
-None ** 2 ... exceptions.TypeError
-None **= 2 ... exceptions.TypeError
-None % 2 ... exceptions.TypeError
-None %= 2 ... exceptions.TypeError
-None + 4.0 ... exceptions.TypeError
-None += 4.0 ... exceptions.TypeError
-None - 4.0 ... exceptions.TypeError
-None -= 4.0 ... exceptions.TypeError
-None * 4.0 ... exceptions.TypeError
-None *= 4.0 ... exceptions.TypeError
-None / 4.0 ... exceptions.TypeError
-None /= 4.0 ... exceptions.TypeError
-None ** 4.0 ... exceptions.TypeError
-None **= 4.0 ... exceptions.TypeError
-None % 4.0 ... exceptions.TypeError
-None %= 4.0 ... exceptions.TypeError
-None + 2 ... exceptions.TypeError
-None += 2 ... exceptions.TypeError
-None - 2 ... exceptions.TypeError
-None -= 2 ... exceptions.TypeError
-None * 2 ... exceptions.TypeError
-None *= 2 ... exceptions.TypeError
-None / 2 ... exceptions.TypeError
-None /= 2 ... exceptions.TypeError
-None ** 2 ... exceptions.TypeError
-None **= 2 ... exceptions.TypeError
-None % 2 ... exceptions.TypeError
-None %= 2 ... exceptions.TypeError
-None + (2+0j) ... exceptions.TypeError
-None += (2+0j) ... exceptions.TypeError
-None - (2+0j) ... exceptions.TypeError
-None -= (2+0j) ... exceptions.TypeError
-None * (2+0j) ... exceptions.TypeError
-None *= (2+0j) ... exceptions.TypeError
-None / (2+0j) ... exceptions.TypeError
-None /= (2+0j) ... exceptions.TypeError
-None ** (2+0j) ... exceptions.TypeError
-None **= (2+0j) ... exceptions.TypeError
-None % (2+0j) ... exceptions.TypeError
-None %= (2+0j) ... exceptions.TypeError
-None + [1] ... exceptions.TypeError
-None += [1] ... exceptions.TypeError
-None - [1] ... exceptions.TypeError
-None -= [1] ... exceptions.TypeError
-None * [1] ... exceptions.TypeError
-None *= [1] ... exceptions.TypeError
-None / [1] ... exceptions.TypeError
-None /= [1] ... exceptions.TypeError
-None ** [1] ... exceptions.TypeError
-None **= [1] ... exceptions.TypeError
-None % [1] ... exceptions.TypeError
-None %= [1] ... exceptions.TypeError
-None + (2,) ... exceptions.TypeError
-None += (2,) ... exceptions.TypeError
-None - (2,) ... exceptions.TypeError
-None -= (2,) ... exceptions.TypeError
-None * (2,) ... exceptions.TypeError
-None *= (2,) ... exceptions.TypeError
-None / (2,) ... exceptions.TypeError
-None /= (2,) ... exceptions.TypeError
-None ** (2,) ... exceptions.TypeError
-None **= (2,) ... exceptions.TypeError
-None % (2,) ... exceptions.TypeError
-None %= (2,) ... exceptions.TypeError
-None + None ... exceptions.TypeError
-None += None ... exceptions.TypeError
-None - None ... exceptions.TypeError
-None -= None ... exceptions.TypeError
-None * None ... exceptions.TypeError
-None *= None ... exceptions.TypeError
-None / None ... exceptions.TypeError
-None /= None ... exceptions.TypeError
-None ** None ... exceptions.TypeError
-None **= None ... exceptions.TypeError
-None % None ... exceptions.TypeError
-None %= None ... exceptions.TypeError
-None + <MethodNumber 2> ... exceptions.TypeError
-None += <MethodNumber 2> ... exceptions.TypeError
-None - <MethodNumber 2> ... exceptions.TypeError
-None -= <MethodNumber 2> ... exceptions.TypeError
-None * <MethodNumber 2> ... exceptions.TypeError
-None *= <MethodNumber 2> ... exceptions.TypeError
-None / <MethodNumber 2> ... exceptions.TypeError
-None /= <MethodNumber 2> ... exceptions.TypeError
-None ** <MethodNumber 2> ... exceptions.TypeError
-None **= <MethodNumber 2> ... exceptions.TypeError
-None % <MethodNumber 2> ... exceptions.TypeError
-None %= <MethodNumber 2> ... exceptions.TypeError
-None + <CoerceNumber 2> ... exceptions.TypeError
-None += <CoerceNumber 2> ... exceptions.TypeError
-None - <CoerceNumber 2> ... exceptions.TypeError
-None -= <CoerceNumber 2> ... exceptions.TypeError
-None * <CoerceNumber 2> ... exceptions.TypeError
-None *= <CoerceNumber 2> ... exceptions.TypeError
-None / <CoerceNumber 2> ... exceptions.TypeError
-None /= <CoerceNumber 2> ... exceptions.TypeError
-None ** <CoerceNumber 2> ... exceptions.TypeError
-None **= <CoerceNumber 2> ... exceptions.TypeError
-None % <CoerceNumber 2> ... exceptions.TypeError
-None %= <CoerceNumber 2> ... exceptions.TypeError
-<MethodNumber 2> + 2 = 4
-<MethodNumber 2> += 2 => 4
-<MethodNumber 2> - 2 = 0
-<MethodNumber 2> -= 2 => 0
-<MethodNumber 2> * 2 = 4
-<MethodNumber 2> *= 2 => 4
-<MethodNumber 2> / 2 = 1
-<MethodNumber 2> /= 2 => 1
-<MethodNumber 2> ** 2 = 4
-<MethodNumber 2> **= 2 => 4
-<MethodNumber 2> % 2 = 0
-<MethodNumber 2> %= 2 => 0
-<MethodNumber 2> + 4.0 = 6.0
-<MethodNumber 2> += 4.0 => 6.0
-<MethodNumber 2> - 4.0 = -2.0
-<MethodNumber 2> -= 4.0 => -2.0
-<MethodNumber 2> * 4.0 = 8.0
-<MethodNumber 2> *= 4.0 => 8.0
-<MethodNumber 2> / 4.0 = 0.5
-<MethodNumber 2> /= 4.0 => 0.5
-<MethodNumber 2> ** 4.0 = 16.0
-<MethodNumber 2> **= 4.0 => 16.0
-<MethodNumber 2> % 4.0 = 2.0
-<MethodNumber 2> %= 4.0 => 2.0
-<MethodNumber 2> + 2 = 4
-<MethodNumber 2> += 2 => 4
-<MethodNumber 2> - 2 = 0
-<MethodNumber 2> -= 2 => 0
-<MethodNumber 2> * 2 = 4
-<MethodNumber 2> *= 2 => 4
-<MethodNumber 2> / 2 = 1
-<MethodNumber 2> /= 2 => 1
-<MethodNumber 2> ** 2 = 4
-<MethodNumber 2> **= 2 => 4
-<MethodNumber 2> % 2 = 0
-<MethodNumber 2> %= 2 => 0
-<MethodNumber 2> + (2+0j) = (4.0 + 0.0j)
-<MethodNumber 2> += (2+0j) => (4.0 + 0.0j)
-<MethodNumber 2> - (2+0j) = (0.0 + 0.0j)
-<MethodNumber 2> -= (2+0j) => (0.0 + 0.0j)
-<MethodNumber 2> * (2+0j) = (4.0 + 0.0j)
-<MethodNumber 2> *= (2+0j) => (4.0 + 0.0j)
-<MethodNumber 2> / (2+0j) = (1.0 + 0.0j)
-<MethodNumber 2> /= (2+0j) => (1.0 + 0.0j)
-<MethodNumber 2> ** (2+0j) = (4.0 + 0.0j)
-<MethodNumber 2> **= (2+0j) => (4.0 + 0.0j)
-<MethodNumber 2> % (2+0j) = (0.0 + 0.0j)
-<MethodNumber 2> %= (2+0j) => (0.0 + 0.0j)
-<MethodNumber 2> + [1] ... exceptions.TypeError
-<MethodNumber 2> += [1] ... exceptions.TypeError
-<MethodNumber 2> - [1] ... exceptions.TypeError
-<MethodNumber 2> -= [1] ... exceptions.TypeError
-<MethodNumber 2> * [1] = [1, 1]
-<MethodNumber 2> *= [1] => [1, 1]
-<MethodNumber 2> / [1] ... exceptions.TypeError
-<MethodNumber 2> /= [1] ... exceptions.TypeError
-<MethodNumber 2> ** [1] ... exceptions.TypeError
-<MethodNumber 2> **= [1] ... exceptions.TypeError
-<MethodNumber 2> % [1] ... exceptions.TypeError
-<MethodNumber 2> %= [1] ... exceptions.TypeError
-<MethodNumber 2> + (2,) ... exceptions.TypeError
-<MethodNumber 2> += (2,) ... exceptions.TypeError
-<MethodNumber 2> - (2,) ... exceptions.TypeError
-<MethodNumber 2> -= (2,) ... exceptions.TypeError
-<MethodNumber 2> * (2,) = (2, 2)
-<MethodNumber 2> *= (2,) => (2, 2)
-<MethodNumber 2> / (2,) ... exceptions.TypeError
-<MethodNumber 2> /= (2,) ... exceptions.TypeError
-<MethodNumber 2> ** (2,) ... exceptions.TypeError
-<MethodNumber 2> **= (2,) ... exceptions.TypeError
-<MethodNumber 2> % (2,) ... exceptions.TypeError
-<MethodNumber 2> %= (2,) ... exceptions.TypeError
-<MethodNumber 2> + None ... exceptions.TypeError
-<MethodNumber 2> += None ... exceptions.TypeError
-<MethodNumber 2> - None ... exceptions.TypeError
-<MethodNumber 2> -= None ... exceptions.TypeError
-<MethodNumber 2> * None ... exceptions.TypeError
-<MethodNumber 2> *= None ... exceptions.TypeError
-<MethodNumber 2> / None ... exceptions.TypeError
-<MethodNumber 2> /= None ... exceptions.TypeError
-<MethodNumber 2> ** None ... exceptions.TypeError
-<MethodNumber 2> **= None ... exceptions.TypeError
-<MethodNumber 2> % None ... exceptions.TypeError
-<MethodNumber 2> %= None ... exceptions.TypeError
-<MethodNumber 2> + <MethodNumber 2> = 4
-<MethodNumber 2> += <MethodNumber 2> => 4
-<MethodNumber 2> - <MethodNumber 2> = 0
-<MethodNumber 2> -= <MethodNumber 2> => 0
-<MethodNumber 2> * <MethodNumber 2> = 4
-<MethodNumber 2> *= <MethodNumber 2> => 4
-<MethodNumber 2> / <MethodNumber 2> = 1
-<MethodNumber 2> /= <MethodNumber 2> => 1
-<MethodNumber 2> ** <MethodNumber 2> = 4
-<MethodNumber 2> **= <MethodNumber 2> => 4
-<MethodNumber 2> % <MethodNumber 2> = 0
-<MethodNumber 2> %= <MethodNumber 2> => 0
-<MethodNumber 2> + <CoerceNumber 2> = 4
-<MethodNumber 2> += <CoerceNumber 2> => 4
-<MethodNumber 2> - <CoerceNumber 2> = 0
-<MethodNumber 2> -= <CoerceNumber 2> => 0
-<MethodNumber 2> * <CoerceNumber 2> = 4
-<MethodNumber 2> *= <CoerceNumber 2> => 4
-<MethodNumber 2> / <CoerceNumber 2> = 1
-<MethodNumber 2> /= <CoerceNumber 2> => 1
-<MethodNumber 2> ** <CoerceNumber 2> = 4
-<MethodNumber 2> **= <CoerceNumber 2> => 4
-<MethodNumber 2> % <CoerceNumber 2> = 0
-<MethodNumber 2> %= <CoerceNumber 2> => 0
-<CoerceNumber 2> + 2 = 4
-<CoerceNumber 2> += 2 => 4
-<CoerceNumber 2> - 2 = 0
-<CoerceNumber 2> -= 2 => 0
-<CoerceNumber 2> * 2 = 4
-<CoerceNumber 2> *= 2 => 4
-<CoerceNumber 2> / 2 = 1
-<CoerceNumber 2> /= 2 => 1
-<CoerceNumber 2> ** 2 = 4
-<CoerceNumber 2> **= 2 => 4
-<CoerceNumber 2> % 2 = 0
-<CoerceNumber 2> %= 2 => 0
-<CoerceNumber 2> + 4.0 = 6.0
-<CoerceNumber 2> += 4.0 => 6.0
-<CoerceNumber 2> - 4.0 = -2.0
-<CoerceNumber 2> -= 4.0 => -2.0
-<CoerceNumber 2> * 4.0 = 8.0
-<CoerceNumber 2> *= 4.0 => 8.0
-<CoerceNumber 2> / 4.0 = 0.5
-<CoerceNumber 2> /= 4.0 => 0.5
-<CoerceNumber 2> ** 4.0 = 16.0
-<CoerceNumber 2> **= 4.0 => 16.0
-<CoerceNumber 2> % 4.0 = 2.0
-<CoerceNumber 2> %= 4.0 => 2.0
-<CoerceNumber 2> + 2 = 4
-<CoerceNumber 2> += 2 => 4
-<CoerceNumber 2> - 2 = 0
-<CoerceNumber 2> -= 2 => 0
-<CoerceNumber 2> * 2 = 4
-<CoerceNumber 2> *= 2 => 4
-<CoerceNumber 2> / 2 = 1
-<CoerceNumber 2> /= 2 => 1
-<CoerceNumber 2> ** 2 = 4
-<CoerceNumber 2> **= 2 => 4
-<CoerceNumber 2> % 2 = 0
-<CoerceNumber 2> %= 2 => 0
-<CoerceNumber 2> + (2+0j) = (4.0 + 0.0j)
-<CoerceNumber 2> += (2+0j) => (4.0 + 0.0j)
-<CoerceNumber 2> - (2+0j) = (0.0 + 0.0j)
-<CoerceNumber 2> -= (2+0j) => (0.0 + 0.0j)
-<CoerceNumber 2> * (2+0j) = (4.0 + 0.0j)
-<CoerceNumber 2> *= (2+0j) => (4.0 + 0.0j)
-<CoerceNumber 2> / (2+0j) = (1.0 + 0.0j)
-<CoerceNumber 2> /= (2+0j) => (1.0 + 0.0j)
-<CoerceNumber 2> ** (2+0j) = (4.0 + 0.0j)
-<CoerceNumber 2> **= (2+0j) => (4.0 + 0.0j)
-<CoerceNumber 2> % (2+0j) = (0.0 + 0.0j)
-<CoerceNumber 2> %= (2+0j) => (0.0 + 0.0j)
-<CoerceNumber 2> + [1] ... exceptions.TypeError
-<CoerceNumber 2> += [1] ... exceptions.TypeError
-<CoerceNumber 2> - [1] ... exceptions.TypeError
-<CoerceNumber 2> -= [1] ... exceptions.TypeError
-<CoerceNumber 2> * [1] = [1, 1]
-<CoerceNumber 2> *= [1] => [1, 1]
-<CoerceNumber 2> / [1] ... exceptions.TypeError
-<CoerceNumber 2> /= [1] ... exceptions.TypeError
-<CoerceNumber 2> ** [1] ... exceptions.TypeError
-<CoerceNumber 2> **= [1] ... exceptions.TypeError
-<CoerceNumber 2> % [1] ... exceptions.TypeError
-<CoerceNumber 2> %= [1] ... exceptions.TypeError
-<CoerceNumber 2> + (2,) ... exceptions.TypeError
-<CoerceNumber 2> += (2,) ... exceptions.TypeError
-<CoerceNumber 2> - (2,) ... exceptions.TypeError
-<CoerceNumber 2> -= (2,) ... exceptions.TypeError
-<CoerceNumber 2> * (2,) = (2, 2)
-<CoerceNumber 2> *= (2,) => (2, 2)
-<CoerceNumber 2> / (2,) ... exceptions.TypeError
-<CoerceNumber 2> /= (2,) ... exceptions.TypeError
-<CoerceNumber 2> ** (2,) ... exceptions.TypeError
-<CoerceNumber 2> **= (2,) ... exceptions.TypeError
-<CoerceNumber 2> % (2,) ... exceptions.TypeError
-<CoerceNumber 2> %= (2,) ... exceptions.TypeError
-<CoerceNumber 2> + None ... exceptions.TypeError
-<CoerceNumber 2> += None ... exceptions.TypeError
-<CoerceNumber 2> - None ... exceptions.TypeError
-<CoerceNumber 2> -= None ... exceptions.TypeError
-<CoerceNumber 2> * None ... exceptions.TypeError
-<CoerceNumber 2> *= None ... exceptions.TypeError
-<CoerceNumber 2> / None ... exceptions.TypeError
-<CoerceNumber 2> /= None ... exceptions.TypeError
-<CoerceNumber 2> ** None ... exceptions.TypeError
-<CoerceNumber 2> **= None ... exceptions.TypeError
-<CoerceNumber 2> % None ... exceptions.TypeError
-<CoerceNumber 2> %= None ... exceptions.TypeError
-<CoerceNumber 2> + <MethodNumber 2> = 4
-<CoerceNumber 2> += <MethodNumber 2> => 4
-<CoerceNumber 2> - <MethodNumber 2> = 0
-<CoerceNumber 2> -= <MethodNumber 2> => 0
-<CoerceNumber 2> * <MethodNumber 2> = 4
-<CoerceNumber 2> *= <MethodNumber 2> => 4
-<CoerceNumber 2> / <MethodNumber 2> = 1
-<CoerceNumber 2> /= <MethodNumber 2> => 1
-<CoerceNumber 2> ** <MethodNumber 2> = 4
-<CoerceNumber 2> **= <MethodNumber 2> => 4
-<CoerceNumber 2> % <MethodNumber 2> = 0
-<CoerceNumber 2> %= <MethodNumber 2> => 0
-<CoerceNumber 2> + <CoerceNumber 2> = 4
-<CoerceNumber 2> += <CoerceNumber 2> => 4
-<CoerceNumber 2> - <CoerceNumber 2> = 0
-<CoerceNumber 2> -= <CoerceNumber 2> => 0
-<CoerceNumber 2> * <CoerceNumber 2> = 4
-<CoerceNumber 2> *= <CoerceNumber 2> => 4
-<CoerceNumber 2> / <CoerceNumber 2> = 1
-<CoerceNumber 2> /= <CoerceNumber 2> => 1
-<CoerceNumber 2> ** <CoerceNumber 2> = 4
-<CoerceNumber 2> **= <CoerceNumber 2> => 4
-<CoerceNumber 2> % <CoerceNumber 2> = 0
-<CoerceNumber 2> %= <CoerceNumber 2> => 0
-divmod(2, 2) = (1, 0)
-divmod(2, 4.0) = (0.0, 2.0)
-divmod(2, 2) = (1L, 0L)
-divmod(2, (2+0j)) = ((1+0j), 0j)
-divmod(2, [1]) ... exceptions.TypeError
-divmod(2, (2,)) ... exceptions.TypeError
-divmod(2, None) ... exceptions.TypeError
-divmod(2, <MethodNumber 2>) ... exceptions.TypeError
-divmod(2, <CoerceNumber 2>) = (1, 0)
-divmod(4.0, 2) = (2.0, 0.0)
-divmod(4.0, 4.0) = (1.0, 0.0)
-divmod(4.0, 2) = (2.0, 0.0)
-divmod(4.0, (2+0j)) = ((2+0j), 0j)
-divmod(4.0, [1]) ... exceptions.TypeError
-divmod(4.0, (2,)) ... exceptions.TypeError
-divmod(4.0, None) ... exceptions.TypeError
-divmod(4.0, <MethodNumber 2>) ... exceptions.TypeError
-divmod(4.0, <CoerceNumber 2>) = (2.0, 0.0)
-divmod(2, 2) = (1L, 0L)
-divmod(2, 4.0) = (0.0, 2.0)
-divmod(2, 2) = (1L, 0L)
-divmod(2, (2+0j)) = ((1+0j), 0j)
-divmod(2, [1]) ... exceptions.TypeError
-divmod(2, (2,)) ... exceptions.TypeError
-divmod(2, None) ... exceptions.TypeError
-divmod(2, <MethodNumber 2>) ... exceptions.TypeError
-divmod(2, <CoerceNumber 2>) = (1L, 0L)
-divmod((2+0j), 2) = ((1+0j), 0j)
-divmod((2+0j), 4.0) = (0j, (2+0j))
-divmod((2+0j), 2) = ((1+0j), 0j)
-divmod((2+0j), (2+0j)) = ((1+0j), 0j)
-divmod((2+0j), [1]) ... exceptions.TypeError
-divmod((2+0j), (2,)) ... exceptions.TypeError
-divmod((2+0j), None) ... exceptions.TypeError
-divmod((2+0j), <MethodNumber 2>) ... exceptions.TypeError
-divmod((2+0j), <CoerceNumber 2>) = ((1+0j), 0j)
-divmod([1], 2) ... exceptions.TypeError
-divmod([1], 4.0) ... exceptions.TypeError
-divmod([1], 2) ... exceptions.TypeError
-divmod([1], (2+0j)) ... exceptions.TypeError
-divmod([1], [1]) ... exceptions.TypeError
-divmod([1], (2,)) ... exceptions.TypeError
-divmod([1], None) ... exceptions.TypeError
-divmod([1], <MethodNumber 2>) ... exceptions.TypeError
-divmod([1], <CoerceNumber 2>) ... exceptions.TypeError
-divmod((2,), 2) ... exceptions.TypeError
-divmod((2,), 4.0) ... exceptions.TypeError
-divmod((2,), 2) ... exceptions.TypeError
-divmod((2,), (2+0j)) ... exceptions.TypeError
-divmod((2,), [1]) ... exceptions.TypeError
-divmod((2,), (2,)) ... exceptions.TypeError
-divmod((2,), None) ... exceptions.TypeError
-divmod((2,), <MethodNumber 2>) ... exceptions.TypeError
-divmod((2,), <CoerceNumber 2>) ... exceptions.TypeError
-divmod(None, 2) ... exceptions.TypeError
-divmod(None, 4.0) ... exceptions.TypeError
-divmod(None, 2) ... exceptions.TypeError
-divmod(None, (2+0j)) ... exceptions.TypeError
-divmod(None, [1]) ... exceptions.TypeError
-divmod(None, (2,)) ... exceptions.TypeError
-divmod(None, None) ... exceptions.TypeError
-divmod(None, <MethodNumber 2>) ... exceptions.TypeError
-divmod(None, <CoerceNumber 2>) ... exceptions.TypeError
-divmod(<MethodNumber 2>, 2) ... exceptions.TypeError
-divmod(<MethodNumber 2>, 4.0) ... exceptions.TypeError
-divmod(<MethodNumber 2>, 2) ... exceptions.TypeError
-divmod(<MethodNumber 2>, (2+0j)) ... exceptions.TypeError
-divmod(<MethodNumber 2>, [1]) ... exceptions.TypeError
-divmod(<MethodNumber 2>, (2,)) ... exceptions.TypeError
-divmod(<MethodNumber 2>, None) ... exceptions.TypeError
-divmod(<MethodNumber 2>, <MethodNumber 2>) ... exceptions.TypeError
-divmod(<MethodNumber 2>, <CoerceNumber 2>) ... exceptions.TypeError
-divmod(<CoerceNumber 2>, 2) = (1, 0)
-divmod(<CoerceNumber 2>, 4.0) = (0.0, 2.0)
-divmod(<CoerceNumber 2>, 2) = (1L, 0L)
-divmod(<CoerceNumber 2>, (2+0j)) = ((1+0j), 0j)
-divmod(<CoerceNumber 2>, [1]) ... exceptions.TypeError
-divmod(<CoerceNumber 2>, (2,)) ... exceptions.TypeError
-divmod(<CoerceNumber 2>, None) ... exceptions.TypeError
-divmod(<CoerceNumber 2>, <MethodNumber 2>) ... exceptions.TypeError
-divmod(<CoerceNumber 2>, <CoerceNumber 2>) = (1, 0)
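(The <CoerceNumber 2> and <MethodNumber 2> markers in the deleted expected output above come from helper classes in test_coercion.py, which exercise the two Python 2 paths for mixed-type arithmetic: old-style __coerce__ conversion versus explicit operator methods. A minimal sketch of the idea, simplified from the real helpers and Python 2 only:

    class CoerceNumber:
        def __init__(self, arg):
            self.arg = arg
        def __repr__(self):
            return '<CoerceNumber %r>' % self.arg
        def __coerce__(self, other):
            # Old-style coercion: return a compatible pair and let the
            # interpreter apply the operator to the converted values.
            if isinstance(other, CoerceNumber):
                return self.arg, other.arg
            return self.arg, other

    class MethodNumber:
        def __init__(self, arg):
            self.arg = arg
        def __repr__(self):
            return '<MethodNumber %r>' % self.arg
        def __add__(self, other):
            return self.arg + other
        def __radd__(self, other):
            return other + self.arg

    print CoerceNumber(2) + 2    # 4, via __coerce__
    print MethodNumber(2) + 2    # 4, via __add__
    print 2 + MethodNumber(2)    # 4, via __radd__

The real helpers define the full operator set, which is why the expected output covers every binary and augmented operator against both markers.)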
diff --git a/Lib/test/output/test_compare b/Lib/test/output/test_compare
deleted file mode 100644
index 210bd97..0000000
--- a/Lib/test/output/test_compare
+++ /dev/null
@@ -1,101 +0,0 @@
-test_compare
-2 == 2
-2 == 2.0
-2 == 2
-2 == (2+0j)
-2 != [1]
-2 != (3,)
-2 != None
-2 != <Empty>
-2 == <Coerce 2>
-2 == <Cmp 2.0>
-2.0 == 2
-2.0 == 2.0
-2.0 == 2
-2.0 == (2+0j)
-2.0 != [1]
-2.0 != (3,)
-2.0 != None
-2.0 != <Empty>
-2.0 == <Coerce 2>
-2.0 == <Cmp 2.0>
-2 == 2
-2 == 2.0
-2 == 2
-2 == (2+0j)
-2 != [1]
-2 != (3,)
-2 != None
-2 != <Empty>
-2 == <Coerce 2>
-2 == <Cmp 2.0>
-(2+0j) == 2
-(2+0j) == 2.0
-(2+0j) == 2
-(2+0j) == (2+0j)
-(2+0j) != [1]
-(2+0j) != (3,)
-(2+0j) != None
-(2+0j) != <Empty>
-(2+0j) == <Coerce 2>
-(2+0j) == <Cmp 2.0>
-[1] != 2
-[1] != 2.0
-[1] != 2
-[1] != (2+0j)
-[1] == [1]
-[1] != (3,)
-[1] != None
-[1] != <Empty>
-[1] != <Coerce 2>
-[1] != <Cmp 2.0>
-(3,) != 2
-(3,) != 2.0
-(3,) != 2
-(3,) != (2+0j)
-(3,) != [1]
-(3,) == (3,)
-(3,) != None
-(3,) != <Empty>
-(3,) != <Coerce 2>
-(3,) != <Cmp 2.0>
-None != 2
-None != 2.0
-None != 2
-None != (2+0j)
-None != [1]
-None != (3,)
-None == None
-None != <Empty>
-None != <Coerce 2>
-None != <Cmp 2.0>
-<Empty> != 2
-<Empty> != 2.0
-<Empty> != 2
-<Empty> != (2+0j)
-<Empty> != [1]
-<Empty> != (3,)
-<Empty> != None
-<Empty> == <Empty>
-<Empty> != <Coerce 2>
-<Empty> != <Cmp 2.0>
-<Coerce 2> == 2
-<Coerce 2> == 2.0
-<Coerce 2> == 2
-<Coerce 2> == (2+0j)
-<Coerce 2> != [1]
-<Coerce 2> != (3,)
-<Coerce 2> != None
-<Coerce 2> != <Empty>
-<Coerce 2> == <Coerce 2>
-<Coerce 2> == <Cmp 2.0>
-<Cmp 2.0> == 2
-<Cmp 2.0> == 2.0
-<Cmp 2.0> == 2
-<Cmp 2.0> == (2+0j)
-<Cmp 2.0> != [1]
-<Cmp 2.0> != (3,)
-<Cmp 2.0> != None
-<Cmp 2.0> != <Empty>
-<Cmp 2.0> == <Coerce 2>
-<Cmp 2.0> == <Cmp 2.0>
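(Similarly, the <Coerce 2> and <Cmp 2.0> markers in this deleted expected output come from test_compare.py helpers; <Cmp 2.0> relies on the Python 2 three-way __cmp__ hook. A rough sketch, not the exact test class:

    class Cmp:
        def __init__(self, arg):
            self.arg = arg
        def __repr__(self):
            return '<Cmp %s>' % self.arg
        def __cmp__(self, other):
            # Three-way comparison: negative, zero or positive.
            return cmp(self.arg, other)

    print Cmp(2.0) == 2      # True, matching '<Cmp 2.0> == 2' above
    print Cmp(2.0) != [1]    # True: mixed types compare unequal

Both __cmp__ and __coerce__ disappear in Py3K, which is part of why these output-matching tests were being replaced on this branch.)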
diff --git a/Lib/test/regrtest.py b/Lib/test/regrtest.py
index 85f57a6..566e54b 100755
--- a/Lib/test/regrtest.py
+++ b/Lib/test/regrtest.py
@@ -520,7 +520,7 @@ def runtest(test, generate, verbose, quiet, testdir=None, huntrleaks=False):
import gc
def cleanup():
import _strptime, linecache, warnings, dircache
- import urlparse, urllib, urllib2
+ import urlparse, urllib, urllib2, mimetypes, doctest
from distutils.dir_util import _path_created
_path_created.clear()
warnings.filters[:] = fs
@@ -536,6 +536,8 @@ def runtest(test, generate, verbose, quiet, testdir=None, huntrleaks=False):
sys.path_importer_cache.update(pic)
dircache.reset()
linecache.clearcache()
+ mimetypes._default_mime_types()
+ doctest.master = None
if indirect_test:
def run_the_test():
indirect_test()
@@ -547,6 +549,7 @@ def runtest(test, generate, verbose, quiet, testdir=None, huntrleaks=False):
print >> sys.stderr, "beginning", repcount, "repetitions"
print >> sys.stderr, \
("1234567890"*(repcount//10 + 1))[:repcount]
+ cleanup()
for i in range(repcount):
rc = sys.gettotalrefcount()
run_the_test()
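(The extra cleanup matters for refleak hunting with -R: any module-level cache that grows during a test run shows up as a spurious reference leak, so regrtest resets known caches between repetitions. This change adds mimetypes' global tables and doctest's cached master runner to that list, and calls cleanup() once before counting starts so the first repetition is not charged for lazy initialization. A condensed sketch of the loop, not regrtest's exact code; sys.gettotalrefcount exists only in --with-pydebug builds:

    import sys, doctest, mimetypes

    def cleanup():
        mimetypes._default_mime_types()   # rebuild mimetypes' global maps
        doctest.master = None             # drop the accumulated runner

    def hunt(test, repcount=4):
        deltas = []
        cleanup()                          # settle caches before counting
        for i in range(repcount):
            rc = sys.gettotalrefcount()
            test()
            cleanup()
            deltas.append(sys.gettotalrefcount() - rc)
        return deltas                      # nonzero tail => likely leak
)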
@@ -744,6 +747,8 @@ _expectations = {
test_sunaudiodev
test_threadsignals
test_timing
+ test_wait3
+ test_wait4
""",
'linux2':
"""
@@ -761,6 +766,8 @@ _expectations = {
test_nis
test_ntpath
test_ossaudiodev
+ test_sqlite
+ test_startfile
test_sunaudiodev
""",
'mac':
@@ -800,6 +807,8 @@ _expectations = {
test_pwd
test_resource
test_signal
+ test_sqlite
+ test_startfile
test_sunaudiodev
test_sundry
test_tarfile
@@ -824,6 +833,8 @@ _expectations = {
test_openpty
test_pyexpat
test_sax
+ test_startfile
+ test_sqlite
test_sunaudiodev
test_sundry
""",
@@ -846,6 +857,8 @@ _expectations = {
test_openpty
test_pyexpat
test_sax
+ test_sqlite
+ test_startfile
test_sunaudiodev
test_sundry
""",
@@ -873,6 +886,8 @@ _expectations = {
test_pyexpat
test_queue
test_sax
+ test_sqlite
+ test_startfile
test_sunaudiodev
test_sundry
test_thread
@@ -913,6 +928,8 @@ _expectations = {
test_pty
test_pwd
test_strop
+ test_sqlite
+ test_startfile
test_sunaudiodev
test_sundry
test_thread
@@ -930,7 +947,6 @@ _expectations = {
test_cd
test_cl
test_curses
- test_dl
test_gdbm
test_gl
test_imgfile
@@ -942,6 +958,8 @@ _expectations = {
test_ntpath
test_ossaudiodev
test_poll
+ test_sqlite
+ test_startfile
test_sunaudiodev
""",
'sunos5':
@@ -960,6 +978,8 @@ _expectations = {
test_imgfile
test_linuxaudiodev
test_openpty
+ test_sqlite
+ test_startfile
test_zipfile
test_zlib
""",
@@ -986,6 +1006,8 @@ _expectations = {
test_openpty
test_pyexpat
test_sax
+ test_sqlite
+ test_startfile
test_sunaudiodev
test_zipfile
test_zlib
@@ -1011,6 +1033,8 @@ _expectations = {
test_poll
test_popen2
test_resource
+ test_sqlite
+ test_startfile
test_sunaudiodev
""",
'cygwin':
@@ -1032,6 +1056,7 @@ _expectations = {
test_nis
test_ossaudiodev
test_socketserver
+ test_sqlite
test_sunaudiodev
""",
'os2emx':
@@ -1058,6 +1083,8 @@ _expectations = {
test_pty
test_resource
test_signal
+ test_sqlite
+ test_startfile
test_sunaudiodev
""",
'freebsd4':
@@ -1084,6 +1111,8 @@ _expectations = {
test_scriptpackages
test_socket_ssl
test_socketserver
+ test_sqlite
+ test_startfile
test_sunaudiodev
test_tcl
test_timeout
@@ -1113,6 +1142,8 @@ _expectations = {
test_macostools
test_nis
test_ossaudiodev
+ test_sqlite
+ test_startfile
test_sunaudiodev
test_tcl
test_winreg
@@ -1120,6 +1151,38 @@ _expectations = {
test_zipimport
test_zlib
""",
+ 'openbsd3':
+ """
+ test_aepack
+ test_al
+ test_applesingle
+ test_bsddb
+ test_bsddb3
+ test_cd
+ test_cl
+ test_ctypes
+ test_dl
+ test_gdbm
+ test_gl
+ test_imgfile
+ test_linuxaudiodev
+ test_locale
+ test_macfs
+ test_macostools
+ test_nis
+ test_normalization
+ test_ossaudiodev
+ test_pep277
+ test_plistlib
+ test_scriptpackages
+ test_tcl
+ test_sqlite
+ test_startfile
+ test_sunaudiodev
+ test_unicode_file
+ test_winreg
+ test_winsound
+ """,
}
_expectations['freebsd5'] = _expectations['freebsd4']
_expectations['freebsd6'] = _expectations['freebsd4']
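(Each _expectations entry is a whitespace-separated block of test names expected to be skipped on that platform; regrtest turns the current platform's block into a set, roughly as below. Condensed from the real _ExpectedSkips class; details differ, and the stub dict stands in for the full one above:

    import sys

    _expectations = {'linux2': "test_al test_cd test_sqlite"}  # stub

    class _ExpectedSkips:
        def __init__(self):
            self.valid = sys.platform in _expectations
            if self.valid:
                # split on any whitespace, so the triple-quoted blocks
                # parse directly into test names
                self.expected = set(_expectations[sys.platform].split())

        def getexpected(self):
            assert self.valid
            return self.expected

So adding test_sqlite and test_startfile to a platform's block silences "unexpected skip" noise there rather than changing what actually runs.)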
diff --git a/Lib/test/test___all__.py b/Lib/test/test___all__.py
index 0b2e7da..c45e139 100644
--- a/Lib/test/test___all__.py
+++ b/Lib/test/test___all__.py
@@ -5,8 +5,6 @@ from test.test_support import verify, verbose
import sys
import warnings
-warnings.filterwarnings("ignore", ".* regsub .*", DeprecationWarning,
- r'^regsub$')
warnings.filterwarnings("ignore",
"the gopherlib module is deprecated",
DeprecationWarning,
@@ -128,8 +126,6 @@ class AllTest(unittest.TestCase):
self.check_all("quopri")
self.check_all("random")
self.check_all("re")
- self.check_all("reconvert")
- self.check_all("regsub")
self.check_all("repr")
self.check_all("rexec")
self.check_all("rfc822")
diff --git a/Lib/test/test_applesingle.py b/Lib/test/test_applesingle.py
index 2a2d60a..d533f1a 100644
--- a/Lib/test/test_applesingle.py
+++ b/Lib/test/test_applesingle.py
@@ -15,8 +15,8 @@ AS_VERSION=0x00020000
dataforkdata = 'hello\r\0world\n'
resourceforkdata = 'goodbye\ncruel\0world\r'
-applesingledata = struct.pack("ll16sh", AS_MAGIC, AS_VERSION, "foo", 2) + \
- struct.pack("llllll", 1, 50, len(dataforkdata),
+applesingledata = struct.pack(">ll16sh", AS_MAGIC, AS_VERSION, "foo", 2) + \
+ struct.pack(">llllll", 1, 50, len(dataforkdata),
2, 50+len(dataforkdata), len(resourceforkdata)) + \
dataforkdata + \
resourceforkdata
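(The added > pins the AppleSingle header to big-endian with no padding, which is what the on-disk format requires; without it, struct uses the host's native byte order, native long size, and native alignment. Illustration in Python 2; the native sizes shown assume an LP64 little-endian host:

    import struct

    AS_MAGIC = 0x00051600
    AS_VERSION = 0x00020000

    native = struct.pack("ll16sh", AS_MAGIC, AS_VERSION, "foo", 2)
    bigend = struct.pack(">ll16sh", AS_MAGIC, AS_VERSION, "foo", 2)

    print len(bigend)        # 26: 4 + 4 + 16 + 2, fixed layout
    print len(native)        # e.g. 34 on LP64, where native 'l' is 8 bytes
    print native == bigend   # False on little-endian hosts

The unprefixed pack only happened to produce the right bytes on 32-bit big-endian machines.)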
diff --git a/Lib/test/test_array.py b/Lib/test/test_array.py
index 87d395d..62361fc 100755
--- a/Lib/test/test_array.py
+++ b/Lib/test/test_array.py
@@ -61,7 +61,7 @@ class BaseTest(unittest.TestCase):
bi = a.buffer_info()
self.assert_(isinstance(bi, tuple))
self.assertEqual(len(bi), 2)
- self.assert_(isinstance(bi[0], int))
+ self.assert_(isinstance(bi[0], (int, long)))
self.assert_(isinstance(bi[1], int))
self.assertEqual(bi[1], len(a))
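(buffer_info() returns the raw memory address and the element count; on 64-bit builds the address may not fit a Python int and comes back as a long, hence the widened isinstance check. For example, in Python 2:

    import array

    a = array.array('c', 'hello')
    addr, length = a.buffer_info()
    assert isinstance(addr, (int, long))   # may be long on 64-bit builds
    assert length == len(a)
)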
diff --git a/Lib/test/test_ast.py b/Lib/test/test_ast.py
index b42caa3..c64ad28 100644
--- a/Lib/test/test_ast.py
+++ b/Lib/test/test_ast.py
@@ -119,7 +119,8 @@ eval_tests = [
# excepthandler, arguments, keywords, alias
if __name__=='__main__' and sys.argv[1:] == ['-g']:
- for statements, kind in ((exec_tests, "exec"), (single_tests, "single"), (eval_tests, "eval")):
+ for statements, kind in ((exec_tests, "exec"), (single_tests, "single"),
+ (eval_tests, "eval")):
print kind+"_results = ["
for s in statements:
print repr(to_tuple(compile(s, "?", kind, 0x400)))+","
@@ -131,7 +132,7 @@ def test_order(ast_node, parent_pos):
if not isinstance(ast_node, _ast.AST) or ast_node._fields == None:
return
- if isinstance(ast_node, (_ast.expr, _ast.stmt)):
+ if isinstance(ast_node, (_ast.expr, _ast.stmt, _ast.excepthandler)):
node_pos = (ast_node.lineno, ast_node.col_offset)
assert node_pos >= parent_pos, (node_pos, parent_pos)
parent_pos = (ast_node.lineno, ast_node.col_offset)
@@ -145,8 +146,8 @@ def test_order(ast_node, parent_pos):
def run_tests():
for input, output, kind in ((exec_tests, exec_results, "exec"),
- (single_tests, single_results, "single"),
- (eval_tests, eval_results, "eval")):
+ (single_tests, single_results, "single"),
+ (eval_tests, eval_results, "eval")):
for i, o in itertools.izip(input, output):
ast_tree = compile(i, "?", kind, 0x400)
assert to_tuple(ast_tree) == o
@@ -165,7 +166,7 @@ exec_results = [
('Module', [('While', (1, 0), ('Name', (1, 6), 'v', ('Load',)), [('Pass', (1, 8))], [])]),
('Module', [('If', (1, 0), ('Name', (1, 3), 'v', ('Load',)), [('Pass', (1, 5))], [])]),
('Module', [('Raise', (1, 0), ('Name', (1, 6), 'Exception', ('Load',)), ('Str', (1, 17), 'string'), None)]),
-('Module', [('TryExcept', (1, 0), [('Pass', (2, 2))], [('excepthandler', ('Name', (3, 7), 'Exception', ('Load',)), None, [('Pass', (4, 2))])], [])]),
+('Module', [('TryExcept', (1, 0), [('Pass', (2, 2))], [('excepthandler', (3, 0), ('Name', (3, 7), 'Exception', ('Load',)), None, [('Pass', (4, 2))], 3, 0)], [])]),
('Module', [('TryFinally', (1, 0), [('Pass', (2, 2))], [('Pass', (4, 2))])]),
('Module', [('Assert', (1, 0), ('Name', (1, 7), 'v', ('Load',)), None)]),
('Module', [('Import', (1, 0), [('alias', 'sys', None)])]),
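(With this change an except clause is a real AST node carrying its own position, so tools walking the tree can locate handlers the same way as any statement. A quick check against the new expected tuple, on this branch; 0x400 is PyCF_ONLY_AST:

    src = "try:\n  pass\nexcept Exception:\n  pass\n"
    tree = compile(src, "?", "exec", 0x400)
    handler = tree.body[0].handlers[0]        # the excepthandler node
    print handler.lineno, handler.col_offset  # -> 3 0, as in the tuple above
)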
diff --git a/Lib/test/test_audioop.py b/Lib/test/test_audioop.py
index 440adab..f585733 100644
--- a/Lib/test/test_audioop.py
+++ b/Lib/test/test_audioop.py
@@ -136,12 +136,30 @@ def testlin2adpcm(data):
return 0
return 1
+def testlin2alaw(data):
+ if verbose:
+ print 'lin2alaw'
+ if audioop.lin2alaw(data[0], 1) != '\xd5\xc5\xf5' or \
+ audioop.lin2alaw(data[1], 2) != '\xd5\xd5\xd5' or \
+ audioop.lin2alaw(data[2], 4) != '\xd5\xd5\xd5':
+ return 0
+ return 1
+
+def testalaw2lin(data):
+ if verbose:
+ print 'alaw2lin'
+ # Cursory round-trip check only, since a-law is lossy in general
+ d = audioop.lin2alaw(data[0], 1)
+ if audioop.alaw2lin(d, 1) != data[0]:
+ return 0
+ return 1
+
def testlin2ulaw(data):
if verbose:
print 'lin2ulaw'
- if audioop.lin2ulaw(data[0], 1) != '\377\347\333' or \
- audioop.lin2ulaw(data[1], 2) != '\377\377\377' or \
- audioop.lin2ulaw(data[2], 4) != '\377\377\377':
+ if audioop.lin2ulaw(data[0], 1) != '\xff\xe7\xdb' or \
+ audioop.lin2ulaw(data[1], 2) != '\xff\xff\xff' or \
+ audioop.lin2ulaw(data[2], 4) != '\xff\xff\xff':
return 0
return 1
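(The new a-law coverage mirrors the existing u-law test, whose expected bytes are also rewritten here in \xNN form; the values are unchanged, e.g. '\377' == '\xff'. A-law is lossy in general, so the round-trip check is only cursory, on samples that survive quantization. Sketch in Python 2, assuming a fragment like the test's 1-byte-per-sample data:

    import audioop

    data = '\0\1\2'                       # 1-byte-per-sample fragment
    encoded = audioop.lin2alaw(data, 1)   # the test expects '\xd5\xc5\xf5'
    decoded = audioop.alaw2lin(encoded, 1)
    print repr(encoded), repr(decoded)    # decoded matches data here
)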
diff --git a/Lib/test/test_augassign.py b/Lib/test/test_augassign.py
index 22cca44..0309d6e 100644
--- a/Lib/test/test_augassign.py
+++ b/Lib/test/test_augassign.py
@@ -1,258 +1,312 @@
# Augmented assignment test.
-x = 2
-x += 1
-x *= 2
-x **= 2
-x -= 8
-x %= 12
-x >>= 1
-x &= 2
-x |= 5
-x ^= 1
-x <<= 2
-x /= 2
-x //= 2
-
-print x
-print int(x)
-
-x = [2]
-x[0] += 1
-x[0] *= 2
-x[0] **= 2
-x[0] -= 8
-x[0] %= 12
-x[0] >>= 1
-x[0] &= 2
-x[0] |= 5
-x[0] ^= 1
-x[0] <<= 2
-x[0] /= 2
-x[0] //= 2
-
-print x
-print int(x[0])
-
-x = {0: 2}
-x[0] += 1
-x[0] *= 2
-x[0] **= 2
-x[0] -= 8
-x[0] %= 12
-x[0] >>= 1
-x[0] &= 2
-x[0] |= 5
-x[0] ^= 1
-x[0] <<= 2
-x[0] /= 2
-x[0] //= 2
-
-print x[0]
-print int(x[0])
-
-x = [1,2]
-x += [3,4]
-x *= 2
-
-print x
-
-x = [1, 2, 3]
-y = x
-x[1:2] *= 2
-y[1:2] += [1]
-
-print x
-print x is y
-
-class aug_test:
- def __init__(self, value):
- self.val = value
- def __radd__(self, val):
- return self.val + val
- def __add__(self, val):
- return aug_test(self.val + val)
-
-
-class aug_test2(aug_test):
- def __iadd__(self, val):
- self.val = self.val + val
- return self
-
-class aug_test3(aug_test):
- def __iadd__(self, val):
- return aug_test3(self.val + val)
-
-x = aug_test(1)
-y = x
-x += 10
-
-print isinstance(x, aug_test)
-print y is not x
-print x.val
-
-x = aug_test2(2)
-y = x
-x += 10
-
-print y is x
-print x.val
-
-x = aug_test3(3)
-y = x
-x += 10
-
-print isinstance(x, aug_test3)
-print y is not x
-print x.val
-
-class testall:
-
- def __add__(self, val):
- print "__add__ called"
- def __radd__(self, val):
- print "__radd__ called"
- def __iadd__(self, val):
- print "__iadd__ called"
- return self
-
- def __sub__(self, val):
- print "__sub__ called"
- def __rsub__(self, val):
- print "__rsub__ called"
- def __isub__(self, val):
- print "__isub__ called"
- return self
-
- def __mul__(self, val):
- print "__mul__ called"
- def __rmul__(self, val):
- print "__rmul__ called"
- def __imul__(self, val):
- print "__imul__ called"
- return self
-
- def __floordiv__(self, val):
- print "__floordiv__ called"
- return self
- def __ifloordiv__(self, val):
- print "__ifloordiv__ called"
- return self
- def __rfloordiv__(self, val):
- print "__rfloordiv__ called"
- return self
-
- def __truediv__(self, val):
- print "__truediv__ called"
- return self
- def __itruediv__(self, val):
- print "__itruediv__ called"
- return self
- def __rtruediv__(self, val):
- print "__rtruediv__ called"
- return self
-
- def __mod__(self, val):
- print "__mod__ called"
- def __rmod__(self, val):
- print "__rmod__ called"
- def __imod__(self, val):
- print "__imod__ called"
- return self
-
- def __pow__(self, val):
- print "__pow__ called"
- def __rpow__(self, val):
- print "__rpow__ called"
- def __ipow__(self, val):
- print "__ipow__ called"
- return self
-
- def __or__(self, val):
- print "__or__ called"
- def __ror__(self, val):
- print "__ror__ called"
- def __ior__(self, val):
- print "__ior__ called"
- return self
-
- def __and__(self, val):
- print "__and__ called"
- def __rand__(self, val):
- print "__rand__ called"
- def __iand__(self, val):
- print "__iand__ called"
- return self
-
- def __xor__(self, val):
- print "__xor__ called"
- def __rxor__(self, val):
- print "__rxor__ called"
- def __ixor__(self, val):
- print "__ixor__ called"
- return self
-
- def __rshift__(self, val):
- print "__rshift__ called"
- def __rrshift__(self, val):
- print "__rrshift__ called"
- def __irshift__(self, val):
- print "__irshift__ called"
- return self
-
- def __lshift__(self, val):
- print "__lshift__ called"
- def __rlshift__(self, val):
- print "__rlshift__ called"
- def __ilshift__(self, val):
- print "__ilshift__ called"
- return self
-
-x = testall()
-x + 1
-1 + x
-x += 1
-
-x - 1
-1 - x
-x -= 1
-
-x * 1
-1 * x
-x *= 1
-
-x / 1
-1 / x
-x /= 1
-
-x // 1
-1 // x
-x //= 1
-
-x % 1
-1 % x
-x %= 1
-
-x ** 1
-1 ** x
-x **= 1
-
-x | 1
-1 | x
-x |= 1
-
-x & 1
-1 & x
-x &= 1
-
-x ^ 1
-1 ^ x
-x ^= 1
-
-x >> 1
-1 >> x
-x >>= 1
-
-x << 1
-1 << x
-x <<= 1
+from test.test_support import run_unittest
+import unittest
+
+
+class AugAssignTest(unittest.TestCase):
+ def testBasic(self):
+ x = 2
+ x += 1
+ x *= 2
+ x **= 2
+ x -= 8
+ x //= 5
+ x %= 3
+ x &= 2
+ x |= 5
+ x ^= 1
+ x /= 2
+ self.assertEquals(x, 3.0)
+
+ def testInList(self):
+ x = [2]
+ x[0] += 1
+ x[0] *= 2
+ x[0] **= 2
+ x[0] -= 8
+ x[0] //= 5
+ x[0] %= 3
+ x[0] &= 2
+ x[0] |= 5
+ x[0] ^= 1
+ x[0] /= 2
+ self.assertEquals(x[0], 3.0)
+
+ def testInDict(self):
+ x = {0: 2}
+ x[0] += 1
+ x[0] *= 2
+ x[0] **= 2
+ x[0] -= 8
+ x[0] //= 5
+ x[0] %= 3
+ x[0] &= 2
+ x[0] |= 5
+ x[0] ^= 1
+ x[0] /= 2
+ self.assertEquals(x[0], 3.0)
+
+ def testSequences(self):
+ x = [1,2]
+ x += [3,4]
+ x *= 2
+
+ self.assertEquals(x, [1, 2, 3, 4, 1, 2, 3, 4])
+
+ x = [1, 2, 3]
+ y = x
+ x[1:2] *= 2
+ y[1:2] += [1]
+
+ self.assertEquals(x, [1, 2, 1, 2, 3])
+ self.assert_(x is y)
+
+ def testCustomMethods1(self):
+
+ class aug_test:
+ def __init__(self, value):
+ self.val = value
+ def __radd__(self, val):
+ return self.val + val
+ def __add__(self, val):
+ return aug_test(self.val + val)
+
+ class aug_test2(aug_test):
+ def __iadd__(self, val):
+ self.val = self.val + val
+ return self
+
+ class aug_test3(aug_test):
+ def __iadd__(self, val):
+ return aug_test3(self.val + val)
+
+ x = aug_test(1)
+ y = x
+ x += 10
+
+ self.assert_(isinstance(x, aug_test))
+ self.assert_(y is not x)
+ self.assertEquals(x.val, 11)
+
+ x = aug_test2(2)
+ y = x
+ x += 10
+
+ self.assert_(y is x)
+ self.assertEquals(x.val, 12)
+
+ x = aug_test3(3)
+ y = x
+ x += 10
+
+ self.assert_(isinstance(x, aug_test3))
+ self.assert_(y is not x)
+ self.assertEquals(x.val, 13)
+
+
+ def testCustomMethods2(test_self):
+ output = []
+
+ class testall:
+ def __add__(self, val):
+ output.append("__add__ called")
+ def __radd__(self, val):
+ output.append("__radd__ called")
+ def __iadd__(self, val):
+ output.append("__iadd__ called")
+ return self
+
+ def __sub__(self, val):
+ output.append("__sub__ called")
+ def __rsub__(self, val):
+ output.append("__rsub__ called")
+ def __isub__(self, val):
+ output.append("__isub__ called")
+ return self
+
+ def __mul__(self, val):
+ output.append("__mul__ called")
+ def __rmul__(self, val):
+ output.append("__rmul__ called")
+ def __imul__(self, val):
+ output.append("__imul__ called")
+ return self
+
+ def __div__(self, val):
+ output.append("__div__ called")
+ def __rdiv__(self, val):
+ output.append("__rdiv__ called")
+ def __idiv__(self, val):
+ output.append("__idiv__ called")
+ return self
+
+ def __floordiv__(self, val):
+ output.append("__floordiv__ called")
+ return self
+ def __ifloordiv__(self, val):
+ output.append("__ifloordiv__ called")
+ return self
+ def __rfloordiv__(self, val):
+ output.append("__rfloordiv__ called")
+ return self
+
+ def __truediv__(self, val):
+ output.append("__truediv__ called")
+ return self
+ def __rtruediv__(self, val):
+ output.append("__rtruediv__ called")
+ return self
+ def __itruediv__(self, val):
+ output.append("__itruediv__ called")
+ return self
+
+ def __mod__(self, val):
+ output.append("__mod__ called")
+ def __rmod__(self, val):
+ output.append("__rmod__ called")
+ def __imod__(self, val):
+ output.append("__imod__ called")
+ return self
+
+ def __pow__(self, val):
+ output.append("__pow__ called")
+ def __rpow__(self, val):
+ output.append("__rpow__ called")
+ def __ipow__(self, val):
+ output.append("__ipow__ called")
+ return self
+
+ def __or__(self, val):
+ output.append("__or__ called")
+ def __ror__(self, val):
+ output.append("__ror__ called")
+ def __ior__(self, val):
+ output.append("__ior__ called")
+ return self
+
+ def __and__(self, val):
+ output.append("__and__ called")
+ def __rand__(self, val):
+ output.append("__rand__ called")
+ def __iand__(self, val):
+ output.append("__iand__ called")
+ return self
+
+ def __xor__(self, val):
+ output.append("__xor__ called")
+ def __rxor__(self, val):
+ output.append("__rxor__ called")
+ def __ixor__(self, val):
+ output.append("__ixor__ called")
+ return self
+
+ def __rshift__(self, val):
+ output.append("__rshift__ called")
+ def __rrshift__(self, val):
+ output.append("__rrshift__ called")
+ def __irshift__(self, val):
+ output.append("__irshift__ called")
+ return self
+
+ def __lshift__(self, val):
+ output.append("__lshift__ called")
+ def __rlshift__(self, val):
+ output.append("__rlshift__ called")
+ def __ilshift__(self, val):
+ output.append("__ilshift__ called")
+ return self
+
+ x = testall()
+ x + 1
+ 1 + x
+ x += 1
+
+ x - 1
+ 1 - x
+ x -= 1
+
+ x * 1
+ 1 * x
+ x *= 1
+
+ x / 1
+ 1 / x
+ x /= 1
+
+ x // 1
+ 1 // x
+ x //= 1
+
+ x % 1
+ 1 % x
+ x %= 1
+
+ x ** 1
+ 1 ** x
+ x **= 1
+
+ x | 1
+ 1 | x
+ x |= 1
+
+ x & 1
+ 1 & x
+ x &= 1
+
+ x ^ 1
+ 1 ^ x
+ x ^= 1
+
+ x >> 1
+ 1 >> x
+ x >>= 1
+
+ x << 1
+ 1 << x
+ x <<= 1
+
+ test_self.assertEquals(output, '''\
+__add__ called
+__radd__ called
+__iadd__ called
+__sub__ called
+__rsub__ called
+__isub__ called
+__mul__ called
+__rmul__ called
+__imul__ called
+__truediv__ called
+__rtruediv__ called
+__itruediv__ called
+__floordiv__ called
+__rfloordiv__ called
+__ifloordiv__ called
+__mod__ called
+__rmod__ called
+__imod__ called
+__pow__ called
+__rpow__ called
+__ipow__ called
+__or__ called
+__ror__ called
+__ior__ called
+__and__ called
+__rand__ called
+__iand__ called
+__xor__ called
+__rxor__ called
+__ixor__ called
+__rshift__ called
+__rrshift__ called
+__irshift__ called
+__lshift__ called
+__rlshift__ called
+__ilshift__ called
+'''.splitlines())
+
+def test_main():
+ run_unittest(AugAssignTest)
+
+if __name__ == '__main__':
+ test_main()
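(Two things are worth noting in this rewrite: the expected call log lists __truediv__ rather than __div__ because this branch compiles with true division by default, and testCustomMethods1 pins down the in-place fallback rule: x += y uses __iadd__ when defined, otherwise falls back to __add__/__radd__ and rebinds x. A standalone sketch of that rule in Python 2:

    class WithIAdd(object):
        def __init__(self, v):
            self.v = v
        def __iadd__(self, other):
            self.v += other
            return self                  # mutate and return the same object

    class WithoutIAdd(object):
        def __init__(self, v):
            self.v = v
        def __add__(self, other):
            return WithoutIAdd(self.v + other)   # build a new object

    a = b = WithIAdd(1)
    a += 10
    print a is b, a.v    # True 11  -- aug_test2 behaves like this

    c = d = WithoutIAdd(1)
    c += 10
    print c is d, c.v    # False 11 -- aug_test and aug_test3 behave like this
)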
diff --git a/Lib/test/test_bsddb.py b/Lib/test/test_bsddb.py
index 1ec4801..513e541 100755
--- a/Lib/test/test_bsddb.py
+++ b/Lib/test/test_bsddb.py
@@ -11,9 +11,10 @@ from test import test_support
from sets import Set
class TestBSDDB(unittest.TestCase):
+ openflag = 'c'
def setUp(self):
- self.f = self.openmethod[0](self.fname, 'c')
+ self.f = self.openmethod[0](self.fname, self.openflag, cachesize=32768)
self.d = dict(q='Guido', w='van', e='Rossum', r='invented', t='Python', y='')
for k, v in self.d.iteritems():
self.f[k] = v
@@ -267,6 +268,11 @@ class TestBTree_InMemory(TestBSDDB):
fname = None
openmethod = [bsddb.btopen]
+class TestBTree_InMemory_Truncate(TestBSDDB):
+ fname = None
+ openflag = 'n'
+ openmethod = [bsddb.btopen]
+
class TestHashTable(TestBSDDB):
fname = test_support.TESTFN
openmethod = [bsddb.hashopen]
@@ -285,6 +291,7 @@ def test_main(verbose=None):
TestHashTable,
TestBTree_InMemory,
TestHashTable_InMemory,
+ TestBTree_InMemory_Truncate,
)
if __name__ == "__main__":
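(The new subclass reruns the whole suite with flag 'n', which truncates any existing table, next to the default 'c', which creates the table only if missing; setUp also now passes an explicit cachesize. Minimal usage sketch, mirroring the in-memory case where fname is None:

    import bsddb

    db = bsddb.btopen(None, 'n', cachesize=32768)  # fresh, truncated btree
    db['key'] = 'value'
    print db['key']
    db.close()
)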
diff --git a/Lib/test/test_builtin.py b/Lib/test/test_builtin.py
index 6f11fdd..ef4f407 100644
--- a/Lib/test/test_builtin.py
+++ b/Lib/test/test_builtin.py
@@ -108,6 +108,7 @@ class BuiltinTest(unittest.TestCase):
__import__('string')
self.assertRaises(ImportError, __import__, 'spamspam')
self.assertRaises(TypeError, __import__, 1, 2, 3, 4)
+ self.assertRaises(ValueError, __import__, '')
def test_abs(self):
# int
@@ -1317,6 +1318,9 @@ class BuiltinTest(unittest.TestCase):
self.assertEqual(round(-8.0, -1), -10.0)
+ # test new kwargs
+ self.assertEqual(round(number=-8.0, ndigits=-1), -10.0)
+
self.assertRaises(TypeError, round)
def test_setattr(self):
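(Both additions are easy to try interactively; a quick sketch in Python 2:

    print round(number=-8.0, ndigits=-1)   # -10.0; round() now takes keywords

    try:
        __import__('')
    except ValueError, e:
        print 'ValueError:', e             # empty module names are rejected
)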
diff --git a/Lib/test/test_calendar.py b/Lib/test/test_calendar.py
index 34d365b..e414324 100644
--- a/Lib/test/test_calendar.py
+++ b/Lib/test/test_calendar.py
@@ -4,6 +4,202 @@ import unittest
from test import test_support
+result_2004_text = """
+ 2004
+
+ January February March
+Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
+ 1 2 3 4 1 1 2 3 4 5 6 7
+ 5 6 7 8 9 10 11 2 3 4 5 6 7 8 8 9 10 11 12 13 14
+12 13 14 15 16 17 18 9 10 11 12 13 14 15 15 16 17 18 19 20 21
+19 20 21 22 23 24 25 16 17 18 19 20 21 22 22 23 24 25 26 27 28
+26 27 28 29 30 31 23 24 25 26 27 28 29 29 30 31
+
+ April May June
+Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
+ 1 2 3 4 1 2 1 2 3 4 5 6
+ 5 6 7 8 9 10 11 3 4 5 6 7 8 9 7 8 9 10 11 12 13
+12 13 14 15 16 17 18 10 11 12 13 14 15 16 14 15 16 17 18 19 20
+19 20 21 22 23 24 25 17 18 19 20 21 22 23 21 22 23 24 25 26 27
+26 27 28 29 30 24 25 26 27 28 29 30 28 29 30
+ 31
+
+ July August September
+Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
+ 1 2 3 4 1 1 2 3 4 5
+ 5 6 7 8 9 10 11 2 3 4 5 6 7 8 6 7 8 9 10 11 12
+12 13 14 15 16 17 18 9 10 11 12 13 14 15 13 14 15 16 17 18 19
+19 20 21 22 23 24 25 16 17 18 19 20 21 22 20 21 22 23 24 25 26
+26 27 28 29 30 31 23 24 25 26 27 28 29 27 28 29 30
+ 30 31
+
+ October November December
+Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
+ 1 2 3 1 2 3 4 5 6 7 1 2 3 4 5
+ 4 5 6 7 8 9 10 8 9 10 11 12 13 14 6 7 8 9 10 11 12
+11 12 13 14 15 16 17 15 16 17 18 19 20 21 13 14 15 16 17 18 19
+18 19 20 21 22 23 24 22 23 24 25 26 27 28 20 21 22 23 24 25 26
+25 26 27 28 29 30 31 29 30 27 28 29 30 31
+"""
+
+result_2004_html = """
+<?xml version="1.0" encoding="ascii"?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html>
+<head>
+<meta http-equiv="Content-Type" content="text/html; charset=ascii" />
+<link rel="stylesheet" type="text/css" href="calendar.css" />
+<title>Calendar for 2004</title
+</head>
+<body>
+<table border="0" cellpadding="0" cellspacing="0" class="year">
+<tr><th colspan="3" class="year">2004</th></tr><tr><td><table border="0" cellpadding="0" cellspacing="0" class="month">
+<tr><th colspan="7" class="month">January</th></tr>
+<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
+<tr><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="thu">1</td><td class="fri">2</td><td class="sat">3</td><td class="sun">4</td></tr>
+<tr><td class="mon">5</td><td class="tue">6</td><td class="wed">7</td><td class="thu">8</td><td class="fri">9</td><td class="sat">10</td><td class="sun">11</td></tr>
+<tr><td class="mon">12</td><td class="tue">13</td><td class="wed">14</td><td class="thu">15</td><td class="fri">16</td><td class="sat">17</td><td class="sun">18</td></tr>
+<tr><td class="mon">19</td><td class="tue">20</td><td class="wed">21</td><td class="thu">22</td><td class="fri">23</td><td class="sat">24</td><td class="sun">25</td></tr>
+<tr><td class="mon">26</td><td class="tue">27</td><td class="wed">28</td><td class="thu">29</td><td class="fri">30</td><td class="sat">31</td><td class="noday">&nbsp;</td></tr>
+</table>
+</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
+<tr><th colspan="7" class="month">February</th></tr>
+<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
+<tr><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="sun">1</td></tr>
+<tr><td class="mon">2</td><td class="tue">3</td><td class="wed">4</td><td class="thu">5</td><td class="fri">6</td><td class="sat">7</td><td class="sun">8</td></tr>
+<tr><td class="mon">9</td><td class="tue">10</td><td class="wed">11</td><td class="thu">12</td><td class="fri">13</td><td class="sat">14</td><td class="sun">15</td></tr>
+<tr><td class="mon">16</td><td class="tue">17</td><td class="wed">18</td><td class="thu">19</td><td class="fri">20</td><td class="sat">21</td><td class="sun">22</td></tr>
+<tr><td class="mon">23</td><td class="tue">24</td><td class="wed">25</td><td class="thu">26</td><td class="fri">27</td><td class="sat">28</td><td class="sun">29</td></tr>
+</table>
+</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
+<tr><th colspan="7" class="month">March</th></tr>
+<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
+<tr><td class="mon">1</td><td class="tue">2</td><td class="wed">3</td><td class="thu">4</td><td class="fri">5</td><td class="sat">6</td><td class="sun">7</td></tr>
+<tr><td class="mon">8</td><td class="tue">9</td><td class="wed">10</td><td class="thu">11</td><td class="fri">12</td><td class="sat">13</td><td class="sun">14</td></tr>
+<tr><td class="mon">15</td><td class="tue">16</td><td class="wed">17</td><td class="thu">18</td><td class="fri">19</td><td class="sat">20</td><td class="sun">21</td></tr>
+<tr><td class="mon">22</td><td class="tue">23</td><td class="wed">24</td><td class="thu">25</td><td class="fri">26</td><td class="sat">27</td><td class="sun">28</td></tr>
+<tr><td class="mon">29</td><td class="tue">30</td><td class="wed">31</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td></tr>
+</table>
+</td></tr><tr><td><table border="0" cellpadding="0" cellspacing="0" class="month">
+<tr><th colspan="7" class="month">April</th></tr>
+<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
+<tr><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="thu">1</td><td class="fri">2</td><td class="sat">3</td><td class="sun">4</td></tr>
+<tr><td class="mon">5</td><td class="tue">6</td><td class="wed">7</td><td class="thu">8</td><td class="fri">9</td><td class="sat">10</td><td class="sun">11</td></tr>
+<tr><td class="mon">12</td><td class="tue">13</td><td class="wed">14</td><td class="thu">15</td><td class="fri">16</td><td class="sat">17</td><td class="sun">18</td></tr>
+<tr><td class="mon">19</td><td class="tue">20</td><td class="wed">21</td><td class="thu">22</td><td class="fri">23</td><td class="sat">24</td><td class="sun">25</td></tr>
+<tr><td class="mon">26</td><td class="tue">27</td><td class="wed">28</td><td class="thu">29</td><td class="fri">30</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td></tr>
+</table>
+</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
+<tr><th colspan="7" class="month">May</th></tr>
+<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
+<tr><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="sat">1</td><td class="sun">2</td></tr>
+<tr><td class="mon">3</td><td class="tue">4</td><td class="wed">5</td><td class="thu">6</td><td class="fri">7</td><td class="sat">8</td><td class="sun">9</td></tr>
+<tr><td class="mon">10</td><td class="tue">11</td><td class="wed">12</td><td class="thu">13</td><td class="fri">14</td><td class="sat">15</td><td class="sun">16</td></tr>
+<tr><td class="mon">17</td><td class="tue">18</td><td class="wed">19</td><td class="thu">20</td><td class="fri">21</td><td class="sat">22</td><td class="sun">23</td></tr>
+<tr><td class="mon">24</td><td class="tue">25</td><td class="wed">26</td><td class="thu">27</td><td class="fri">28</td><td class="sat">29</td><td class="sun">30</td></tr>
+<tr><td class="mon">31</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td></tr>
+</table>
+</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
+<tr><th colspan="7" class="month">June</th></tr>
+<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
+<tr><td class="noday">&nbsp;</td><td class="tue">1</td><td class="wed">2</td><td class="thu">3</td><td class="fri">4</td><td class="sat">5</td><td class="sun">6</td></tr>
+<tr><td class="mon">7</td><td class="tue">8</td><td class="wed">9</td><td class="thu">10</td><td class="fri">11</td><td class="sat">12</td><td class="sun">13</td></tr>
+<tr><td class="mon">14</td><td class="tue">15</td><td class="wed">16</td><td class="thu">17</td><td class="fri">18</td><td class="sat">19</td><td class="sun">20</td></tr>
+<tr><td class="mon">21</td><td class="tue">22</td><td class="wed">23</td><td class="thu">24</td><td class="fri">25</td><td class="sat">26</td><td class="sun">27</td></tr>
+<tr><td class="mon">28</td><td class="tue">29</td><td class="wed">30</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td></tr>
+</table>
+</td></tr><tr><td><table border="0" cellpadding="0" cellspacing="0" class="month">
+<tr><th colspan="7" class="month">July</th></tr>
+<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
+<tr><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="thu">1</td><td class="fri">2</td><td class="sat">3</td><td class="sun">4</td></tr>
+<tr><td class="mon">5</td><td class="tue">6</td><td class="wed">7</td><td class="thu">8</td><td class="fri">9</td><td class="sat">10</td><td class="sun">11</td></tr>
+<tr><td class="mon">12</td><td class="tue">13</td><td class="wed">14</td><td class="thu">15</td><td class="fri">16</td><td class="sat">17</td><td class="sun">18</td></tr>
+<tr><td class="mon">19</td><td class="tue">20</td><td class="wed">21</td><td class="thu">22</td><td class="fri">23</td><td class="sat">24</td><td class="sun">25</td></tr>
+<tr><td class="mon">26</td><td class="tue">27</td><td class="wed">28</td><td class="thu">29</td><td class="fri">30</td><td class="sat">31</td><td class="noday">&nbsp;</td></tr>
+</table>
+</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
+<tr><th colspan="7" class="month">August</th></tr>
+<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
+<tr><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="sun">1</td></tr>
+<tr><td class="mon">2</td><td class="tue">3</td><td class="wed">4</td><td class="thu">5</td><td class="fri">6</td><td class="sat">7</td><td class="sun">8</td></tr>
+<tr><td class="mon">9</td><td class="tue">10</td><td class="wed">11</td><td class="thu">12</td><td class="fri">13</td><td class="sat">14</td><td class="sun">15</td></tr>
+<tr><td class="mon">16</td><td class="tue">17</td><td class="wed">18</td><td class="thu">19</td><td class="fri">20</td><td class="sat">21</td><td class="sun">22</td></tr>
+<tr><td class="mon">23</td><td class="tue">24</td><td class="wed">25</td><td class="thu">26</td><td class="fri">27</td><td class="sat">28</td><td class="sun">29</td></tr>
+<tr><td class="mon">30</td><td class="tue">31</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td></tr>
+</table>
+</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
+<tr><th colspan="7" class="month">September</th></tr>
+<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
+<tr><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="wed">1</td><td class="thu">2</td><td class="fri">3</td><td class="sat">4</td><td class="sun">5</td></tr>
+<tr><td class="mon">6</td><td class="tue">7</td><td class="wed">8</td><td class="thu">9</td><td class="fri">10</td><td class="sat">11</td><td class="sun">12</td></tr>
+<tr><td class="mon">13</td><td class="tue">14</td><td class="wed">15</td><td class="thu">16</td><td class="fri">17</td><td class="sat">18</td><td class="sun">19</td></tr>
+<tr><td class="mon">20</td><td class="tue">21</td><td class="wed">22</td><td class="thu">23</td><td class="fri">24</td><td class="sat">25</td><td class="sun">26</td></tr>
+<tr><td class="mon">27</td><td class="tue">28</td><td class="wed">29</td><td class="thu">30</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td></tr>
+</table>
+</td></tr><tr><td><table border="0" cellpadding="0" cellspacing="0" class="month">
+<tr><th colspan="7" class="month">October</th></tr>
+<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
+<tr><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="fri">1</td><td class="sat">2</td><td class="sun">3</td></tr>
+<tr><td class="mon">4</td><td class="tue">5</td><td class="wed">6</td><td class="thu">7</td><td class="fri">8</td><td class="sat">9</td><td class="sun">10</td></tr>
+<tr><td class="mon">11</td><td class="tue">12</td><td class="wed">13</td><td class="thu">14</td><td class="fri">15</td><td class="sat">16</td><td class="sun">17</td></tr>
+<tr><td class="mon">18</td><td class="tue">19</td><td class="wed">20</td><td class="thu">21</td><td class="fri">22</td><td class="sat">23</td><td class="sun">24</td></tr>
+<tr><td class="mon">25</td><td class="tue">26</td><td class="wed">27</td><td class="thu">28</td><td class="fri">29</td><td class="sat">30</td><td class="sun">31</td></tr>
+</table>
+</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
+<tr><th colspan="7" class="month">November</th></tr>
+<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
+<tr><td class="mon">1</td><td class="tue">2</td><td class="wed">3</td><td class="thu">4</td><td class="fri">5</td><td class="sat">6</td><td class="sun">7</td></tr>
+<tr><td class="mon">8</td><td class="tue">9</td><td class="wed">10</td><td class="thu">11</td><td class="fri">12</td><td class="sat">13</td><td class="sun">14</td></tr>
+<tr><td class="mon">15</td><td class="tue">16</td><td class="wed">17</td><td class="thu">18</td><td class="fri">19</td><td class="sat">20</td><td class="sun">21</td></tr>
+<tr><td class="mon">22</td><td class="tue">23</td><td class="wed">24</td><td class="thu">25</td><td class="fri">26</td><td class="sat">27</td><td class="sun">28</td></tr>
+<tr><td class="mon">29</td><td class="tue">30</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td></tr>
+</table>
+</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
+<tr><th colspan="7" class="month">December</th></tr>
+<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
+<tr><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td><td class="wed">1</td><td class="thu">2</td><td class="fri">3</td><td class="sat">4</td><td class="sun">5</td></tr>
+<tr><td class="mon">6</td><td class="tue">7</td><td class="wed">8</td><td class="thu">9</td><td class="fri">10</td><td class="sat">11</td><td class="sun">12</td></tr>
+<tr><td class="mon">13</td><td class="tue">14</td><td class="wed">15</td><td class="thu">16</td><td class="fri">17</td><td class="sat">18</td><td class="sun">19</td></tr>
+<tr><td class="mon">20</td><td class="tue">21</td><td class="wed">22</td><td class="thu">23</td><td class="fri">24</td><td class="sat">25</td><td class="sun">26</td></tr>
+<tr><td class="mon">27</td><td class="tue">28</td><td class="wed">29</td><td class="thu">30</td><td class="fri">31</td><td class="noday">&nbsp;</td><td class="noday">&nbsp;</td></tr>
+</table>
+</td></tr></table></body>
+</html>
+"""
+
+
+class OutputTestCase(unittest.TestCase):
+ def normalize_calendar(self, s):
+        # Filters out locale-dependent strings
+ def neitherspacenordigit(c):
+ return not c.isspace() and not c.isdigit()
+
+ lines = []
+ for line in s.splitlines(False):
+ # Drop texts, as they are locale dependent
+ if line and not filter(neitherspacenordigit, line):
+ lines.append(line)
+ return lines
+
+ def test_output(self):
+ self.assertEqual(
+ self.normalize_calendar(calendar.calendar(2004)),
+ self.normalize_calendar(result_2004_text)
+ )
+
+ def test_output_textcalendar(self):
+ self.assertEqual(
+ calendar.TextCalendar().formatyear(2004).strip(),
+ result_2004_text.strip()
+ )
+
+ def test_output_htmlcalendar(self):
+ self.assertEqual(
+ calendar.HTMLCalendar().formatyearpage(2004).strip(),
+ result_2004_html.strip()
+ )
+
+
class CalendarTestCase(unittest.TestCase):
def test_isleap(self):
# Make sure that the return is right for a few years, and
@@ -72,57 +268,57 @@ class MondayTestCase(MonthCalendarTestCase):
firstweekday = calendar.MONDAY
def test_february(self):
- # A 28-day february starting of monday (7+7+7+7 days)
+ # A 28-day february starting on monday (7+7+7+7 days)
self.check_weeks(1999, 2, (7, 7, 7, 7))
- # A 28-day february starting of tuesday (6+7+7+7+1 days)
+ # A 28-day february starting on tuesday (6+7+7+7+1 days)
self.check_weeks(2005, 2, (6, 7, 7, 7, 1))
- # A 28-day february starting of sunday (1+7+7+7+6 days)
+ # A 28-day february starting on sunday (1+7+7+7+6 days)
self.check_weeks(1987, 2, (1, 7, 7, 7, 6))
- # A 29-day february starting of monday (7+7+7+7+1 days)
+ # A 29-day february starting on monday (7+7+7+7+1 days)
self.check_weeks(1988, 2, (7, 7, 7, 7, 1))
- # A 29-day february starting of tuesday (6+7+7+7+2 days)
+ # A 29-day february starting on tuesday (6+7+7+7+2 days)
self.check_weeks(1972, 2, (6, 7, 7, 7, 2))
- # A 29-day february starting of sunday (1+7+7+7+7 days)
+ # A 29-day february starting on sunday (1+7+7+7+7 days)
self.check_weeks(2004, 2, (1, 7, 7, 7, 7))
def test_april(self):
- # A 30-day april starting of monday (7+7+7+7+2 days)
+ # A 30-day april starting on monday (7+7+7+7+2 days)
self.check_weeks(1935, 4, (7, 7, 7, 7, 2))
- # A 30-day april starting of tuesday (6+7+7+7+3 days)
+ # A 30-day april starting on tuesday (6+7+7+7+3 days)
self.check_weeks(1975, 4, (6, 7, 7, 7, 3))
- # A 30-day april starting of sunday (1+7+7+7+7+1 days)
+ # A 30-day april starting on sunday (1+7+7+7+7+1 days)
self.check_weeks(1945, 4, (1, 7, 7, 7, 7, 1))
- # A 30-day april starting of saturday (2+7+7+7+7 days)
+ # A 30-day april starting on saturday (2+7+7+7+7 days)
self.check_weeks(1995, 4, (2, 7, 7, 7, 7))
- # A 30-day april starting of friday (3+7+7+7+6 days)
+ # A 30-day april starting on friday (3+7+7+7+6 days)
self.check_weeks(1994, 4, (3, 7, 7, 7, 6))
def test_december(self):
- # A 31-day december starting of monday (7+7+7+7+3 days)
+ # A 31-day december starting on monday (7+7+7+7+3 days)
self.check_weeks(1980, 12, (7, 7, 7, 7, 3))
- # A 31-day december starting of tuesday (6+7+7+7+4 days)
+ # A 31-day december starting on tuesday (6+7+7+7+4 days)
self.check_weeks(1987, 12, (6, 7, 7, 7, 4))
- # A 31-day december starting of sunday (1+7+7+7+7+2 days)
+ # A 31-day december starting on sunday (1+7+7+7+7+2 days)
self.check_weeks(1968, 12, (1, 7, 7, 7, 7, 2))
- # A 31-day december starting of thursday (4+7+7+7+6 days)
+ # A 31-day december starting on thursday (4+7+7+7+6 days)
self.check_weeks(1988, 12, (4, 7, 7, 7, 6))
- # A 31-day december starting of friday (3+7+7+7+7 days)
+ # A 31-day december starting on friday (3+7+7+7+7 days)
self.check_weeks(2017, 12, (3, 7, 7, 7, 7))
- # A 31-day december starting of saturday (2+7+7+7+7+1 days)
+ # A 31-day december starting on saturday (2+7+7+7+7+1 days)
self.check_weeks(2068, 12, (2, 7, 7, 7, 7, 1))
@@ -130,62 +326,63 @@ class SundayTestCase(MonthCalendarTestCase):
firstweekday = calendar.SUNDAY
def test_february(self):
- # A 28-day february starting of sunday (7+7+7+7 days)
+ # A 28-day february starting on sunday (7+7+7+7 days)
self.check_weeks(2009, 2, (7, 7, 7, 7))
- # A 28-day february starting of monday (6+7+7+7+1 days)
+ # A 28-day february starting on monday (6+7+7+7+1 days)
self.check_weeks(1999, 2, (6, 7, 7, 7, 1))
- # A 28-day february starting of saturday (1+7+7+7+6 days)
+ # A 28-day february starting on saturday (1+7+7+7+6 days)
self.check_weeks(1997, 2, (1, 7, 7, 7, 6))
- # A 29-day february starting of sunday (7+7+7+7+1 days)
+ # A 29-day february starting on sunday (7+7+7+7+1 days)
self.check_weeks(2004, 2, (7, 7, 7, 7, 1))
- # A 29-day february starting of monday (6+7+7+7+2 days)
+ # A 29-day february starting on monday (6+7+7+7+2 days)
self.check_weeks(1960, 2, (6, 7, 7, 7, 2))
- # A 29-day february starting of saturday (1+7+7+7+7 days)
+ # A 29-day february starting on saturday (1+7+7+7+7 days)
self.check_weeks(1964, 2, (1, 7, 7, 7, 7))
def test_april(self):
- # A 30-day april starting of sunday (7+7+7+7+2 days)
+ # A 30-day april starting on sunday (7+7+7+7+2 days)
self.check_weeks(1923, 4, (7, 7, 7, 7, 2))
- # A 30-day april starting of monday (6+7+7+7+3 days)
+ # A 30-day april starting on monday (6+7+7+7+3 days)
self.check_weeks(1918, 4, (6, 7, 7, 7, 3))
- # A 30-day april starting of saturday (1+7+7+7+7+1 days)
+ # A 30-day april starting on saturday (1+7+7+7+7+1 days)
self.check_weeks(1950, 4, (1, 7, 7, 7, 7, 1))
- # A 30-day april starting of friday (2+7+7+7+7 days)
+ # A 30-day april starting on friday (2+7+7+7+7 days)
self.check_weeks(1960, 4, (2, 7, 7, 7, 7))
- # A 30-day april starting of thursday (3+7+7+7+6 days)
+ # A 30-day april starting on thursday (3+7+7+7+6 days)
self.check_weeks(1909, 4, (3, 7, 7, 7, 6))
def test_december(self):
- # A 31-day december starting of sunday (7+7+7+7+3 days)
+ # A 31-day december starting on sunday (7+7+7+7+3 days)
self.check_weeks(2080, 12, (7, 7, 7, 7, 3))
- # A 31-day december starting of monday (6+7+7+7+4 days)
+ # A 31-day december starting on monday (6+7+7+7+4 days)
self.check_weeks(1941, 12, (6, 7, 7, 7, 4))
- # A 31-day december starting of saturday (1+7+7+7+7+2 days)
+ # A 31-day december starting on saturday (1+7+7+7+7+2 days)
self.check_weeks(1923, 12, (1, 7, 7, 7, 7, 2))
- # A 31-day december starting of wednesday (4+7+7+7+6 days)
+ # A 31-day december starting on wednesday (4+7+7+7+6 days)
self.check_weeks(1948, 12, (4, 7, 7, 7, 6))
- # A 31-day december starting of thursday (3+7+7+7+7 days)
+ # A 31-day december starting on thursday (3+7+7+7+7 days)
self.check_weeks(1927, 12, (3, 7, 7, 7, 7))
- # A 31-day december starting of friday (2+7+7+7+7+1 days)
+ # A 31-day december starting on friday (2+7+7+7+7+1 days)
self.check_weeks(1995, 12, (2, 7, 7, 7, 7, 1))
def test_main():
test_support.run_unittest(
+ OutputTestCase,
CalendarTestCase,
MondayTestCase,
SundayTestCase
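A quick sketch of the filter() idiom used by normalize_calendar() above (assuming
Python 2 semantics, where filter() over a str returns a str of the matching
characters, so an empty result means the line contains only digits and whitespace):

    def neitherspacenordigit(c):
        return not c.isspace() and not c.isdigit()

    assert filter(neitherspacenordigit, ' 1  2  3 ') == ''         # kept by the normalizer
    assert filter(neitherspacenordigit, ' January ') == 'January'  # dropped as locale-dependent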
diff --git a/Lib/test/test_capi.py b/Lib/test/test_capi.py
index 1dd2461..cdd84bb 100644
--- a/Lib/test/test_capi.py
+++ b/Lib/test/test_capi.py
@@ -5,44 +5,51 @@ import sys
from test import test_support
import _testcapi
-for name in dir(_testcapi):
- if name.startswith('test_'):
- test = getattr(_testcapi, name)
+def test_main():
+
+ for name in dir(_testcapi):
+ if name.startswith('test_'):
+ test = getattr(_testcapi, name)
+ if test_support.verbose:
+ print "internal", name
+ try:
+ test()
+ except _testcapi.error:
+ raise test_support.TestFailed, sys.exc_info()[1]
+
+ # some extra thread-state tests driven via _testcapi
+ def TestThreadState():
+ import thread
+ import time
+
if test_support.verbose:
- print "internal", name
- try:
- test()
- except _testcapi.error:
- raise test_support.TestFailed, sys.exc_info()[1]
-
-# some extra thread-state tests driven via _testcapi
-def TestThreadState():
- import thread
- import time
-
- if test_support.verbose:
- print "auto-thread-state"
-
- idents = []
-
- def callback():
- idents.append(thread.get_ident())
-
- _testcapi._test_thread_state(callback)
- time.sleep(1)
- # Check our main thread is in the list exactly 3 times.
- if idents.count(thread.get_ident()) != 3:
- raise test_support.TestFailed, \
- "Couldn't find main thread correctly in the list"
-
-try:
- _testcapi._test_thread_state
- have_thread_state = True
-except AttributeError:
- have_thread_state = False
-
-if have_thread_state:
- TestThreadState()
- import threading
- t=threading.Thread(target=TestThreadState)
- t.start()
+ print "auto-thread-state"
+
+ idents = []
+
+ def callback():
+ idents.append(thread.get_ident())
+
+ _testcapi._test_thread_state(callback)
+ a = b = callback
+ time.sleep(1)
+ # Check our main thread is in the list exactly 3 times.
+ if idents.count(thread.get_ident()) != 3:
+ raise test_support.TestFailed, \
+ "Couldn't find main thread correctly in the list"
+
+ try:
+ _testcapi._test_thread_state
+ have_thread_state = True
+ except AttributeError:
+ have_thread_state = False
+
+ if have_thread_state:
+ TestThreadState()
+ import threading
+        t = threading.Thread(target=TestThreadState)
+ t.start()
+ t.join()
+
+if __name__ == "__main__":
+ test_main()
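The ident-counting check above, reduced to pure Python as a sketch (the real test
lets _testcapi._test_thread_state drive the callback from C-managed threads; here
plain calls stand in for those invocations):

    import thread  # Python 2 module name

    idents = []

    def callback():
        idents.append(thread.get_ident())

    for _ in range(3):
        callback()  # stand-in for the C-driven calls

    # the calling thread's ident must show up once per invocation
    assert idents.count(thread.get_ident()) == 3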
diff --git a/Lib/test/test_cmd_line.py b/Lib/test/test_cmd_line.py
index a4a656d..018bec6 100644
--- a/Lib/test/test_cmd_line.py
+++ b/Lib/test/test_cmd_line.py
@@ -10,6 +10,9 @@ class CmdLineTest(unittest.TestCase):
infp.close()
data = outfp.read()
outfp.close()
+    # try to clean up the child so we don't appear to leak when running
+ # with regrtest -R. This should be a no-op on Windows.
+ popen2._cleanup()
return data
def exit_code(self, cmd_line):
diff --git a/Lib/test/test_codecs.py b/Lib/test/test_codecs.py
index 913aa91..6ea49cc 100644
--- a/Lib/test/test_codecs.py
+++ b/Lib/test/test_codecs.py
@@ -1,7 +1,7 @@
from test import test_support
import unittest
import codecs
-import sys, StringIO
+import sys, StringIO, _testcapi
class Queue(object):
"""
@@ -781,9 +781,18 @@ class NameprepTest(unittest.TestCase):
except Exception,e:
raise test_support.TestFailed("Test 3.%d: %s" % (pos+1, str(e)))
-class CodecTest(unittest.TestCase):
- def test_builtin(self):
+class IDNACodecTest(unittest.TestCase):
+ def test_builtin_decode(self):
self.assertEquals(unicode("python.org", "idna"), u"python.org")
+ self.assertEquals(unicode("python.org.", "idna"), u"python.org.")
+ self.assertEquals(unicode("xn--pythn-mua.org", "idna"), u"pyth\xf6n.org")
+ self.assertEquals(unicode("xn--pythn-mua.org.", "idna"), u"pyth\xf6n.org.")
+
+ def test_builtin_encode(self):
+ self.assertEquals(u"python.org".encode("idna"), "python.org")
+ self.assertEquals("python.org.".encode("idna"), "python.org.")
+ self.assertEquals(u"pyth\xf6n.org".encode("idna"), "xn--pythn-mua.org")
+ self.assertEquals(u"pyth\xf6n.org.".encode("idna"), "xn--pythn-mua.org.")
def test_stream(self):
import StringIO
@@ -791,6 +800,64 @@ class CodecTest(unittest.TestCase):
r.read(3)
self.assertEquals(r.read(), u"")
+ def test_incremental_decode(self):
+ self.assertEquals(
+ "".join(codecs.iterdecode("python.org", "idna")),
+ u"python.org"
+ )
+ self.assertEquals(
+ "".join(codecs.iterdecode("python.org.", "idna")),
+ u"python.org."
+ )
+ self.assertEquals(
+ "".join(codecs.iterdecode("xn--pythn-mua.org.", "idna")),
+ u"pyth\xf6n.org."
+ )
+ self.assertEquals(
+ "".join(codecs.iterdecode("xn--pythn-mua.org.", "idna")),
+ u"pyth\xf6n.org."
+ )
+
+ decoder = codecs.getincrementaldecoder("idna")()
+ self.assertEquals(decoder.decode("xn--xam", ), u"")
+ self.assertEquals(decoder.decode("ple-9ta.o", ), u"\xe4xample.")
+ self.assertEquals(decoder.decode(u"rg"), u"")
+ self.assertEquals(decoder.decode(u"", True), u"org")
+
+ decoder.reset()
+ self.assertEquals(decoder.decode("xn--xam", ), u"")
+ self.assertEquals(decoder.decode("ple-9ta.o", ), u"\xe4xample.")
+ self.assertEquals(decoder.decode("rg."), u"org.")
+ self.assertEquals(decoder.decode("", True), u"")
+
+ def test_incremental_encode(self):
+ self.assertEquals(
+ "".join(codecs.iterencode(u"python.org", "idna")),
+ "python.org"
+ )
+ self.assertEquals(
+ "".join(codecs.iterencode(u"python.org.", "idna")),
+ "python.org."
+ )
+ self.assertEquals(
+ "".join(codecs.iterencode(u"pyth\xf6n.org.", "idna")),
+ "xn--pythn-mua.org."
+ )
+ self.assertEquals(
+ "".join(codecs.iterencode(u"pyth\xf6n.org.", "idna")),
+ "xn--pythn-mua.org."
+ )
+
+ encoder = codecs.getincrementalencoder("idna")()
+ self.assertEquals(encoder.encode(u"\xe4x"), "")
+ self.assertEquals(encoder.encode(u"ample.org"), "xn--xample-9ta.")
+ self.assertEquals(encoder.encode(u"", True), "org")
+
+ encoder.reset()
+ self.assertEquals(encoder.encode(u"\xe4x"), "")
+ self.assertEquals(encoder.encode(u"ample.org."), "xn--xample-9ta.org.")
+ self.assertEquals(encoder.encode(u"", True), "")
+
class CodecsModuleTest(unittest.TestCase):
def test_decode(self):
@@ -1032,9 +1099,11 @@ class BasicUnicodeTest(unittest.TestCase):
decodedresult += reader.read()
self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
- # check incremental decoder/encoder and iterencode()/iterdecode()
+ # check incremental decoder/encoder (fetched via the Python
+ # and C API) and iterencode()/iterdecode()
try:
encoder = codecs.getincrementalencoder(encoding)()
+ cencoder = _testcapi.codec_incrementalencoder(encoding)
except LookupError: # no IncrementalEncoder
pass
else:
@@ -1042,10 +1111,24 @@ class BasicUnicodeTest(unittest.TestCase):
encodedresult = ""
for c in s:
encodedresult += encoder.encode(c)
+ encodedresult += encoder.encode(u"", True)
decoder = codecs.getincrementaldecoder(encoding)()
decodedresult = u""
for c in encodedresult:
decodedresult += decoder.decode(c)
+ decodedresult += decoder.decode("", True)
+ self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
+
+ # check C API
+ encodedresult = ""
+ for c in s:
+ encodedresult += cencoder.encode(c)
+ encodedresult += cencoder.encode(u"", True)
+ cdecoder = _testcapi.codec_incrementaldecoder(encoding)
+ decodedresult = u""
+ for c in encodedresult:
+ decodedresult += cdecoder.decode(c)
+ decodedresult += cdecoder.decode("", True)
self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
# check iterencode()/iterdecode()
@@ -1142,7 +1225,7 @@ def test_main():
PunycodeTest,
UnicodeInternalTest,
NameprepTest,
- CodecTest,
+ IDNACodecTest,
CodecsModuleTest,
StreamReaderTest,
Str2StrTest,
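A condensed sketch of the incremental IDNA behaviour the new tests pin down
(assuming the stdlib "idna" codec): the encoder buffers input until a label is
complete, so partial feeds yield empty output until a "." arrives, and final=True
flushes the remainder:

    import codecs

    encoder = codecs.getincrementalencoder('idna')()
    assert encoder.encode(u'\xe4x') == ''                     # label still open
    assert encoder.encode(u'ample.org') == 'xn--xample-9ta.'  # completed label flushed
    assert encoder.encode(u'', True) == 'org'                 # final=True drains the buffer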
diff --git a/Lib/test/test_coercion.py b/Lib/test/test_coercion.py
index e12ef0d..964f161 100644
--- a/Lib/test/test_coercion.py
+++ b/Lib/test/test_coercion.py
@@ -1,6 +1,8 @@
import copy
import sys
import warnings
+import unittest
+from test.test_support import run_unittest
# Fake a number that implements numeric methods through __coerce__
class CoerceNumber:
@@ -16,10 +18,19 @@ class CoerceNumber:
else:
return (self.arg, other)
+# New-style class version of CoerceNumber
+class CoerceTo(object):
+ def __init__(self, arg):
+ self.arg = arg
+ def __coerce__(self, other):
+ if isinstance(other, CoerceTo):
+ return self.arg, other.arg
+ else:
+ return self.arg, other
+
# Fake a number that implements numeric ops through methods.
class MethodNumber:
-
def __init__(self,arg):
self.arg = arg
@@ -50,6 +61,12 @@ class MethodNumber:
def __rtruediv__(self,other):
return other / self.arg
+ def __floordiv__(self,other):
+ return self.arg // other
+
+ def __rfloordiv__(self,other):
+ return other // self.arg
+
def __pow__(self,other):
return self.arg ** other
@@ -66,11 +89,157 @@ class MethodNumber:
return cmp(self.arg, other)
-candidates = [ 2, 4.0, 2L, 2+0j, [1], (2,), None,
- MethodNumber(2), CoerceNumber(2)]
+candidates = [2, 2L, 4.0, 2+0j, [1], (2,), None,
+ MethodNumber(2), CoerceNumber(2)]
+
+infix_binops = [ '+', '-', '*', '**', '%', '//', '/' ]
+
+TE = TypeError
+# b = both normal and augmented give same result list
+# s = single result lists for normal and augmented
+# e = equals other results
+# result lists: ['+', '-', '*', '**', '%', '//', ('classic /', 'new /')]
+# ^^^^^^^^^^^^^^^^^^^^^^
+# 2-tuple if results differ
+# else only one value
+infix_results = {
+ # 2
+ (0,0): ('b', [4, 0, 4, 4, 0, 1, (1, 1.0)]),
+ (0,1): ('e', (0,0)),
+ (0,2): ('b', [6.0, -2.0, 8.0, 16.0, 2.0, 0.0, 0.5]),
+ (0,3): ('b', [4+0j, 0+0j, 4+0j, 4+0j, 0+0j, 1+0j, 1+0j]),
+ (0,4): ('b', [TE, TE, [1, 1], TE, TE, TE, TE]),
+ (0,5): ('b', [TE, TE, (2, 2), TE, TE, TE, TE]),
+ (0,6): ('b', [TE, TE, TE, TE, TE, TE, TE]),
+ (0,7): ('e', (0,0)),
+ (0,8): ('e', (0,0)),
+
+ # 2L
+ (1,0): ('e', (0,0)),
+ (1,1): ('e', (0,1)),
+ (1,2): ('e', (0,2)),
+ (1,3): ('e', (0,3)),
+ (1,4): ('e', (0,4)),
+ (1,5): ('e', (0,5)),
+ (1,6): ('e', (0,6)),
+ (1,7): ('e', (0,7)),
+ (1,8): ('e', (0,8)),
+
+ # 4.0
+ (2,0): ('b', [6.0, 2.0, 8.0, 16.0, 0.0, 2.0, 2.0]),
+ (2,1): ('e', (2,0)),
+ (2,2): ('b', [8.0, 0.0, 16.0, 256.0, 0.0, 1.0, 1.0]),
+ (2,3): ('b', [6+0j, 2+0j, 8+0j, 16+0j, 0+0j, 2+0j, 2+0j]),
+ (2,4): ('b', [TE, TE, TE, TE, TE, TE, TE]),
+ (2,5): ('e', (2,4)),
+ (2,6): ('e', (2,4)),
+ (2,7): ('e', (2,0)),
+ (2,8): ('e', (2,0)),
+
+ # (2+0j)
+ (3,0): ('b', [4+0j, 0+0j, 4+0j, 4+0j, 0+0j, 1+0j, 1+0j]),
+ (3,1): ('e', (3,0)),
+ (3,2): ('b', [6+0j, -2+0j, 8+0j, 16+0j, 2+0j, 0+0j, 0.5+0j]),
+ (3,3): ('b', [4+0j, 0+0j, 4+0j, 4+0j, 0+0j, 1+0j, 1+0j]),
+ (3,4): ('b', [TE, TE, TE, TE, TE, TE, TE]),
+ (3,5): ('e', (3,4)),
+ (3,6): ('e', (3,4)),
+ (3,7): ('e', (3,0)),
+ (3,8): ('e', (3,0)),
+
+ # [1]
+ (4,0): ('b', [TE, TE, [1, 1], TE, TE, TE, TE]),
+ (4,1): ('e', (4,0)),
+ (4,2): ('b', [TE, TE, TE, TE, TE, TE, TE]),
+ (4,3): ('b', [TE, TE, TE, TE, TE, TE, TE]),
+ (4,4): ('b', [[1, 1], TE, TE, TE, TE, TE, TE]),
+ (4,5): ('s', [TE, TE, TE, TE, TE, TE, TE], [[1, 2], TE, TE, TE, TE, TE, TE]),
+ (4,6): ('b', [TE, TE, TE, TE, TE, TE, TE]),
+ (4,7): ('e', (4,0)),
+ (4,8): ('e', (4,0)),
+
+ # (2,)
+ (5,0): ('b', [TE, TE, (2, 2), TE, TE, TE, TE]),
+ (5,1): ('e', (5,0)),
+ (5,2): ('b', [TE, TE, TE, TE, TE, TE, TE]),
+ (5,3): ('e', (5,2)),
+ (5,4): ('e', (5,2)),
+ (5,5): ('b', [(2, 2), TE, TE, TE, TE, TE, TE]),
+ (5,6): ('b', [TE, TE, TE, TE, TE, TE, TE]),
+ (5,7): ('e', (5,0)),
+ (5,8): ('e', (5,0)),
+
+ # None
+ (6,0): ('b', [TE, TE, TE, TE, TE, TE, TE]),
+ (6,1): ('e', (6,0)),
+ (6,2): ('e', (6,0)),
+ (6,3): ('e', (6,0)),
+ (6,4): ('e', (6,0)),
+ (6,5): ('e', (6,0)),
+ (6,6): ('e', (6,0)),
+ (6,7): ('e', (6,0)),
+ (6,8): ('e', (6,0)),
+
+ # MethodNumber(2)
+ (7,0): ('e', (0,0)),
+ (7,1): ('e', (0,1)),
+ (7,2): ('e', (0,2)),
+ (7,3): ('e', (0,3)),
+ (7,4): ('e', (0,4)),
+ (7,5): ('e', (0,5)),
+ (7,6): ('e', (0,6)),
+ (7,7): ('e', (0,7)),
+ (7,8): ('e', (0,8)),
+
+ # CoerceNumber(2)
+ (8,0): ('e', (0,0)),
+ (8,1): ('e', (0,1)),
+ (8,2): ('e', (0,2)),
+ (8,3): ('e', (0,3)),
+ (8,4): ('e', (0,4)),
+ (8,5): ('e', (0,5)),
+ (8,6): ('e', (0,6)),
+ (8,7): ('e', (0,7)),
+ (8,8): ('e', (0,8)),
+}
+
+def process_infix_results():
+ for key in sorted(infix_results):
+ val = infix_results[key]
+ if val[0] == 'e':
+ infix_results[key] = infix_results[val[1]]
+ else:
+ if val[0] == 's':
+ res = (val[1], val[2])
+ elif val[0] == 'b':
+ res = (val[1], val[1])
+            for i in range(2):
+ if isinstance(res[i][6], tuple):
+ if 1/2 == 0:
+ # testing with classic (floor) division
+ res[i][6] = res[i][6][0]
+ else:
+ # testing with -Qnew
+ res[i][6] = res[i][6][1]
+ infix_results[key] = res
+
+
+
+process_infix_results()
+# now infix_results has two lists of results for every pairing.
-infix_binops = [ '+', '-', '*', '/', '**', '%' ]
prefix_binops = [ 'divmod' ]
+prefix_results = [
+ [(1,0), (1L,0L), (0.0,2.0), ((1+0j),0j), TE, TE, TE, TE, (1,0)],
+ [(1L,0L), (1L,0L), (0.0,2.0), ((1+0j),0j), TE, TE, TE, TE, (1L,0L)],
+ [(2.0,0.0), (2.0,0.0), (1.0,0.0), ((2+0j),0j), TE, TE, TE, TE, (2.0,0.0)],
+ [((1+0j),0j), ((1+0j),0j), (0j,(2+0j)), ((1+0j),0j), TE, TE, TE, TE, ((1+0j),0j)],
+ [TE, TE, TE, TE, TE, TE, TE, TE, TE],
+ [TE, TE, TE, TE, TE, TE, TE, TE, TE],
+ [TE, TE, TE, TE, TE, TE, TE, TE, TE],
+ [TE, TE, TE, TE, TE, TE, TE, TE, TE],
+ [(1,0), (1L,0L), (0.0,2.0), ((1+0j),0j), TE, TE, TE, TE, (1,0)]
+]
def format_float(value):
if abs(value) < 0.01:
@@ -87,83 +256,74 @@ def format_result(value):
return format_float(value)
return str(value)
-def do_infix_binops():
- for a in candidates:
- for b in candidates:
- for op in infix_binops:
- print '%s %s %s' % (a, op, b),
- try:
- x = eval('a %s b' % op)
- except:
- error = sys.exc_info()[:2]
- print '... %s.%s' % (error[0].__module__, error[0].__name__)
- else:
- print '=', format_result(x)
- try:
- z = copy.copy(a)
- except copy.Error:
- z = a # assume it has no inplace ops
- print '%s %s= %s' % (a, op, b),
- try:
- exec('z %s= b' % op)
- except:
- error = sys.exc_info()[:2]
- print '... %s.%s' % (error[0].__module__, error[0].__name__)
- else:
- print '=>', format_result(z)
-
-def do_prefix_binops():
- for a in candidates:
- for b in candidates:
- for op in prefix_binops:
- print '%s(%s, %s)' % (op, a, b),
- try:
- x = eval('%s(a, b)' % op)
- except:
- error = sys.exc_info()[:2]
- print '... %s.%s' % (error[0].__module__, error[0].__name__)
- else:
- print '=', format_result(x)
+class CoercionTest(unittest.TestCase):
+ def test_infix_binops(self):
+ for ia, a in enumerate(candidates):
+ for ib, b in enumerate(candidates):
+ results = infix_results[(ia, ib)]
+ for op, res, ires in zip(infix_binops, results[0], results[1]):
+ if res is TE:
+ self.assertRaises(TypeError, eval,
+ 'a %s b' % op, {'a': a, 'b': b})
+ else:
+ self.assertEquals(format_result(res),
+ format_result(eval('a %s b' % op)),
+ '%s %s %s == %s failed' % (a, op, b, res))
+ try:
+ z = copy.copy(a)
+ except copy.Error:
+ z = a # assume it has no inplace ops
+ if ires is TE:
+ try:
+ exec 'z %s= b' % op
+ except TypeError:
+ pass
+ else:
+ self.fail("TypeError not raised")
+ else:
+ exec('z %s= b' % op)
+ self.assertEquals(ires, z)
-# New-style class version of CoerceNumber
-class CoerceTo(object):
- def __init__(self, arg):
- self.arg = arg
- def __coerce__(self, other):
- if isinstance(other, CoerceTo):
- return self.arg, other.arg
- else:
- return self.arg, other
+ def test_prefix_binops(self):
+ for ia, a in enumerate(candidates):
+ for ib, b in enumerate(candidates):
+ for op in prefix_binops:
+ res = prefix_results[ia][ib]
+ if res is TE:
+ self.assertRaises(TypeError, eval,
+ '%s(a, b)' % op, {'a': a, 'b': b})
+ else:
+ self.assertEquals(format_result(res),
+ format_result(eval('%s(a, b)' % op)),
+ '%s(%s, %s) == %s failed' % (op, a, b, res))
+
+ def test_cmptypes(self):
+ # Built-in tp_compare slots expect their arguments to have the
+ # same type, but a user-defined __coerce__ doesn't have to obey.
+ # SF #980352
+ evil_coercer = CoerceTo(42)
+ # Make sure these don't crash any more
+ self.assertNotEquals(cmp(u'fish', evil_coercer), 0)
+ self.assertNotEquals(cmp(slice(1), evil_coercer), 0)
+ # ...but that this still works
+ class WackyComparer(object):
+ def __cmp__(slf, other):
+ self.assert_(other == 42, 'expected evil_coercer, got %r' % other)
+ return 0
+ self.assertEquals(cmp(WackyComparer(), evil_coercer), 0)
+ # ...and classic classes too, since that code path is a little different
+ class ClassicWackyComparer:
+ def __cmp__(slf, other):
+ self.assert_(other == 42, 'expected evil_coercer, got %r' % other)
+ return 0
+ self.assertEquals(cmp(ClassicWackyComparer(), evil_coercer), 0)
+
+def test_main():
+ warnings.filterwarnings("ignore",
+ r'complex divmod\(\), // and % are deprecated',
+ DeprecationWarning,
+ r'test.test_coercion$')
+ run_unittest(CoercionTest)
-def assert_(expr, msg=None):
- if not expr:
- raise AssertionError, msg
-
-def do_cmptypes():
- # Built-in tp_compare slots expect their arguments to have the
- # same type, but a user-defined __coerce__ doesn't have to obey.
- # SF #980352
- evil_coercer = CoerceTo(42)
- # Make sure these don't crash any more
- assert_(cmp(u'fish', evil_coercer) != 0)
- assert_(cmp(slice(1), evil_coercer) != 0)
- # ...but that this still works
- class WackyComparer(object):
- def __cmp__(self, other):
- assert_(other == 42, 'expected evil_coercer, got %r' % other)
- return 0
- assert_(cmp(WackyComparer(), evil_coercer) == 0)
- # ...and classic classes too, since that code path is a little different
- class ClassicWackyComparer:
- def __cmp__(self, other):
- assert_(other == 42, 'expected evil_coercer, got %r' % other)
- return 0
- assert_(cmp(ClassicWackyComparer(), evil_coercer) == 0)
-
-warnings.filterwarnings("ignore",
- r'complex divmod\(\), // and % are deprecated',
- DeprecationWarning,
- r'test.test_coercion$')
-do_infix_binops()
-do_prefix_binops()
-do_cmptypes()
+if __name__ == "__main__":
+ test_main()
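How the 'b'/'s'/'e' encoding of infix_results above expands, shown on a toy table
(a simplified sketch that omits the classic/new-division fixup; note the 'e'
aliases resolve only because sorted() visits their targets first):

    TE = TypeError

    table = {
        (0, 0): ('b', [4, 0]),          # one list serves normal and augmented
        (0, 1): ('e', (0, 0)),          # equals the results for key (0, 0)
        (4, 5): ('s', [TE], [[1, 2]]),  # separate normal/augmented lists
    }

    def expand(table):
        for key in sorted(table):
            val = table[key]
            if val[0] == 'e':
                table[key] = table[val[1]]
            elif val[0] == 's':
                table[key] = (val[1], val[2])
            else:  # 'b'
                table[key] = (val[1], val[1])

    expand(table)
    assert table[(0, 1)] is table[(0, 0)]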
diff --git a/Lib/test/test_compare.py b/Lib/test/test_compare.py
index 6899926..2fde614 100644
--- a/Lib/test/test_compare.py
+++ b/Lib/test/test_compare.py
@@ -1,4 +1,6 @@
import sys
+import unittest
+from test import test_support
class Empty:
def __repr__(self):
@@ -27,28 +29,31 @@ class Cmp:
def __cmp__(self, other):
return cmp(self.arg, other)
+class ComparisonTest(unittest.TestCase):
+ set1 = [2, 2.0, 2L, 2+0j, Coerce(2), Cmp(2.0)]
+ set2 = [[1], (3,), None, Empty()]
+ candidates = set1 + set2
-candidates = [2, 2.0, 2L, 2+0j, [1], (3,), None, Empty(), Coerce(2), Cmp(2.0)]
-
-def test():
- for a in candidates:
- for b in candidates:
- try:
- x = a == b
- except:
- print 'cmp(%s, %s) => %s' % (a, b, sys.exc_info()[0])
- else:
- if x:
- print "%s == %s" % (a, b)
+ def test_comparisons(self):
+ for a in self.candidates:
+ for b in self.candidates:
+ if ((a in self.set1) and (b in self.set1)) or a is b:
+ self.assertEqual(a, b)
else:
- print "%s != %s" % (a, b)
- # Ensure default comparison compares id() of args
- L = []
- for i in range(10):
- L.insert(len(L)//2, Empty())
- for a in L:
- for b in L:
- if cmp(a, b) != cmp(id(a), id(b)):
- print "ERROR:", cmp(a, b), cmp(id(a), id(b)), id(a), id(b)
-
-test()
+ self.assertNotEqual(a, b)
+
+ def test_id_comparisons(self):
+ # Ensure default comparison compares id() of args
+ L = []
+ for i in range(10):
+ L.insert(len(L)//2, Empty())
+ for a in L:
+ for b in L:
+ self.assertEqual(cmp(a, b), cmp(id(a), id(b)),
+ 'a=%r, b=%r' % (a, b))
+
+def test_main():
+ test_support.run_unittest(ComparisonTest)
+
+if __name__ == '__main__':
+ test_main()
diff --git a/Lib/test/test_compile.py b/Lib/test/test_compile.py
index 1d47f91..72c4f7e 100644
--- a/Lib/test/test_compile.py
+++ b/Lib/test/test_compile.py
@@ -284,6 +284,10 @@ if 1:
f1, f2 = f()
self.assertNotEqual(id(f1.func_code), id(f2.func_code))
+ def test_unicode_encoding(self):
+ code = u"# -*- coding: utf-8 -*-\npass\n"
+ self.assertRaises(SyntaxError, compile, code, "tmp", "exec")
+
def test_subscripts(self):
# SF bug 1448804
# Class to make testing subscript results easy
diff --git a/Lib/test/test_compiler.py b/Lib/test/test_compiler.py
index 5e7b15c..a59d6aa 100644
--- a/Lib/test/test_compiler.py
+++ b/Lib/test/test_compiler.py
@@ -1,10 +1,12 @@
import compiler
from compiler.ast import flatten
-import os
+import os, sys, time, unittest
import test.test_support
-import unittest
from random import random
+# How much time in seconds can pass before we print a 'Still working' message.
+_PRINT_WORKING_MSG_INTERVAL = 5 * 60
+
class CompilerTest(unittest.TestCase):
def testCompileLibrary(self):
@@ -13,11 +15,18 @@ class CompilerTest(unittest.TestCase):
# that any of the code is correct, merely the compiler is able
# to generate some kind of code for it.
+ next_time = time.time() + _PRINT_WORKING_MSG_INTERVAL
libdir = os.path.dirname(unittest.__file__)
testdir = os.path.dirname(test.test_support.__file__)
for dir in [libdir, testdir]:
for basename in os.listdir(dir):
+                # Print a 'still working' message since this test can be really slow
+ if next_time <= time.time():
+ next_time = time.time() + _PRINT_WORKING_MSG_INTERVAL
+ print >>sys.__stdout__, \
+ ' testCompileLibrary still working, be patient...'
+
if not basename.endswith(".py"):
continue
if not TEST_ALL and random() < 0.98:
diff --git a/Lib/test/test_contextlib.py b/Lib/test/test_contextlib.py
index f8db88c..97470c7 100644
--- a/Lib/test/test_contextlib.py
+++ b/Lib/test/test_contextlib.py
@@ -2,12 +2,14 @@
from __future__ import with_statement
+import sys
import os
import decimal
import tempfile
import unittest
import threading
from contextlib import * # Tests __all__
+from test.test_support import run_suite
class ContextManagerTestCase(unittest.TestCase):
@@ -45,6 +47,28 @@ class ContextManagerTestCase(unittest.TestCase):
self.fail("Expected ZeroDivisionError")
self.assertEqual(state, [1, 42, 999])
+ def test_contextmanager_no_reraise(self):
+ @contextmanager
+ def whee():
+ yield
+ ctx = whee().__context__()
+ ctx.__enter__()
+ # Calling __exit__ should not result in an exception
+ self.failIf(ctx.__exit__(TypeError, TypeError("foo"), None))
+
+ def test_contextmanager_trap_yield_after_throw(self):
+ @contextmanager
+ def whoo():
+ try:
+ yield
+ except:
+ yield
+ ctx = whoo().__context__()
+ ctx.__enter__()
+ self.assertRaises(
+ RuntimeError, ctx.__exit__, TypeError, TypeError("foo"), None
+ )
+
def test_contextmanager_except(self):
state = []
@contextmanager
@@ -62,6 +86,21 @@ class ContextManagerTestCase(unittest.TestCase):
raise ZeroDivisionError(999)
self.assertEqual(state, [1, 42, 999])
+ def test_contextmanager_attribs(self):
+ def attribs(**kw):
+ def decorate(func):
+ for k,v in kw.items():
+ setattr(func,k,v)
+ return func
+ return decorate
+ @contextmanager
+ @attribs(foo='bar')
+ def baz(spam):
+ """Whee!"""
+ self.assertEqual(baz.__name__,'baz')
+ self.assertEqual(baz.foo, 'bar')
+ self.assertEqual(baz.__doc__, "Whee!")
+
class NestedTestCase(unittest.TestCase):
# XXX This needs more work
@@ -274,21 +313,31 @@ class DecimalContextTestCase(unittest.TestCase):
def testBasic(self):
ctx = decimal.getcontext()
- ctx.prec = save_prec = decimal.ExtendedContext.prec + 5
- with decimal.ExtendedContext:
- self.assertEqual(decimal.getcontext().prec,
- decimal.ExtendedContext.prec)
- self.assertEqual(decimal.getcontext().prec, save_prec)
+ orig_context = ctx.copy()
try:
+ ctx.prec = save_prec = decimal.ExtendedContext.prec + 5
with decimal.ExtendedContext:
self.assertEqual(decimal.getcontext().prec,
decimal.ExtendedContext.prec)
- 1/0
- except ZeroDivisionError:
self.assertEqual(decimal.getcontext().prec, save_prec)
- else:
- self.fail("Didn't raise ZeroDivisionError")
+ try:
+ with decimal.ExtendedContext:
+ self.assertEqual(decimal.getcontext().prec,
+ decimal.ExtendedContext.prec)
+ 1/0
+ except ZeroDivisionError:
+ self.assertEqual(decimal.getcontext().prec, save_prec)
+ else:
+ self.fail("Didn't raise ZeroDivisionError")
+ finally:
+ decimal.setcontext(orig_context)
+
+# This is needed to make the test actually run under regrtest.py!
+def test_main():
+ run_suite(
+ unittest.defaultTestLoader.loadTestsFromModule(sys.modules[__name__])
+ )
if __name__ == "__main__":
- unittest.main()
+ test_main()
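What test_contextmanager_attribs pins down, in isolation (a minimal sketch): the
@contextmanager decorator is expected to preserve the wrapped function's metadata:

    from contextlib import contextmanager

    @contextmanager
    def baz():
        """Whee!"""
        yield

    assert baz.__name__ == 'baz'
    assert baz.__doc__ == "Whee!"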
diff --git a/Lib/test/test_copy_reg.py b/Lib/test/test_copy_reg.py
index c41946a..c3d3964 100644
--- a/Lib/test/test_copy_reg.py
+++ b/Lib/test/test_copy_reg.py
@@ -8,6 +8,22 @@ class C:
pass
+class WithoutSlots(object):
+ pass
+
+class WithWeakref(object):
+ __slots__ = ('__weakref__',)
+
+class WithPrivate(object):
+ __slots__ = ('__spam',)
+
+class WithSingleString(object):
+ __slots__ = 'spam'
+
+class WithInherited(WithSingleString):
+ __slots__ = ('eggs',)
+
+
class CopyRegTestCase(unittest.TestCase):
def test_class(self):
@@ -84,6 +100,19 @@ class CopyRegTestCase(unittest.TestCase):
self.assertRaises(ValueError, copy_reg.add_extension,
mod, func, code)
+ def test_slotnames(self):
+ self.assertEquals(copy_reg._slotnames(WithoutSlots), [])
+ self.assertEquals(copy_reg._slotnames(WithWeakref), [])
+ expected = ['_WithPrivate__spam']
+ self.assertEquals(copy_reg._slotnames(WithPrivate), expected)
+ self.assertEquals(copy_reg._slotnames(WithSingleString), ['spam'])
+ expected = ['eggs', 'spam']
+ expected.sort()
+ result = copy_reg._slotnames(WithInherited)
+ result.sort()
+ self.assertEquals(result, expected)
+
+
def test_main():
test_support.run_unittest(CopyRegTestCase)
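Why _slotnames() reports '_WithPrivate__spam' above: slot names that look
class-private get the usual name mangling. A minimal sketch:

    class WithPrivate(object):
        __slots__ = ('__spam',)

    obj = WithPrivate()
    obj._WithPrivate__spam = 42   # the mangled name is the real descriptor
    assert obj._WithPrivate__spam == 42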
diff --git a/Lib/test/test_curses.py b/Lib/test/test_curses.py
index a4a45a7..dc2f20b 100644
--- a/Lib/test/test_curses.py
+++ b/Lib/test/test_curses.py
@@ -24,6 +24,9 @@ term = os.environ.get('TERM')
if not term or term == 'unknown':
raise TestSkipped, "$TERM=%r, calling initscr() may cause exit" % term
+if sys.platform == "cygwin":
+ raise TestSkipped("cygwin's curses mostly just hangs")
+
def window_funcs(stdscr):
"Test the methods of windows"
win = curses.newwin(10,10)
@@ -201,11 +204,13 @@ def module_funcs(stdscr):
curses.has_key(13)
if hasattr(curses, 'getmouse'):
- curses.mousemask(curses.BUTTON1_PRESSED)
- curses.mouseinterval(10)
- # just verify these don't cause errors
- m = curses.getmouse()
- curses.ungetmouse(*m)
+ (availmask, oldmask) = curses.mousemask(curses.BUTTON1_PRESSED)
+        # availmask == 0 means mouse support is not available.
+ if availmask != 0:
+ curses.mouseinterval(10)
+ # just verify these don't cause errors
+ m = curses.getmouse()
+ curses.ungetmouse(*m)
def unit_tests():
from curses import ascii
diff --git a/Lib/test/test_datetime.py b/Lib/test/test_datetime.py
index 27f42c6..2528b4a 100644
--- a/Lib/test/test_datetime.py
+++ b/Lib/test/test_datetime.py
@@ -1168,6 +1168,17 @@ class TestDateTime(TestDate):
self.assertEqual(dt2 - dt1, us)
self.assert_(dt1 < dt2)
+ def test_strftime_with_bad_tzname_replace(self):
+ # verify ok if tzinfo.tzname().replace() returns a non-string
+ class MyTzInfo(FixedOffset):
+ def tzname(self, dt):
+ class MyStr(str):
+ def replace(self, *args):
+ return None
+ return MyStr('name')
+ t = self.theclass(2005, 3, 2, 0, 0, 0, 0, MyTzInfo(3, 'name'))
+ self.assertRaises(TypeError, t.strftime, '%Z')
+
def test_bad_constructor_arguments(self):
# bad years
self.theclass(MINYEAR, 1, 1) # no exception
diff --git a/Lib/test/test_decimal.py b/Lib/test/test_decimal.py
index 1d33ec4..341ad6d 100644
--- a/Lib/test/test_decimal.py
+++ b/Lib/test/test_decimal.py
@@ -29,7 +29,8 @@ import glob
import os, sys
import pickle, copy
from decimal import *
-from test.test_support import TestSkipped, run_unittest, run_doctest, is_resource_enabled
+from test.test_support import (TestSkipped, run_unittest, run_doctest,
+ is_resource_enabled)
import random
try:
import threading
@@ -39,12 +40,15 @@ except ImportError:
# Useful Test Constant
Signals = getcontext().flags.keys()
-# Tests are built around these assumed context defaults
-DefaultContext.prec=9
-DefaultContext.rounding=ROUND_HALF_EVEN
-DefaultContext.traps=dict.fromkeys(Signals, 0)
-setcontext(DefaultContext)
-
+# Tests are built around these assumed context defaults.
+# test_main() restores the original context.
+def init():
+ global ORIGINAL_CONTEXT
+ ORIGINAL_CONTEXT = getcontext().copy()
+ DefaultContext.prec = 9
+ DefaultContext.rounding = ROUND_HALF_EVEN
+ DefaultContext.traps = dict.fromkeys(Signals, 0)
+ setcontext(DefaultContext)
TESTDATADIR = 'decimaltestdata'
if __name__ == '__main__':
@@ -503,16 +507,17 @@ class DecimalImplicitConstructionTest(unittest.TestCase):
self.assertEqual(eval('Decimal(10) != E()'), 'ne 10')
# insert operator methods and then exercise them
- for sym, lop, rop in (
- ('+', '__add__', '__radd__'),
- ('-', '__sub__', '__rsub__'),
- ('*', '__mul__', '__rmul__'),
- ('/', '__truediv__', '__rtruediv__'),
- ('%', '__mod__', '__rmod__'),
- ('//', '__floordiv__', '__rfloordiv__'),
- ('**', '__pow__', '__rpow__'),
- ):
-
+ oplist = [
+ ('+', '__add__', '__radd__'),
+ ('-', '__sub__', '__rsub__'),
+ ('*', '__mul__', '__rmul__'),
+            ('/', '__truediv__', '__rtruediv__'),
+ ('%', '__mod__', '__rmod__'),
+ ('//', '__floordiv__', '__rfloordiv__'),
+ ('**', '__pow__', '__rpow__')
+ ]
+
+ for sym, lop, rop in oplist:
setattr(E, lop, lambda self, other: 'str' + lop + str(other))
setattr(E, rop, lambda self, other: str(other) + rop + 'str')
self.assertEqual(eval('E()' + sym + 'Decimal(10)'),
@@ -1059,6 +1064,7 @@ def test_main(arith=False, verbose=None):
is enabled in regrtest.py
"""
+ init()
global TEST_ALL
TEST_ALL = arith or is_resource_enabled('decimal')
@@ -1073,10 +1079,12 @@ def test_main(arith=False, verbose=None):
DecimalTest,
]
- run_unittest(*test_classes)
- import decimal as DecimalModule
- run_doctest(DecimalModule, verbose)
-
+ try:
+ run_unittest(*test_classes)
+ import decimal as DecimalModule
+ run_doctest(DecimalModule, verbose)
+ finally:
+ setcontext(ORIGINAL_CONTEXT)
if __name__ == '__main__':
# Calling with no arguments runs all tests.
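The save/restore pattern the init()/test_main() changes above introduce, in
isolation (a sketch): copy the active context before mutating the module-wide
defaults, and restore it in a finally block so other tests see an unchanged
context:

    from decimal import getcontext, setcontext, DefaultContext, ROUND_HALF_EVEN

    original = getcontext().copy()
    try:
        DefaultContext.prec = 9
        DefaultContext.rounding = ROUND_HALF_EVEN
        setcontext(DefaultContext)
        # ... run code that assumes these defaults ...
    finally:
        setcontext(original)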
diff --git a/Lib/test/test_descr.py b/Lib/test/test_descr.py
index 185edb3..89cebb0 100644
--- a/Lib/test/test_descr.py
+++ b/Lib/test/test_descr.py
@@ -1638,7 +1638,9 @@ def specials():
c1 = C()
c2 = C()
verify(not not c1)
- vereq(hash(c1), id(c1))
+ verify(id(c1) != id(c2))
+ hash(c1)
+ hash(c2)
vereq(cmp(c1, c2), cmp(id(c1), id(c2)))
vereq(c1, c1)
verify(c1 != c2)
@@ -1660,7 +1662,9 @@ def specials():
d1 = D()
d2 = D()
verify(not not d1)
- vereq(hash(d1), id(d1))
+ verify(id(d1) != id(d2))
+ hash(d1)
+ hash(d2)
vereq(cmp(d1, d2), cmp(id(d1), id(d2)))
vereq(d1, d1)
verify(d1 != d2)
@@ -2914,7 +2918,7 @@ def subclasspropagation():
class D(B, C):
pass
d = D()
- vereq(hash(d), id(d))
+ orig_hash = hash(d) # related to id(d) in platform-dependent ways
A.__hash__ = lambda self: 42
vereq(hash(d), 42)
C.__hash__ = lambda self: 314
@@ -2930,7 +2934,7 @@ def subclasspropagation():
del C.__hash__
vereq(hash(d), 42)
del A.__hash__
- vereq(hash(d), id(d))
+ vereq(hash(d), orig_hash)
d.foo = 42
d.bar = 42
vereq(d.foo, 42)
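The point of replacing vereq(hash(d), id(d)) above, as a sketch: CPython's default
hash is derived from the object's address but is not guaranteed to equal id(), so
the test now relies only on it being stable:

    class D(object):
        pass

    d = D()
    orig_hash = hash(d)            # platform-dependent; may differ from id(d)
    assert hash(d) == orig_hash    # but stable for the object's lifetime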
diff --git a/Lib/test/test_difflib.py b/Lib/test/test_difflib.py
index 52feef0..83fad15 100644
--- a/Lib/test/test_difflib.py
+++ b/Lib/test/test_difflib.py
@@ -152,6 +152,10 @@ class TestSFpatches(unittest.TestCase):
difflib.SequenceMatcher(None, old, new).get_opcodes()
-Doctests = doctest.DocTestSuite(difflib)
+def test_main():
+ difflib.HtmlDiff._default_prefix = 0
+ Doctests = doctest.DocTestSuite(difflib)
+ run_unittest(TestSFpatches, TestSFbugs, Doctests)
-run_unittest(TestSFpatches, TestSFbugs, Doctests)
+if __name__ == '__main__':
+ test_main()
diff --git a/Lib/test/test_dl.py b/Lib/test/test_dl.py
index d1f73b2..b70a4cf 100755
--- a/Lib/test/test_dl.py
+++ b/Lib/test/test_dl.py
@@ -10,6 +10,7 @@ sharedlibs = [
('/usr/lib/libc.so', 'getpid'),
('/lib/libc.so.6', 'getpid'),
('/usr/bin/cygwin1.dll', 'getpid'),
+ ('/usr/lib/libc.dylib', 'getpid'),
]
for s, func in sharedlibs:
diff --git a/Lib/test/test_doctest.py b/Lib/test/test_doctest.py
index 1f89ac2..b17607d 100644
--- a/Lib/test/test_doctest.py
+++ b/Lib/test/test_doctest.py
@@ -604,8 +604,8 @@ DocTestFinder finds the line number of each example:
... >>> for x in range(10):
... ... print x,
... 0 1 2 3 4 5 6 7 8 9
- ... >>> x/2
- ... 6.0
+ ... >>> x//2
+ ... 6
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> [e.lineno for e in test.examples]
@@ -679,8 +679,8 @@ statistics. Here's a simple DocTest case we can use:
... >>> x = 12
... >>> print x
... 12
- ... >>> x/2
- ... 6.0
+ ... >>> x//2
+ ... 6
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
@@ -700,8 +700,8 @@ the failure and proceeds to the next example:
... >>> x = 12
... >>> print x
... 14
- ... >>> x/2
- ... 6.0
+ ... >>> x//2
+ ... 6
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=True).run(test)
@@ -723,9 +723,9 @@ the failure and proceeds to the next example:
Got:
12
Trying:
- x/2
+ x//2
Expecting:
- 6.0
+ 6
ok
(1, 3)
"""
@@ -738,8 +738,8 @@ output:
... >>> x = 12
... >>> print x
... 12
- ... >>> x/2
- ... 6.0
+ ... >>> x//2
+ ... 6
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
@@ -754,9 +754,9 @@ output:
12
ok
Trying:
- x/2
+ x//2
Expecting:
- 6.0
+ 6
ok
(0, 3)
@@ -784,9 +784,9 @@ iff `-v` appears in sys.argv:
12
ok
Trying:
- x/2
+ x//2
Expecting:
- 6.0
+ 6
ok
(0, 3)
@@ -806,9 +806,9 @@ replaced with any other string:
>>> def f(x):
... '''
... >>> x = 12
- ... >>> print x/0
+ ... >>> print x//0
... Traceback (most recent call last):
- ... ZeroDivisionError: float division
+ ... ZeroDivisionError: integer division or modulo by zero
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
@@ -822,10 +822,10 @@ unexpected exception:
>>> def f(x):
... '''
... >>> x = 12
- ... >>> print 'pre-exception output', x/0
+ ... >>> print 'pre-exception output', x//0
... pre-exception output
... Traceback (most recent call last):
- ... ZeroDivisionError: float division
+ ... ZeroDivisionError: integer division or modulo by zero
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
@@ -833,10 +833,10 @@ unexpected exception:
**********************************************************************
File ..., line 4, in f
Failed example:
- print 'pre-exception output', x/0
+ print 'pre-exception output', x//0
Exception raised:
...
- ZeroDivisionError: float division
+ ZeroDivisionError: integer division or modulo by zero
(1, 2)
Exception messages may contain newlines:
@@ -920,7 +920,7 @@ unexpected exception:
>>> def f(x):
... r'''
- ... >>> 1/0
+ ... >>> 1//0
... 0
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
@@ -929,11 +929,11 @@ unexpected exception:
**********************************************************************
File ..., line 3, in f
Failed example:
- 1/0
+ 1//0
Exception raised:
Traceback (most recent call last):
...
- ZeroDivisionError: float division
+ ZeroDivisionError: integer division or modulo by zero
(1, 1)
"""
def optionflags(): r"""
diff --git a/Lib/test/test_email_renamed.py b/Lib/test/test_email_renamed.py
new file mode 100644
index 0000000..c3af598
--- /dev/null
+++ b/Lib/test/test_email_renamed.py
@@ -0,0 +1,13 @@
+# Copyright (C) 2001-2006 Python Software Foundation
+# email package unit tests
+
+import unittest
+# The specific tests now live in Lib/email/test
+from email.test.test_email_renamed import suite
+from test.test_support import run_suite
+
+def test_main():
+ run_suite(suite())
+
+if __name__ == '__main__':
+ test_main()
diff --git a/Lib/test/test_file.py b/Lib/test/test_file.py
index fd5670a..ab3da86 100644
--- a/Lib/test/test_file.py
+++ b/Lib/test/test_file.py
@@ -100,12 +100,18 @@ else:
print "writelines accepted sequence of non-string objects"
f.close()
-try:
- sys.stdin.seek(-1)
-except IOError:
- pass
+# This causes the interpreter to exit on OSF1 v5.1.
+if sys.platform != 'osf1V5':
+ try:
+ sys.stdin.seek(-1)
+ except IOError:
+ pass
+ else:
+ print "should not be able to seek on sys.stdin"
else:
- print "should not be able to seek on sys.stdin"
+ print >>sys.__stdout__, (
+ ' Skipping sys.stdin.seek(-1), it may crash the interpreter.'
+ ' Test manually.')
try:
sys.stdin.truncate()
diff --git a/Lib/test/test_fileinput.py b/Lib/test/test_fileinput.py
index f3a7841..301769e 100644
--- a/Lib/test/test_fileinput.py
+++ b/Lib/test/test_fileinput.py
@@ -162,7 +162,10 @@ if verbose:
print "15. Unicode filenames"
try:
t1 = writeTmp(1, ["A\nB"])
- fi = FileInput(files=unicode(t1, sys.getfilesystemencoding()))
+ encoding = sys.getfilesystemencoding()
+ if encoding is None:
+ encoding = 'ascii'
+ fi = FileInput(files=unicode(t1, encoding))
lines = list(fi)
verify(lines == ["A\n", "B"])
finally:
diff --git a/Lib/test/test_fork1.py b/Lib/test/test_fork1.py
index aca7a84..cba5fc7 100644
--- a/Lib/test/test_fork1.py
+++ b/Lib/test/test_fork1.py
@@ -1,75 +1,23 @@
"""This test checks for correct fork() behavior.
-
-We want fork1() semantics -- only the forking thread survives in the
-child after a fork().
-
-On some systems (e.g. Solaris without posix threads) we find that all
-active threads survive in the child after a fork(); this is an error.
-
-While BeOS doesn't officially support fork and native threading in
-the same application, the present example should work just fine. DC
"""
-import os, sys, time, thread
-from test.test_support import verify, verbose, TestSkipped
+import os
+from test.fork_wait import ForkWait
+from test.test_support import TestSkipped, run_unittest
try:
os.fork
except AttributeError:
raise TestSkipped, "os.fork not defined -- skipping test_fork1"
-LONGSLEEP = 2
-
-SHORTSLEEP = 0.5
-
-NUM_THREADS = 4
-
-alive = {}
-
-stop = 0
-
-def f(id):
- while not stop:
- alive[id] = os.getpid()
- try:
- time.sleep(SHORTSLEEP)
- except IOError:
- pass
-
-def main():
- for i in range(NUM_THREADS):
- thread.start_new(f, (i,))
-
- time.sleep(LONGSLEEP)
-
- a = alive.keys()
- a.sort()
- verify(a == range(NUM_THREADS))
-
- prefork_lives = alive.copy()
-
- if sys.platform in ['unixware7']:
- cpid = os.fork1()
- else:
- cpid = os.fork()
-
- if cpid == 0:
- # Child
- time.sleep(LONGSLEEP)
- n = 0
- for key in alive.keys():
- if alive[key] != prefork_lives[key]:
- n = n+1
- os._exit(n)
- else:
- # Parent
+class ForkTest(ForkWait):
+ def wait_impl(self, cpid):
spid, status = os.waitpid(cpid, 0)
- verify(spid == cpid)
- verify(status == 0,
- "cause = %d, exit = %d" % (status&0xff, status>>8) )
- global stop
- # Tell threads to die
- stop = 1
- time.sleep(2*SHORTSLEEP) # Wait for threads to die
+ self.assertEqual(spid, cpid)
+ self.assertEqual(status, 0, "cause = %d, exit = %d" % (status&0xff, status>>8))
+
+def test_main():
+ run_unittest(ForkTest)
-main()
+if __name__ == "__main__":
+ test_main()
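The wait_impl() contract above, spelled out (a POSIX-only sketch): waitpid()
returns the child pid plus an encoded status whose low byte is the terminating
signal ("cause") and whose high byte is the exit code:

    import os

    cpid = os.fork()
    if cpid == 0:
        os._exit(0)                      # child exits cleanly
    spid, status = os.waitpid(cpid, 0)
    assert spid == cpid
    assert status & 0xff == 0            # not killed by a signal
    assert status >> 8 == 0             # exit code 0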
diff --git a/Lib/test/test_generators.py b/Lib/test/test_generators.py
index 4be1b4c..a60a768 100644
--- a/Lib/test/test_generators.py
+++ b/Lib/test/test_generators.py
@@ -421,7 +421,6 @@ Subject: Re: PEP 255: Simple Generators
... self.name = name
... self.parent = None
... self.generator = self.generate()
-... self.close = self.generator.close
...
... def generate(self):
... while not self.parent:
@@ -484,8 +483,6 @@ A->A B->G C->A D->G E->G F->A G->G H->G I->A J->G K->A L->A M->G
merged A into G
A->G B->G C->G D->G E->G F->G G->G H->G I->G J->G K->G L->G M->G
->>> for s in sets: s.close() # break cycles
-
"""
# Emacs turd '
@@ -593,7 +590,6 @@ arguments are iterable -- a LazyList is the same as a generator to times().
... def __init__(self, g):
... self.sofar = []
... self.fetch = g.next
-... self.close = g.close
...
... def __getitem__(self, i):
... sofar, fetch = self.sofar, self.fetch
@@ -624,8 +620,6 @@ efficient.
[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
->>> m235.close()
-
Ye olde Fibonacci generator, LazyList style.
>>> def fibgen(a, b):
@@ -648,7 +642,6 @@ Ye olde Fibonacci generator, LazyList style.
>>> fib = LazyList(fibgen(1, 2))
>>> firstn(iter(fib), 17)
[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
->>> fib.close()
Running after your tail with itertools.tee (new in version 2.4)
@@ -685,7 +678,8 @@ m235 to share a single generator".
... merge(times(3, m3),
... times(5, m5))):
... yield n
-... m2, m3, m5, mRes = tee(_m235(), 4)
+... m1 = _m235()
+... m2, m3, m5, mRes = tee(m1, 4)
... return mRes
>>> it = m235()
@@ -702,10 +696,9 @@ result for as long as it has not been "consumed" from all of the duplicated
iterators, whereupon it is deleted. You can therefore print the hamming
sequence during hours without increasing memory usage, or very little.
-The beauty of it is that recursive running after their tail FP algorithms
+The beauty of it is that recursive running-after-their-tail FP algorithms
are quite straightforwardly expressed with this Python idiom.
-
Ye olde Fibonacci generator, tee style.
>>> def fib():
@@ -721,7 +714,8 @@ Ye olde Fibonacci generator, tee style.
... for res in _isum(fibHead, fibTail):
... yield res
...
-... fibHead, fibTail, fibRes = tee(_fib(), 3)
+... realfib = _fib()
+... fibHead, fibTail, fibRes = tee(realfib, 3)
... return fibRes
>>> firstn(fib(), 17)
@@ -1545,6 +1539,9 @@ caught ValueError (1)
>>> g.throw(ValueError, TypeError(1)) # mismatched type, rewrapped
caught ValueError (1)
+>>> g.throw(ValueError, ValueError(1), None) # explicit None traceback
+caught ValueError (1)
+
>>> g.throw(ValueError(1), "foo") # bad args
Traceback (most recent call last):
...
@@ -1592,8 +1589,7 @@ ValueError: 7
>>> f().throw("abc") # throw on just-opened generator
Traceback (most recent call last):
...
-TypeError: exceptions must be classes, or instances, not str
-
+abc
Now let's try closing a generator:
@@ -1711,6 +1707,81 @@ enclosing function a generator:
"""
+refleaks_tests = """
+Prior to adding cycle-GC support to itertools.tee, this code would leak
+references. We add it to the standard suite so the routine refleak tests
+will trigger if it ever becomes uncollectable again.
+
+>>> import itertools
+>>> def leak():
+... class gen:
+... def __iter__(self):
+... return self
+... def next(self):
+... return self.item
+... g = gen()
+... head, tail = itertools.tee(g)
+... g.item = head
+... return head
+>>> it = leak()
+
+Make sure to also test the involvement of the tee-internal teedataobject,
+which stores returned items.
+
+>>> item = it.next()
+
+
+
+This test leaked at one point due to generator finalization/destruction.
+It was copied from Lib/test/leakers/test_generator_cycle.py before the file
+was removed.
+
+>>> def leak():
+... def gen():
+... while True:
+... yield g
+... g = gen()
+
+>>> leak()
+
+
+
+This test isn't really generator related, but rather exception-in-cleanup
+related. The coroutine tests (above) just happen to cause an exception in
+the generator's __del__ (tp_del) method. We can also test for this
+explicitly, without generators. We do have to redirect stderr to avoid
+printing warnings and to double-check that we actually tested what we wanted
+to test.
+
+>>> import sys, StringIO
+>>> old = sys.stderr
+>>> try:
+... sys.stderr = StringIO.StringIO()
+... class Leaker:
+... def __del__(self):
+... raise RuntimeError
+...
+... l = Leaker()
+... del l
+... err = sys.stderr.getvalue().strip()
+... err.startswith(
+... "Exception exceptions.RuntimeError: RuntimeError() in <"
+... )
+... err.endswith("> ignored")
+... len(err.splitlines())
+... finally:
+... sys.stderr = old
+True
+True
+1
+
+
+
+These refleak tests should perhaps be in a testfile of their own,
+test_generators just happened to be the test that drew these out.
+
+"""
+
__test__ = {"tut": tutorial_tests,
"pep": pep_tests,
"email": email_tests,
@@ -1719,6 +1790,7 @@ __test__ = {"tut": tutorial_tests,
"conjoin": conjoin_tests,
"weakref": weakref_tests,
"coroutine": coroutine_tests,
+ "refleaks": refleaks_tests,
}
# Magic test name that regrtest.py invokes *after* importing this module.
diff --git a/Lib/test/test_genexps.py b/Lib/test/test_genexps.py
index 1556604..e414757 100644
--- a/Lib/test/test_genexps.py
+++ b/Lib/test/test_genexps.py
@@ -129,7 +129,7 @@ Verify late binding for the innermost for-expression
Verify re-use of tuples (a side benefit of using genexps over listcomps)
>>> tupleids = map(id, ((i,i) for i in xrange(10)))
- >>> max(tupleids) - min(tupleids)
+ >>> int(max(tupleids) - min(tupleids))
0
     Verify that syntax errors are raised for genexps used as lvalues
diff --git a/Lib/test/test_getargs2.py b/Lib/test/test_getargs2.py
index d4c681a..748ad44 100644
--- a/Lib/test/test_getargs2.py
+++ b/Lib/test/test_getargs2.py
@@ -48,7 +48,7 @@ LARGE = 0x7FFFFFFF
VERY_LARGE = 0xFF0000121212121212121242L
from _testcapi import UCHAR_MAX, USHRT_MAX, UINT_MAX, ULONG_MAX, INT_MAX, \
- INT_MIN, LONG_MIN, LONG_MAX
+ INT_MIN, LONG_MIN, LONG_MAX, PY_SSIZE_T_MIN, PY_SSIZE_T_MAX
# fake, they are not defined in Python's header files
LLONG_MAX = 2**63-1
@@ -182,6 +182,23 @@ class Signed_TestCase(unittest.TestCase):
self.failUnlessEqual(42, getargs_l(42L))
self.assertRaises(OverflowError, getargs_l, VERY_LARGE)
+ def test_n(self):
+ from _testcapi import getargs_n
+ # n returns 'Py_ssize_t', and does range checking
+ # (PY_SSIZE_T_MIN ... PY_SSIZE_T_MAX)
+ self.failUnlessEqual(3, getargs_n(3.14))
+ self.failUnlessEqual(99, getargs_n(Long()))
+ self.failUnlessEqual(99, getargs_n(Int()))
+
+ self.assertRaises(OverflowError, getargs_n, PY_SSIZE_T_MIN-1)
+ self.failUnlessEqual(PY_SSIZE_T_MIN, getargs_n(PY_SSIZE_T_MIN))
+ self.failUnlessEqual(PY_SSIZE_T_MAX, getargs_n(PY_SSIZE_T_MAX))
+ self.assertRaises(OverflowError, getargs_n, PY_SSIZE_T_MAX+1)
+
+ self.failUnlessEqual(42, getargs_n(42))
+ self.failUnlessEqual(42, getargs_n(42L))
+ self.assertRaises(OverflowError, getargs_n, VERY_LARGE)
+
class LongLong_TestCase(unittest.TestCase):
def test_L(self):
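A rough Python-level model of the new 'n' (Py_ssize_t) converter exercised above,
assuming Py_ssize_t spans the same range as a C long (sys.maxint), which holds on
common platforms:

    import sys

    def getargs_n_model(value):
        n = int(value)                    # 'n' also truncates floats: 3.14 -> 3
        if not (-sys.maxint - 1 <= n <= sys.maxint):
            raise OverflowError('value out of Py_ssize_t range')
        return n

    assert getargs_n_model(3.14) == 3
    assert getargs_n_model(42L) == 42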
diff --git a/Lib/test/test_glob.py b/Lib/test/test_glob.py
index 8a6ef7f..5ce09f9 100644
--- a/Lib/test/test_glob.py
+++ b/Lib/test/test_glob.py
@@ -80,6 +80,14 @@ class GlobTests(unittest.TestCase):
eq(self.glob('?a?', '*F'), map(self.norm, [os.path.join('aaa', 'zzzF'),
os.path.join('aab', 'F')]))
+ def test_glob_directory_with_trailing_slash(self):
+        # Verify that a wildcard pattern ending with os.sep doesn't
+        # blow up.
+ res = glob.glob(self.tempdir + '*' + os.sep)
+ self.assertEqual(len(res), 1)
+        # either of these results is reasonable
+ self.assertTrue(res[0] in [self.tempdir, self.tempdir + os.sep])
+
def test_glob_broken_symlinks(self):
if hasattr(os, 'symlink'):
eq = self.assertSequencesEqual_noorder
diff --git a/Lib/test/test_grammar.py b/Lib/test/test_grammar.py
index 5b20ab3..4bb4e45 100644
--- a/Lib/test/test_grammar.py
+++ b/Lib/test/test_grammar.py
@@ -255,6 +255,10 @@ d22v(1, 2, 3, 4, 5)
d22v(*(1, 2, 3, 4))
d22v(1, 2, *(3, 4, 5))
d22v(1, *(2, 3), **{'d': 4})
+def d31v((x)): pass
+d31v(1)
+def d32v((x,)): pass
+d32v((1,))
### lambdef: 'lambda' [varargslist] ':' test
print 'lambdef'
@@ -811,6 +815,11 @@ x = 10; t = False; g = ((i,j) for i in range(x) if t for j in range(x))
x = 5; t = True;
verify([(i,j) for i in range(10) for j in range(5)] == list(g))
+# Grammar allows multiple adjacent 'if's in listcomps and genexps,
+# even though it's silly. Make sure it works (ifelse broke this).
+verify([x for x in range(10) if x % 2 if x % 3] == [1, 5, 7])
+verify(list(x for x in range(10) if x % 2 if x % 3) == [1, 5, 7])
+
# Test ifelse expressions in various cases
def _checkeval(msg, ret):
"helper to check that evaluation of expressions is done correctly"
diff --git a/Lib/test/test_index.py b/Lib/test/test_index.py
index e69de29..45b3b2b 100644
--- a/Lib/test/test_index.py
+++ b/Lib/test/test_index.py
@@ -0,0 +1,137 @@
+import unittest
+from test import test_support
+import operator
+
+class oldstyle:
+ def __index__(self):
+ return self.ind
+
+class newstyle(object):
+ def __index__(self):
+ return self.ind
+
+class BaseTestCase(unittest.TestCase):
+ def setUp(self):
+ self.o = oldstyle()
+ self.n = newstyle()
+ self.o2 = oldstyle()
+ self.n2 = newstyle()
+
+ def test_basic(self):
+ self.o.ind = -2
+ self.n.ind = 2
+ assert(self.seq[self.n] == self.seq[2])
+ assert(self.seq[self.o] == self.seq[-2])
+ assert(operator.index(self.o) == -2)
+ assert(operator.index(self.n) == 2)
+
+ def test_error(self):
+ self.o.ind = 'dumb'
+ self.n.ind = 'bad'
+ myfunc = lambda x, obj: obj.seq[x]
+ self.failUnlessRaises(TypeError, operator.index, self.o)
+ self.failUnlessRaises(TypeError, operator.index, self.n)
+ self.failUnlessRaises(TypeError, myfunc, self.o, self)
+ self.failUnlessRaises(TypeError, myfunc, self.n, self)
+
+ def test_slice(self):
+ self.o.ind = 1
+ self.o2.ind = 3
+ self.n.ind = 2
+ self.n2.ind = 4
+ assert(self.seq[self.o:self.o2] == self.seq[1:3])
+ assert(self.seq[self.n:self.n2] == self.seq[2:4])
+
+ def test_repeat(self):
+ self.o.ind = 3
+ self.n.ind = 2
+ assert(self.seq * self.o == self.seq * 3)
+ assert(self.seq * self.n == self.seq * 2)
+ assert(self.o * self.seq == self.seq * 3)
+ assert(self.n * self.seq == self.seq * 2)
+
+ def test_wrappers(self):
+ n = self.n
+ n.ind = 5
+ assert n.__index__() == 5
+ assert 6 .__index__() == 6
+ assert -7L.__index__() == -7
+ assert self.seq.__getitem__(n) == self.seq[5]
+ assert self.seq.__mul__(n) == self.seq * 5
+ assert self.seq.__rmul__(n) == self.seq * 5
+
+    def test_infinite_recursion(self):
+ class Trap1(int):
+ def __index__(self):
+ return self
+ class Trap2(long):
+ def __index__(self):
+ return self
+ self.failUnlessRaises(TypeError, operator.getitem, self.seq, Trap1())
+ self.failUnlessRaises(TypeError, operator.getitem, self.seq, Trap2())
+
+
+class ListTestCase(BaseTestCase):
+ seq = [0,10,20,30,40,50]
+
+ def test_setdelitem(self):
+ self.o.ind = -2
+ self.n.ind = 2
+ lst = list('ab!cdefghi!j')
+ del lst[self.o]
+ del lst[self.n]
+ lst[self.o] = 'X'
+ lst[self.n] = 'Y'
+ assert lst == list('abYdefghXj')
+
+ lst = [5, 6, 7, 8, 9, 10, 11]
+ lst.__setitem__(self.n, "here")
+ assert lst == [5, 6, "here", 8, 9, 10, 11]
+ lst.__delitem__(self.n)
+ assert lst == [5, 6, 8, 9, 10, 11]
+
+ def test_inplace_repeat(self):
+ self.o.ind = 2
+ self.n.ind = 3
+ lst = [6, 4]
+ lst *= self.o
+ assert lst == [6, 4, 6, 4]
+ lst *= self.n
+ assert lst == [6, 4, 6, 4] * 3
+
+ lst = [5, 6, 7, 8, 9, 11]
+ l2 = lst.__imul__(self.n)
+ assert l2 is lst
+ assert lst == [5, 6, 7, 8, 9, 11] * 3
+
+
+class TupleTestCase(BaseTestCase):
+ seq = (0,10,20,30,40,50)
+
+class StringTestCase(BaseTestCase):
+ seq = "this is a test"
+
+class UnicodeTestCase(BaseTestCase):
+ seq = u"this is a test"
+
+
+class XRangeTestCase(unittest.TestCase):
+
+ def test_xrange(self):
+ n = newstyle()
+ n.ind = 5
+ assert xrange(1, 20)[n] == 6
+ assert xrange(1, 20).__getitem__(n) == 6
+
+
+def test_main():
+ test_support.run_unittest(
+ ListTestCase,
+ TupleTestCase,
+ StringTestCase,
+ UnicodeTestCase,
+ XRangeTestCase,
+ )
+
+if __name__ == "__main__":
+ test_main()
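
A minimal sketch of the __index__ protocol these tests cover: any object exposing __index__ can serve as a sequence index or slice bound, and operator.index() unwraps it (the class name here is made up):

    import operator

    class Ruler(object):
        def __init__(self, ind):
            self.ind = ind
        def __index__(self):
            # Called whenever an integer index is required.
            return self.ind

    seq = range(10)
    print seq[Ruler(3)]              # 3
    print seq[Ruler(2):Ruler(5)]     # [2, 3, 4]
    print operator.index(Ruler(-1))  # -1
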
diff --git a/Lib/test/test_inspect.py b/Lib/test/test_inspect.py
index 0bdf959..d9fd93d 100644
--- a/Lib/test/test_inspect.py
+++ b/Lib/test/test_inspect.py
@@ -295,10 +295,12 @@ class TestClassesAndFunctions(unittest.TestCase):
self.assertArgSpecEquals(A.m, ['self'])
def test_getargspec_sublistofone(self):
- def sublistOfOne((foo)): return 1
-
+ def sublistOfOne((foo,)): return 1
self.assertArgSpecEquals(sublistOfOne, [['foo']])
+ def fakeSublistOfOne((foo)): return 1
+ self.assertArgSpecEquals(fakeSublistOfOne, ['foo'])
+
def test_classify_newstyle(self):
class A(object):
diff --git a/Lib/test/test_mimetypes.py b/Lib/test/test_mimetypes.py
index 5939ff5..8c584ad 100644
--- a/Lib/test/test_mimetypes.py
+++ b/Lib/test/test_mimetypes.py
@@ -8,6 +8,7 @@ from test import test_support
# Tell it we don't know about external files:
mimetypes.knownfiles = []
mimetypes.inited = False
+mimetypes._default_mime_types()
class MimeTypesTestCase(unittest.TestCase):
diff --git a/Lib/test/test_multibytecodec.py b/Lib/test/test_multibytecodec.py
index aef7931..4d02dee 100644
--- a/Lib/test/test_multibytecodec.py
+++ b/Lib/test/test_multibytecodec.py
@@ -7,13 +7,114 @@
from test import test_support
from test import test_multibytecodec_support
-import unittest, StringIO, codecs
+import unittest, StringIO, codecs, sys
+
+class Test_MultibyteCodec(unittest.TestCase):
+
+ def test_nullcoding(self):
+ self.assertEqual(''.decode('gb18030'), u'')
+ self.assertEqual(unicode('', 'gb18030'), u'')
+ self.assertEqual(u''.encode('gb18030'), '')
+
+ def test_str_decode(self):
+ self.assertEqual('abcd'.encode('gb18030'), 'abcd')
+
+ def test_errorcallback_longindex(self):
+ dec = codecs.getdecoder('euc-kr')
+ myreplace = lambda exc: (u'', sys.maxint+1)
+ codecs.register_error('test.cjktest', myreplace)
+ self.assertRaises(IndexError, dec,
+ 'apple\x92ham\x93spam', 'test.cjktest')
+
+class Test_IncrementalEncoder(unittest.TestCase):
+
+ def test_stateless(self):
+ # cp949 encoder isn't stateful at all.
+ encoder = codecs.getincrementalencoder('cp949')()
+ self.assertEqual(encoder.encode(u'\ud30c\uc774\uc36c \ub9c8\uc744'),
+ '\xc6\xc4\xc0\xcc\xbd\xe3 \xb8\xb6\xc0\xbb')
+ self.assertEqual(encoder.reset(), None)
+ self.assertEqual(encoder.encode(u'\u2606\u223c\u2606', True),
+ '\xa1\xd9\xa1\xad\xa1\xd9')
+ self.assertEqual(encoder.reset(), None)
+ self.assertEqual(encoder.encode(u'', True), '')
+ self.assertEqual(encoder.encode(u'', False), '')
+ self.assertEqual(encoder.reset(), None)
+
+ def test_stateful(self):
+        # The jisx0213 encoder is stateful for a few codepoints, e.g.:
+ # U+00E6 => A9DC
+ # U+00E6 U+0300 => ABC4
+ # U+0300 => ABDC
+
+ encoder = codecs.getincrementalencoder('jisx0213')()
+ self.assertEqual(encoder.encode(u'\u00e6\u0300'), '\xab\xc4')
+ self.assertEqual(encoder.encode(u'\u00e6'), '')
+ self.assertEqual(encoder.encode(u'\u0300'), '\xab\xc4')
+ self.assertEqual(encoder.encode(u'\u00e6', True), '\xa9\xdc')
+
+ self.assertEqual(encoder.reset(), None)
+ self.assertEqual(encoder.encode(u'\u0300'), '\xab\xdc')
+
+ self.assertEqual(encoder.encode(u'\u00e6'), '')
+ self.assertEqual(encoder.encode('', True), '\xa9\xdc')
+ self.assertEqual(encoder.encode('', True), '')
+
+ def test_stateful_keep_buffer(self):
+ encoder = codecs.getincrementalencoder('jisx0213')()
+ self.assertEqual(encoder.encode(u'\u00e6'), '')
+ self.assertRaises(UnicodeEncodeError, encoder.encode, u'\u0123')
+ self.assertEqual(encoder.encode(u'\u0300\u00e6'), '\xab\xc4')
+ self.assertRaises(UnicodeEncodeError, encoder.encode, u'\u0123')
+ self.assertEqual(encoder.reset(), None)
+ self.assertEqual(encoder.encode(u'\u0300'), '\xab\xdc')
+ self.assertEqual(encoder.encode(u'\u00e6'), '')
+ self.assertRaises(UnicodeEncodeError, encoder.encode, u'\u0123')
+ self.assertEqual(encoder.encode(u'', True), '\xa9\xdc')
+
+
+class Test_IncrementalDecoder(unittest.TestCase):
+
+ def test_dbcs(self):
+        # The cp949 decoder is simple, with only 1- or 2-byte sequences.
+ decoder = codecs.getincrementaldecoder('cp949')()
+ self.assertEqual(decoder.decode('\xc6\xc4\xc0\xcc\xbd'),
+ u'\ud30c\uc774')
+ self.assertEqual(decoder.decode('\xe3 \xb8\xb6\xc0\xbb'),
+ u'\uc36c \ub9c8\uc744')
+ self.assertEqual(decoder.decode(''), u'')
+
+ def test_dbcs_keep_buffer(self):
+ decoder = codecs.getincrementaldecoder('cp949')()
+ self.assertEqual(decoder.decode('\xc6\xc4\xc0'), u'\ud30c')
+ self.assertRaises(UnicodeDecodeError, decoder.decode, '', True)
+ self.assertEqual(decoder.decode('\xcc'), u'\uc774')
+
+ self.assertEqual(decoder.decode('\xc6\xc4\xc0'), u'\ud30c')
+ self.assertRaises(UnicodeDecodeError, decoder.decode, '\xcc\xbd', True)
+ self.assertEqual(decoder.decode('\xcc'), u'\uc774')
+
+ def test_iso2022(self):
+ decoder = codecs.getincrementaldecoder('iso2022-jp')()
+ ESC = '\x1b'
+ self.assertEqual(decoder.decode(ESC + '('), u'')
+ self.assertEqual(decoder.decode('B', True), u'')
+ self.assertEqual(decoder.decode(ESC + '$'), u'')
+ self.assertEqual(decoder.decode('B@$'), u'\u4e16')
+ self.assertEqual(decoder.decode('@$@'), u'\u4e16')
+ self.assertEqual(decoder.decode('$', True), u'\u4e16')
+ self.assertEqual(decoder.reset(), None)
+ self.assertEqual(decoder.decode('@$'), u'@$')
+ self.assertEqual(decoder.decode(ESC + '$'), u'')
+ self.assertRaises(UnicodeDecodeError, decoder.decode, '', True)
+ self.assertEqual(decoder.decode('B@$'), u'\u4e16')
+
class Test_StreamWriter(unittest.TestCase):
if len(u'\U00012345') == 2: # UCS2
def test_gb18030(self):
s= StringIO.StringIO()
- c = codecs.lookup('gb18030')[3](s)
+ c = codecs.getwriter('gb18030')(s)
c.write(u'123')
self.assertEqual(s.getvalue(), '123')
c.write(u'\U00012345')
@@ -30,15 +131,16 @@ class Test_StreamWriter(unittest.TestCase):
self.assertEqual(s.getvalue(),
'123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
- # standard utf-8 codecs has broken StreamReader
- if test_multibytecodec_support.__cjkcodecs__:
- def test_utf_8(self):
- s= StringIO.StringIO()
- c = codecs.lookup('utf-8')[3](s)
- c.write(u'123')
- self.assertEqual(s.getvalue(), '123')
- c.write(u'\U00012345')
- self.assertEqual(s.getvalue(), '123\xf0\x92\x8d\x85')
+ def test_utf_8(self):
+ s= StringIO.StringIO()
+ c = codecs.getwriter('utf-8')(s)
+ c.write(u'123')
+ self.assertEqual(s.getvalue(), '123')
+ c.write(u'\U00012345')
+ self.assertEqual(s.getvalue(), '123\xf0\x92\x8d\x85')
+
+ # Python utf-8 codec can't buffer surrogate pairs yet.
+ if 0:
c.write(u'\U00012345'[0])
self.assertEqual(s.getvalue(), '123\xf0\x92\x8d\x85')
c.write(u'\U00012345'[1] + u'\U00012345' + u'\uac00\u00ac')
@@ -61,14 +163,6 @@ class Test_StreamWriter(unittest.TestCase):
else: # UCS4
pass
- def test_nullcoding(self):
- self.assertEqual(''.decode('gb18030'), u'')
- self.assertEqual(unicode('', 'gb18030'), u'')
- self.assertEqual(u''.encode('gb18030'), '')
-
- def test_str_decode(self):
- self.assertEqual('abcd'.encode('gb18030'), 'abcd')
-
def test_streamwriter_strwrite(self):
s = StringIO.StringIO()
wr = codecs.getwriter('gb18030')(s)
@@ -83,6 +177,9 @@ class Test_ISO2022(unittest.TestCase):
def test_main():
suite = unittest.TestSuite()
+ suite.addTest(unittest.makeSuite(Test_MultibyteCodec))
+ suite.addTest(unittest.makeSuite(Test_IncrementalEncoder))
+ suite.addTest(unittest.makeSuite(Test_IncrementalDecoder))
suite.addTest(unittest.makeSuite(Test_StreamWriter))
suite.addTest(unittest.makeSuite(Test_ISO2022))
test_support.run_suite(suite)
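
A minimal sketch of the incremental-codec API exercised by the new test classes; utf-8 stands in for the CJK codecs here:

    import codecs

    enc = codecs.getincrementalencoder('utf-8')()
    dec = codecs.getincrementaldecoder('utf-8')()

    # Feed text piecewise; final=True flushes any buffered state.
    data = enc.encode(u'spam ') + enc.encode(u'eggs', True)
    print dec.decode(data, True)    # u'spam eggs'
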
diff --git a/Lib/test/test_multibytecodec_support.py b/Lib/test/test_multibytecodec_support.py
index 45a63e7..bec32de 100644
--- a/Lib/test/test_multibytecodec_support.py
+++ b/Lib/test/test_multibytecodec_support.py
@@ -3,15 +3,12 @@
# test_multibytecodec_support.py
# Common Unittest Routines for CJK codecs
#
-# $CJKCodecs: test_multibytecodec_support.py,v 1.6 2004/06/19 06:09:55 perky Exp $
import sys, codecs, os.path
import unittest
from test import test_support
from StringIO import StringIO
-__cjkcodecs__ = 0 # define this as 0 for python
-
class TestBase:
encoding = '' # codec name
codec = None # codec tuple (with 4 elements)
@@ -21,11 +18,17 @@ class TestBase:
roundtriptest = 1 # set if roundtrip is possible with unicode
has_iso10646 = 0 # set if this encoding contains whole iso10646 map
xmlcharnametest = None # string to test xmlcharrefreplace
+ unmappedunicode = u'\udeee' # a unicode codepoint that is not mapped.
def setUp(self):
if self.codec is None:
self.codec = codecs.lookup(self.encoding)
- self.encode, self.decode, self.reader, self.writer = self.codec
+ self.encode = self.codec.encode
+ self.decode = self.codec.decode
+ self.reader = self.codec.streamreader
+ self.writer = self.codec.streamwriter
+ self.incrementalencoder = self.codec.incrementalencoder
+ self.incrementaldecoder = self.codec.incrementaldecoder
def test_chunkcoding(self):
for native, utf8 in zip(*[StringIO(f).readlines()
@@ -47,51 +50,155 @@ class TestBase:
else:
self.assertRaises(UnicodeError, func, source, scheme)
- if sys.hexversion >= 0x02030000:
- def test_xmlcharrefreplace(self):
- if self.has_iso10646:
- return
+ def test_xmlcharrefreplace(self):
+ if self.has_iso10646:
+ return
+
+ s = u"\u0b13\u0b23\u0b60 nd eggs"
+ self.assertEqual(
+ self.encode(s, "xmlcharrefreplace")[0],
+ "&#2835;&#2851;&#2912; nd eggs"
+ )
+
+ def test_customreplace_encode(self):
+ if self.has_iso10646:
+ return
+
+ from htmlentitydefs import codepoint2name
+
+ def xmlcharnamereplace(exc):
+ if not isinstance(exc, UnicodeEncodeError):
+ raise TypeError("don't know how to handle %r" % exc)
+ l = []
+ for c in exc.object[exc.start:exc.end]:
+ if ord(c) in codepoint2name:
+ l.append(u"&%s;" % codepoint2name[ord(c)])
+ else:
+ l.append(u"&#%d;" % ord(c))
+ return (u"".join(l), exc.end)
+
+ codecs.register_error("test.xmlcharnamereplace", xmlcharnamereplace)
- s = u"\u0b13\u0b23\u0b60 nd eggs"
- self.assertEqual(
- self.encode(s, "xmlcharrefreplace")[0],
- "&#2835;&#2851;&#2912; nd eggs"
- )
+ if self.xmlcharnametest:
+ sin, sout = self.xmlcharnametest
+ else:
+ sin = u"\xab\u211c\xbb = \u2329\u1234\u232a"
+ sout = "&laquo;&real;&raquo; = &lang;&#4660;&rang;"
+ self.assertEqual(self.encode(sin,
+ "test.xmlcharnamereplace")[0], sout)
+
+ def test_callback_wrong_objects(self):
+ def myreplace(exc):
+ return (ret, exc.end)
+ codecs.register_error("test.cjktest", myreplace)
+
+ for ret in ([1, 2, 3], [], None, object(), 'string', ''):
+ self.assertRaises(TypeError, self.encode, self.unmappedunicode,
+ 'test.cjktest')
+
+ def test_callback_long_index(self):
+ def myreplace(exc):
+ return (u'x', long(exc.end))
+ codecs.register_error("test.cjktest", myreplace)
+ self.assertEqual(self.encode(u'abcd' + self.unmappedunicode + u'efgh',
+ 'test.cjktest'), ('abcdxefgh', 9))
+
+ def myreplace(exc):
+ return (u'x', sys.maxint + 1)
+ codecs.register_error("test.cjktest", myreplace)
+ self.assertRaises(IndexError, self.encode, self.unmappedunicode,
+ 'test.cjktest')
+
+ def test_callback_None_index(self):
+ def myreplace(exc):
+ return (u'x', None)
+ codecs.register_error("test.cjktest", myreplace)
+ self.assertRaises(TypeError, self.encode, self.unmappedunicode,
+ 'test.cjktest')
+
+ def test_callback_backward_index(self):
+ def myreplace(exc):
+ if myreplace.limit > 0:
+ myreplace.limit -= 1
+ return (u'REPLACED', 0)
+ else:
+ return (u'TERMINAL', exc.end)
+ myreplace.limit = 3
+ codecs.register_error("test.cjktest", myreplace)
+ self.assertEqual(self.encode(u'abcd' + self.unmappedunicode + u'efgh',
+ 'test.cjktest'),
+ ('abcdREPLACEDabcdREPLACEDabcdREPLACEDabcdTERMINALefgh', 9))
+
+ def test_callback_forward_index(self):
+ def myreplace(exc):
+ return (u'REPLACED', exc.end + 2)
+ codecs.register_error("test.cjktest", myreplace)
+ self.assertEqual(self.encode(u'abcd' + self.unmappedunicode + u'efgh',
+ 'test.cjktest'), ('abcdREPLACEDgh', 9))
+
+ def test_callback_index_outofbound(self):
+ def myreplace(exc):
+ return (u'TERM', 100)
+ codecs.register_error("test.cjktest", myreplace)
+ self.assertRaises(IndexError, self.encode, self.unmappedunicode,
+ 'test.cjktest')
+
+ def test_incrementalencoder(self):
+ UTF8Reader = codecs.getreader('utf-8')
+ for sizehint in [None] + range(1, 33) + \
+ [64, 128, 256, 512, 1024]:
+ istream = UTF8Reader(StringIO(self.tstring[1]))
+ ostream = StringIO()
+ encoder = self.incrementalencoder()
+ while 1:
+ if sizehint is not None:
+ data = istream.read(sizehint)
+ else:
+ data = istream.read()
- def test_customreplace(self):
- if self.has_iso10646:
- return
+ if not data:
+ break
+ e = encoder.encode(data)
+ ostream.write(e)
- import htmlentitydefs
+ self.assertEqual(ostream.getvalue(), self.tstring[0])
- names = {}
- for (key, value) in htmlentitydefs.entitydefs.items():
- if len(value)==1:
- names[value.decode('latin-1')] = self.decode(key)[0]
+ def test_incrementaldecoder(self):
+ UTF8Writer = codecs.getwriter('utf-8')
+ for sizehint in [None, -1] + range(1, 33) + \
+ [64, 128, 256, 512, 1024]:
+ istream = StringIO(self.tstring[0])
+ ostream = UTF8Writer(StringIO())
+ decoder = self.incrementaldecoder()
+ while 1:
+ data = istream.read(sizehint)
+ if not data:
+ break
else:
- names[unichr(int(value[2:-1]))] = self.decode(key)[0]
-
- def xmlcharnamereplace(exc):
- if not isinstance(exc, UnicodeEncodeError):
- raise TypeError("don't know how to handle %r" % exc)
- l = []
- for c in exc.object[exc.start:exc.end]:
- try:
- l.append(u"&%s;" % names[c])
- except KeyError:
- l.append(u"&#%d;" % ord(c))
- return (u"".join(l), exc.end)
-
- codecs.register_error(
- "test.xmlcharnamereplace", xmlcharnamereplace)
-
- if self.xmlcharnametest:
- sin, sout = self.xmlcharnametest
- else:
- sin = u"\xab\u211c\xbb = \u2329\u1234\u232a"
- sout = "&laquo;&real;&raquo; = &lang;&#4660;&rang;"
- self.assertEqual(self.encode(sin,
- "test.xmlcharnamereplace")[0], sout)
+ u = decoder.decode(data)
+ ostream.write(u)
+
+ self.assertEqual(ostream.getvalue(), self.tstring[1])
+
+ def test_incrementalencoder_error_callback(self):
+ inv = self.unmappedunicode
+
+ e = self.incrementalencoder()
+ self.assertRaises(UnicodeEncodeError, e.encode, inv, True)
+
+ e.errors = 'ignore'
+ self.assertEqual(e.encode(inv, True), '')
+
+ e.reset()
+ def tempreplace(exc):
+ return (u'called', exc.end)
+ codecs.register_error('test.incremental_error_callback', tempreplace)
+ e.errors = 'test.incremental_error_callback'
+ self.assertEqual(e.encode(inv, True), 'called')
+
+ # again
+ e.errors = 'ignore'
+ self.assertEqual(e.encode(inv, True), '')
def test_streamreader(self):
UTF8Writer = codecs.getwriter('utf-8')
@@ -113,11 +220,7 @@ class TestBase:
self.assertEqual(ostream.getvalue(), self.tstring[1])
def test_streamwriter(self):
- if __cjkcodecs__:
- readfuncs = ('read', 'readline', 'readlines')
- else:
- # standard utf8 codec has broken readline and readlines.
- readfuncs = ('read',)
+ readfuncs = ('read', 'readline', 'readlines')
UTF8Reader = codecs.getreader('utf-8')
for name in readfuncs:
for sizehint in [None] + range(1, 33) + \
@@ -211,10 +314,5 @@ class TestBase_Mapping(unittest.TestCase):
self.assertEqual(unicode(csetch, self.encoding), unich)
def load_teststring(encoding):
- if __cjkcodecs__:
- etxt = open(os.path.join('sampletexts', encoding) + '.txt').read()
- utxt = open(os.path.join('sampletexts', encoding) + '.utf8').read()
- return (etxt, utxt)
- else:
- from test import cjkencodings_test
- return cjkencodings_test.teststring[encoding]
+ from test import cjkencodings_test
+ return cjkencodings_test.teststring[encoding]
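
A minimal sketch of the codecs.register_error() machinery the callback tests lean on; the handler name 'example.mark' is made up for illustration:

    import codecs

    def mark_unencodable(exc):
        # Replace the failing span and resume encoding after it.
        if not isinstance(exc, UnicodeEncodeError):
            raise TypeError("don't know how to handle %r" % exc)
        return (u'<?>', exc.end)

    codecs.register_error('example.mark', mark_unencodable)
    print u'abc\u20acdef'.encode('ascii', 'example.mark')   # 'abc<?>def'
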
diff --git a/Lib/test/test_optparse.py b/Lib/test/test_optparse.py
index cf83d75..f656b9f 100644
--- a/Lib/test/test_optparse.py
+++ b/Lib/test/test_optparse.py
@@ -1382,8 +1382,15 @@ options:
class TestHelp(BaseTest):
def setUp(self):
+ self.orig_columns = os.environ.get('COLUMNS')
self.parser = self.make_parser(80)
+ def tearDown(self):
+ if self.orig_columns is None:
+ del os.environ['COLUMNS']
+ else:
+ os.environ['COLUMNS'] = self.orig_columns
+
def make_parser(self, columns):
options = [
make_option("-a", type="string", dest='a',
diff --git a/Lib/test/test_parser.py b/Lib/test/test_parser.py
index 771fe9d..8aa1657 100644
--- a/Lib/test/test_parser.py
+++ b/Lib/test/test_parser.py
@@ -51,6 +51,10 @@ class RoundtripLegalSyntaxTestCase(unittest.TestCase):
self.check_expr("[1, 2, 3]")
self.check_expr("[x**3 for x in range(20)]")
self.check_expr("[x**3 for x in range(20) if x % 3]")
+ self.check_expr("[x**3 for x in range(20) if x % 2 if x % 3]")
+ self.check_expr("list(x**3 for x in range(20))")
+ self.check_expr("list(x**3 for x in range(20) if x % 3)")
+ self.check_expr("list(x**3 for x in range(20) if x % 2 if x % 3)")
self.check_expr("foo(*args)")
self.check_expr("foo(*args, **kw)")
self.check_expr("foo(**kw)")
diff --git a/Lib/test/test_platform.py b/Lib/test/test_platform.py
index 200fba5..22307cd 100644
--- a/Lib/test/test_platform.py
+++ b/Lib/test/test_platform.py
@@ -63,7 +63,12 @@ class PlatformTest(unittest.TestCase):
res = platform.dist()
def test_libc_ver(self):
- res = platform.libc_ver()
+ from sys import executable
+ import os
+ if os.path.isdir(executable) and os.path.exists(executable+'.exe'):
+ # Cygwin horror
+ executable = executable + '.exe'
+ res = platform.libc_ver(executable)
def test_main():
test_support.run_unittest(
diff --git a/Lib/test/test_popen2.py b/Lib/test/test_popen2.py
index 18142ec..4db3cd1 100644
--- a/Lib/test/test_popen2.py
+++ b/Lib/test/test_popen2.py
@@ -35,6 +35,9 @@ def _test():
# same test as popen2._test(), but using the os.popen*() API
print "Testing os module:"
import popen2
+ # When the test runs, there shouldn't be any open pipes
+ popen2._cleanup()
+ assert not popen2._active, "Active pipes when test starts " + repr([c.cmd for c in popen2._active])
cmd = "cat"
teststr = "ab cd\n"
if os.name == "nt":
@@ -65,6 +68,7 @@ def _test():
raise ValueError("unexpected %r on stderr" % (got,))
for inst in popen2._active[:]:
inst.wait()
+ popen2._cleanup()
if popen2._active:
raise ValueError("_active not empty")
print "All OK"
diff --git a/Lib/test/test_posix.py b/Lib/test/test_posix.py
index 1ccc62b..f98c723 100644
--- a/Lib/test/test_posix.py
+++ b/Lib/test/test_posix.py
@@ -73,6 +73,11 @@ class PosixTester(unittest.TestCase):
finally:
fp.close()
+ def test_confstr(self):
+ if hasattr(posix, 'confstr'):
+ self.assertRaises(ValueError, posix.confstr, "CS_garbage")
+ self.assertEqual(len(posix.confstr("CS_PATH")) > 0, True)
+
def test_dup2(self):
if hasattr(posix, 'dup2'):
fp1 = open(test_support.TESTFN)
diff --git a/Lib/test/test_pty.py b/Lib/test/test_pty.py
index f8ae479..99e01b6 100644
--- a/Lib/test/test_pty.py
+++ b/Lib/test/test_pty.py
@@ -18,6 +18,27 @@ else:
def debug(msg):
pass
+def normalize_output(data):
+ # Some operating systems do conversions on newline. We could possibly
+ # fix that by doing the appropriate termios.tcsetattr()s. I couldn't
+ # figure out the right combo on Tru64 and I don't have an IRIX box.
+    # So just normalize the output and document the problem OSes by
+    # allowing certain combinations for some platforms, but avoiding
+    # other differences (like extra whitespace, trailing garbage, etc.).
+
+    # This is about the best we can do without getting some feedback
+    # from someone more knowledgeable.
+
+ # OSF/1 (Tru64) apparently turns \n into \r\r\n.
+ if data.endswith('\r\r\n'):
+ return data[:-3] + '\n'
+
+ # IRIX apparently turns \n into \r\n.
+ if data.endswith('\r\n'):
+ return data[:-2] + '\n'
+
+ return data
+
# Marginal testing of pty suite. Cannot do extensive 'do or fail' testing
# because pty code is not too portable.
@@ -36,19 +57,16 @@ def test_basic_pty():
if not os.isatty(slave_fd) and sys.platform not in fickle_isatty:
raise TestFailed, "slave_fd is not a tty"
- # IRIX apparently turns \n into \r\n. Allow that, but avoid allowing other
- # differences (like extra whitespace, trailing garbage, etc.)
-
debug("Writing to slave_fd")
os.write(slave_fd, TEST_STRING_1)
s1 = os.read(master_fd, 1024)
- sys.stdout.write(s1.replace("\r\n", "\n"))
+ sys.stdout.write(normalize_output(s1))
debug("Writing chunked output")
os.write(slave_fd, TEST_STRING_2[:5])
os.write(slave_fd, TEST_STRING_2[5:])
s2 = os.read(master_fd, 1024)
- sys.stdout.write(s2.replace("\r\n", "\n"))
+ sys.stdout.write(normalize_output(s2))
os.close(slave_fd)
os.close(master_fd)
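
A minimal sketch of the helper's contract, restated outside the test (the body mirrors the normalize_output() added above):

    def normalize_output(data):
        # OSF/1 (Tru64) turns \n into \r\r\n; IRIX turns \n into \r\n.
        if data.endswith('\r\r\n'):
            return data[:-3] + '\n'
        if data.endswith('\r\n'):
            return data[:-2] + '\n'
        return data

    assert normalize_output('spam\r\r\n') == 'spam\n'   # Tru64 style
    assert normalize_output('spam\r\n') == 'spam\n'     # IRIX style
    assert normalize_output('spam\n') == 'spam\n'       # left alone
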
diff --git a/Lib/test/test_pyclbr.py b/Lib/test/test_pyclbr.py
index 0eb7d90..2410b03 100644
--- a/Lib/test/test_pyclbr.py
+++ b/Lib/test/test_pyclbr.py
@@ -97,6 +97,9 @@ class PyclbrTest(TestCase):
self.assert_(isinstance(py_item, (FunctionType, BuiltinFunctionType)))
else:
self.failUnless(isinstance(py_item, (ClassType, type)))
+ if py_item.__module__ != moduleName:
+ continue # skip classes that came from somewhere else
+
real_bases = [base.__name__ for base in py_item.__bases__]
pyclbr_bases = [ getattr(base, 'name', base)
for base in value.super ]
@@ -172,7 +175,7 @@ class PyclbrTest(TestCase):
cm('pydoc')
# Tests for modules inside packages
- cm('email.Parser')
+ cm('email.parser')
cm('test.test_pyclbr')
diff --git a/Lib/test/test_queue.py b/Lib/test/test_queue.py
index b55dd01..66977e6 100644
--- a/Lib/test/test_queue.py
+++ b/Lib/test/test_queue.py
@@ -221,7 +221,51 @@ def SimpleQueueTest(q):
_doBlockingTest(q.get, (), q.put, ('empty',))
_doBlockingTest(q.get, (True, 10), q.put, ('empty',))
+cum = 0
+cumlock = threading.Lock()
+
+def worker(q):
+ global cum
+ while True:
+ x = q.get()
+ if x is None:
+ q.task_done()
+ return
+ cumlock.acquire()
+ try:
+ cum += x
+ finally:
+ cumlock.release()
+ q.task_done()
+
+def QueueJoinTest(q):
+ global cum
+ cum = 0
+ for i in (0,1):
+ threading.Thread(target=worker, args=(q,)).start()
+ for i in xrange(100):
+ q.put(i)
+ q.join()
+ verify(cum==sum(range(100)), "q.join() did not block until all tasks were done")
+ for i in (0,1):
+ q.put(None) # instruct the threads to close
+ q.join() # verify that you can join twice
+
+def QueueTaskDoneTest(q):
+ try:
+ q.task_done()
+ except ValueError:
+ pass
+ else:
+ raise TestFailed("Did not detect task count going negative")
+
def test():
+ q = Queue.Queue()
+ QueueTaskDoneTest(q)
+ QueueJoinTest(q)
+ QueueJoinTest(q)
+ QueueTaskDoneTest(q)
+
q = Queue.Queue(QUEUE_SIZE)
# Do it a couple of times on the same queue
SimpleQueueTest(q)
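
A minimal sketch of the task_done()/join() protocol the new Queue tests verify, assuming a single consumer thread and a None sentinel:

    import Queue, threading

    q = Queue.Queue()

    def consumer():
        while True:
            item = q.get()
            try:
                if item is None:
                    return              # sentinel: stop consuming
            finally:
                q.task_done()           # exactly one task_done() per get()

    threading.Thread(target=consumer).start()
    for i in range(5):
        q.put(i)
    q.put(None)
    q.join()        # blocks until every queued item has been marked done
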
diff --git a/Lib/test/test_quopri.py b/Lib/test/test_quopri.py
index ed66dfc..631c974 100644
--- a/Lib/test/test_quopri.py
+++ b/Lib/test/test_quopri.py
@@ -1,7 +1,7 @@
from test import test_support
import unittest
-import sys, os, cStringIO
+import sys, os, cStringIO, subprocess
import quopri
@@ -176,17 +176,20 @@ zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz''')
def test_scriptencode(self):
(p, e) = self.STRINGS[-1]
- (cin, cout) = os.popen2("%s -mquopri" % sys.executable)
- cin.write(p)
- cin.close()
- self.assert_(cout.read() == e)
+ process = subprocess.Popen([sys.executable, "-mquopri"],
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+ cout, cerr = process.communicate(p)
+ # On Windows, Python will output the result to stdout using
+ # CRLF, as the mode of stdout is text mode. To compare this
+ # with the expected result, we need to do a line-by-line comparison.
+ self.assert_(cout.splitlines() == e.splitlines())
def test_scriptdecode(self):
(p, e) = self.STRINGS[-1]
- (cin, cout) = os.popen2("%s -mquopri -d" % sys.executable)
- cin.write(e)
- cin.close()
- self.assert_(cout.read() == p)
+ process = subprocess.Popen([sys.executable, "-mquopri", "-d"],
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+ cout, cerr = process.communicate(e)
+ self.assert_(cout.splitlines() == p.splitlines())
def test_main():
test_support.run_unittest(QuopriTestCase)
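
A minimal sketch of the subprocess idiom the patch switches to; communicate() feeds stdin and drains stdout in one call, without the deadlock risk of os.popen2():

    import sys, subprocess

    child = [sys.executable, '-c',
             'import sys; sys.stdout.write(sys.stdin.read().upper())']
    p = subprocess.Popen(child, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out, err = p.communicate('hello\n')
    print repr(out)     # 'HELLO\n'
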
diff --git a/Lib/test/test_random.py b/Lib/test/test_random.py
index 9c2e0d0..bba4c7c 100644
--- a/Lib/test/test_random.py
+++ b/Lib/test/test_random.py
@@ -93,10 +93,29 @@ class TestBasicOps(unittest.TestCase):
self.gen.sample(set(range(20)), 2)
self.gen.sample(range(20), 2)
self.gen.sample(xrange(20), 2)
- self.gen.sample(dict.fromkeys('abcdefghijklmnopqrst'), 2)
self.gen.sample(str('abcdefghijklmnopqrst'), 2)
self.gen.sample(tuple('abcdefghijklmnopqrst'), 2)
+ def test_sample_on_dicts(self):
+ self.gen.sample(dict.fromkeys('abcdefghijklmnopqrst'), 2)
+
+ # SF bug #1460340 -- random.sample can raise KeyError
+ a = dict.fromkeys(range(10)+range(10,100,2)+range(100,110))
+ self.gen.sample(a, 3)
+
+ # A followup to bug #1460340: sampling from a dict could return
+ # a subset of its keys or of its values, depending on the size of
+ # the subset requested.
+ N = 30
+ d = dict((i, complex(i, i)) for i in xrange(N))
+ for k in xrange(N+1):
+ samp = self.gen.sample(d, k)
+ # Verify that we got ints back (keys); the values are complex.
+ for x in samp:
+ self.assert_(type(x) is int)
+ samp.sort()
+ self.assertEqual(samp, range(N))
+
def test_gauss(self):
# Ensure that the seed() method initializes all the hidden state. In
# particular, through 2.2.1 it failed to reset a piece of state used
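
A minimal sketch of the property the new dict test pins down: sample() drawn from a dict must yield keys, never values, whatever the sample size:

    import random

    d = dict((i, complex(i, i)) for i in range(30))
    for k in range(len(d) + 1):
        picked = random.sample(d, k)
        assert all(type(x) is int for x in picked)   # keys only
    print "sample() returned keys for every size"
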
diff --git a/Lib/test/test_regex.py b/Lib/test/test_regex.py
deleted file mode 100644
index 2e2c8f65..0000000
--- a/Lib/test/test_regex.py
+++ /dev/null
@@ -1,113 +0,0 @@
-from test.test_support import verbose, sortdict
-import warnings
-warnings.filterwarnings("ignore", "the regex module is deprecated",
- DeprecationWarning, __name__)
-import regex
-from regex_syntax import *
-
-re = 'a+b+c+'
-print 'no match:', regex.match(re, 'hello aaaabcccc world')
-print 'successful search:', regex.search(re, 'hello aaaabcccc world')
-try:
- cre = regex.compile('\(' + re)
-except regex.error:
- print 'caught expected exception'
-else:
- print 'expected regex.error not raised'
-
-print 'failed awk syntax:', regex.search('(a+)|(b+)', 'cdb')
-prev = regex.set_syntax(RE_SYNTAX_AWK)
-print 'successful awk syntax:', regex.search('(a+)|(b+)', 'cdb')
-regex.set_syntax(prev)
-print 'failed awk syntax:', regex.search('(a+)|(b+)', 'cdb')
-
-re = '\(<one>[0-9]+\) *\(<two>[0-9]+\)'
-print 'matching with group names and compile()'
-cre = regex.compile(re)
-print cre.match('801 999')
-try:
- print cre.group('one')
-except regex.error:
- print 'caught expected exception'
-else:
- print 'expected regex.error not raised'
-
-print 'matching with group names and symcomp()'
-cre = regex.symcomp(re)
-print cre.match('801 999')
-print cre.group(0)
-print cre.group('one')
-print cre.group(1, 2)
-print cre.group('one', 'two')
-print 'realpat:', cre.realpat
-print 'groupindex:', sortdict(cre.groupindex)
-
-re = 'world'
-cre = regex.compile(re)
-print 'not case folded search:', cre.search('HELLO WORLD')
-cre = regex.compile(re, regex.casefold)
-print 'case folded search:', cre.search('HELLO WORLD')
-
-print '__members__:', cre.__members__
-print 'regs:', cre.regs
-print 'last:', cre.last
-print 'translate:', len(cre.translate)
-print 'givenpat:', cre.givenpat
-
-print 'match with pos:', cre.match('hello world', 7)
-print 'search with pos:', cre.search('hello world there world', 7)
-print 'bogus group:', cre.group(0, 1, 3)
-try:
- print 'no name:', cre.group('one')
-except regex.error:
- print 'caught expected exception'
-else:
- print 'expected regex.error not raised'
-
-from regex_tests import *
-if verbose: print 'Running regex_tests test suite'
-
-for t in tests:
- pattern=s=outcome=repl=expected=None
- if len(t)==5:
- pattern, s, outcome, repl, expected = t
- elif len(t)==3:
- pattern, s, outcome = t
- else:
- raise ValueError, ('Test tuples should have 3 or 5 fields',t)
-
- try:
- obj=regex.compile(pattern)
- except regex.error:
- if outcome==SYNTAX_ERROR: pass # Expected a syntax error
- else:
- # Regex syntax errors aren't yet reported, so for
- # the official test suite they'll be quietly ignored.
- pass
- #print '=== Syntax error:', t
- else:
- try:
- result=obj.search(s)
- except regex.error, msg:
- print '=== Unexpected exception', t, repr(msg)
- if outcome==SYNTAX_ERROR:
- # This should have been a syntax error; forget it.
- pass
- elif outcome==FAIL:
- if result==-1: pass # No match, as expected
- else: print '=== Succeeded incorrectly', t
- elif outcome==SUCCEED:
- if result!=-1:
- # Matched, as expected, so now we compute the
- # result string and compare it to our expected result.
- start, end = obj.regs[0]
- found=s[start:end]
- groups=obj.group(1,2,3,4,5,6,7,8,9,10)
- vardict=vars()
- for i in range(len(groups)):
- vardict['g'+str(i+1)]=str(groups[i])
- repl=eval(repl)
- if repl!=expected:
- print '=== grouping error', t, repr(repl)+' should be '+repr(expected)
- else:
- print '=== Failed incorrectly', t
diff --git a/Lib/test/test_set.py b/Lib/test/test_set.py
index 6ff1215..0268be2 100644
--- a/Lib/test/test_set.py
+++ b/Lib/test/test_set.py
@@ -224,7 +224,7 @@ class TestJointOps(unittest.TestCase):
# Bug #1257731
class H(self.thetype):
def __hash__(self):
- return id(self)
+ return int(id(self) & 0x7fffffff)
s=H()
f=set()
f.add(s)
@@ -421,7 +421,7 @@ class TestSet(TestJointOps):
self.assertRaises(ReferenceError, str, p)
# C API test only available in a debug build
- if hasattr(sys, "gettotalrefcount"):
+ if hasattr(set, "test_c_api"):
def test_c_api(self):
self.assertEqual(set('abc').test_c_api(), True)
diff --git a/Lib/test/test_setuptools.py b/Lib/test/test_setuptools.py
new file mode 100644
index 0000000..a988303
--- /dev/null
+++ b/Lib/test/test_setuptools.py
@@ -0,0 +1,16 @@
+"""Tests for setuptools.
+
+The tests for setuptools are defined in the setuptools.tests package;
+this runs them from there.
+"""
+
+import test.test_support
+from setuptools.command.test import ScanningLoader
+
+def test_main():
+ test.test_support.run_suite(
+ ScanningLoader().loadTestsFromName('setuptools.tests')
+ )
+
+if __name__ == "__main__":
+ test_main()
diff --git a/Lib/test/test_sgmllib.py b/Lib/test/test_sgmllib.py
index bc25bd0..8e8b02f 100644
--- a/Lib/test/test_sgmllib.py
+++ b/Lib/test/test_sgmllib.py
@@ -214,6 +214,20 @@ DOCTYPE html PUBLIC '-//W3C//DTD HTML 4.01//EN'
("starttag", "e", [("a", "rgb(1,2,3)")]),
])
+ def test_attr_values_entities(self):
+ """Substitution of entities and charrefs in attribute values"""
+ # SF bug #1452246
+ self.check_events("""<a b=&lt; c=&lt;&gt; d=&lt-&gt; e='&lt; '
+ f="&xxx;" g='&#32;&#33;' h='&#500;' i='x?a=b&c=d;'>""",
+ [("starttag", "a", [("b", "<"),
+ ("c", "<>"),
+ ("d", "&lt->"),
+ ("e", "< "),
+ ("f", "&xxx;"),
+ ("g", " !"),
+ ("h", "&#500;"),
+ ("i", "x?a=b&c=d;"), ])])
+
def test_attr_funky_names(self):
self.check_events("""<a a.b='v' c:d=v e-f=v>""", [
("starttag", "a", [("a.b", "v"), ("c:d", "v"), ("e-f", "v")]),
diff --git a/Lib/test/test_socket.py b/Lib/test/test_socket.py
index 1899e78..6943080 100644
--- a/Lib/test/test_socket.py
+++ b/Lib/test/test_socket.py
@@ -268,9 +268,9 @@ class GeneralModuleTests(unittest.TestCase):
# Probably a similar problem as above; skip this test
return
all_host_names = [hostname, hname] + aliases
- fqhn = socket.getfqdn()
+ fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
- self.fail("Error testing host resolution mechanisms.")
+ self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
@@ -469,6 +469,14 @@ class GeneralModuleTests(unittest.TestCase):
sock.close()
self.assertRaises(socket.error, sock.send, "spam")
+ def testNewAttributes(self):
+ # testing .family, .type and .protocol
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.assertEqual(sock.family, socket.AF_INET)
+ self.assertEqual(sock.type, socket.SOCK_STREAM)
+ self.assertEqual(sock.proto, 0)
+ sock.close()
+
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
@@ -817,6 +825,32 @@ class TestExceptions(unittest.TestCase):
self.assert_(issubclass(socket.gaierror, socket.error))
self.assert_(issubclass(socket.timeout, socket.error))
+class TestLinuxAbstractNamespace(unittest.TestCase):
+
+ UNIX_PATH_MAX = 108
+
+ def testLinuxAbstractNamespace(self):
+ address = "\x00python-test-hello\x00\xff"
+ s1 = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ s1.bind(address)
+ s1.listen(1)
+ s2 = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ s2.connect(s1.getsockname())
+ s1.accept()
+ self.assertEqual(s1.getsockname(), address)
+ self.assertEqual(s2.getpeername(), address)
+
+ def testMaxName(self):
+ address = "\x00" + "h" * (self.UNIX_PATH_MAX - 1)
+ s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ s.bind(address)
+ self.assertEqual(s.getsockname(), address)
+
+ def testNameOverflow(self):
+ address = "\x00" + "h" * self.UNIX_PATH_MAX
+ s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ self.assertRaises(socket.error, s.bind, address)
+
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPTimeoutTest, TestExceptions]
@@ -832,6 +866,8 @@ def test_main():
])
if hasattr(socket, "socketpair"):
tests.append(BasicSocketPairTest)
+ if sys.platform == 'linux2':
+ tests.append(TestLinuxAbstractNamespace)
test_support.run_unittest(*tests)
if __name__ == "__main__":
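
A minimal sketch of Linux's abstract socket namespace (Linux only; a leading NUL byte keeps the name out of the filesystem; the name itself is made up):

    import socket

    ADDR = '\x00python-example-abstract'

    srv = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    srv.bind(ADDR)      # no filesystem entry is created
    srv.listen(1)

    cli = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    cli.connect(srv.getsockname())
    print repr(cli.getpeername())   # '\x00python-example-abstract'
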
diff --git a/Lib/test/test_socket_ssl.py b/Lib/test/test_socket_ssl.py
index 98680b9..1091383 100644
--- a/Lib/test/test_socket_ssl.py
+++ b/Lib/test/test_socket_ssl.py
@@ -1,5 +1,6 @@
# Test just the SSL support in the socket module, in a moderately bogus way.
+import sys
from test import test_support
import socket
@@ -13,6 +14,9 @@ def test_basic():
import urllib
+ if test_support.verbose:
+ print "test_basic ..."
+
socket.RAND_status()
try:
socket.RAND_egd(1)
@@ -26,7 +30,41 @@ def test_basic():
buf = f.read()
f.close()
+def test_timeout():
+ test_support.requires('network')
+
+ if test_support.verbose:
+ print "test_timeout ..."
+
+ # A service which issues a welcome banner (without need to write
+ # anything).
+ # XXX ("gmail.org", 995) has been unreliable so far, from time to time
+ # XXX non-responsive for hours on end (& across all buildbot slaves,
+ # XXX so that's not just a local thing).
+ ADDR = "gmail.org", 995
+
+ s = socket.socket()
+ s.settimeout(30.0)
+ try:
+ s.connect(ADDR)
+ except socket.timeout:
+ print >> sys.stderr, """\
+ WARNING: an attempt to connect to %r timed out, in
+ test_timeout. That may be legitimate, but is not the outcome we hoped
+ for. If this message is seen often, test_timeout should be changed to
+ use a more reliable address.""" % (ADDR,)
+ return
+
+ ss = socket.ssl(s)
+ # Read part of return welcome banner twice.
+ ss.read(1)
+ ss.read(1)
+ s.close()
+
def test_rude_shutdown():
+ if test_support.verbose:
+ print "test_rude_shutdown ..."
+
try:
import threading
except ImportError:
@@ -74,6 +112,7 @@ def test_main():
raise test_support.TestSkipped("socket module has no ssl support")
test_rude_shutdown()
test_basic()
+ test_timeout()
if __name__ == "__main__":
test_main()
diff --git a/Lib/test/test_sqlite.py b/Lib/test/test_sqlite.py
new file mode 100644
index 0000000..1b1d0e5
--- /dev/null
+++ b/Lib/test/test_sqlite.py
@@ -0,0 +1,16 @@
+from test.test_support import run_unittest, TestSkipped
+import unittest
+
+try:
+ import _sqlite3
+except ImportError:
+ raise TestSkipped('no sqlite available')
+from sqlite3.test import (dbapi, types, userfunctions,
+ factory, transactions)
+
+def test_main():
+ run_unittest(dbapi.suite(), types.suite(), userfunctions.suite(),
+ factory.suite(), transactions.suite())
+
+if __name__ == "__main__":
+ test_main()
diff --git a/Lib/test/test_startfile.py b/Lib/test/test_startfile.py
new file mode 100644
index 0000000..c4d12d7
--- /dev/null
+++ b/Lib/test/test_startfile.py
@@ -0,0 +1,37 @@
+# Ridiculously simple test of the os.startfile function for Windows.
+#
+# empty.vbs is an empty file (except for a comment), which does
+# nothing when run with cscript or wscript.
+#
+# A possible improvement would be to have empty.vbs do something that
+# we can detect here, to make sure that not only the os.startfile()
+# call succeeded, but also that the script actually ran.
+
+import unittest
+from test import test_support
+
+# use this form so that the test is skipped when startfile is not available:
+from os import startfile, path
+
+class TestCase(unittest.TestCase):
+ def test_nonexisting(self):
+ self.assertRaises(OSError, startfile, "nonexisting.vbs")
+
+ def test_nonexisting_u(self):
+ self.assertRaises(OSError, startfile, u"nonexisting.vbs")
+
+ def test_empty(self):
+ empty = path.join(path.dirname(__file__), "empty.vbs")
+ startfile(empty)
+ startfile(empty, "open")
+
+ def test_empty_u(self):
+ empty = path.join(path.dirname(__file__), "empty.vbs")
+ startfile(unicode(empty, "mbcs"))
+ startfile(unicode(empty, "mbcs"), "open")
+
+def test_main():
+ test_support.run_unittest(TestCase)
+
+if __name__=="__main__":
+ test_main()
diff --git a/Lib/test/test_sundry.py b/Lib/test/test_sundry.py
index fd10b68..af13684 100644
--- a/Lib/test/test_sundry.py
+++ b/Lib/test/test_sundry.py
@@ -12,75 +12,50 @@ warnings.filterwarnings("ignore",
from test.test_support import verbose
import BaseHTTPServer
+import DocXMLRPCServer
import CGIHTTPServer
-import Queue
import SimpleHTTPServer
-import SocketServer
+import SimpleXMLRPCServer
import aifc
-import anydbm
import audiodev
import bdb
+import cgitb
import cmd
import code
-import codeop
-import colorsys
-import commands
import compileall
-try:
- import curses # not available on Windows
-except ImportError:
- if verbose:
- print "skipping curses"
-import dircache
-import dis
-import distutils
-import doctest
-import dumbdbm
import encodings
-import fnmatch
import formatter
-import fpformat
import ftplib
import getpass
-import glob
import gopherlib
import htmlentitydefs
-import htmllib
-import httplib
-import imaplib
+import ihooks
import imghdr
import imputil
import keyword
-import macpath
+import linecache
import macurl2path
import mailcap
-import mhlib
-import mimetypes
import mimify
-import multifile
import mutex
import nntplib
import nturl2path
+import opcode
+import os2emxpath
import pdb
import pipes
#import poplib
import posixfile
-import profile
import pstats
import py_compile
-#import reconvert
-import repr
+import pydoc
+import rexec
try:
import rlcompleter # not available on Windows
except ImportError:
if verbose:
print "skipping rlcompleter"
-import robotparser
import sched
-import sgmllib
-import shelve
-import shlex
-import shutil
import smtplib
import sndhdr
import statvfs
@@ -90,12 +65,17 @@ import sunaudio
import symbol
import tabnanny
import telnetlib
-import test
+import timeit
import toaiff
-import urllib2
+import token
+try:
+ import tty # not available on Windows
+except ImportError:
+ if verbose:
+ print "skipping tty"
+
# Can't test the "user" module -- if the user has a ~/.pythonrc.py, it
# can screw up all sorts of things (esp. if it prints!).
#import user
import webbrowser
-import whichdb
import xml
diff --git a/Lib/test/test_sys.py b/Lib/test/test_sys.py
index b98c648..ae2a1c8 100644
--- a/Lib/test/test_sys.py
+++ b/Lib/test/test_sys.py
@@ -261,6 +261,11 @@ class SysModuleTest(unittest.TestCase):
self.assert_(vi[3] in ("alpha", "beta", "candidate", "final"))
self.assert_(isinstance(vi[4], int))
+ def test_43581(self):
+ # Can't use sys.stdout, as this is a cStringIO object when
+ # the test runs under regrtest.
+ self.assert_(sys.__stdout__.encoding == sys.__stderr__.encoding)
+
def test_main():
test.test_support.run_unittest(SysModuleTest)
diff --git a/Lib/test/test_timeout.py b/Lib/test/test_timeout.py
index cb19d9e..4309e8c 100644
--- a/Lib/test/test_timeout.py
+++ b/Lib/test/test_timeout.py
@@ -113,8 +113,9 @@ class TimeoutTestCase(unittest.TestCase):
# If we are too close to www.python.org, this test will fail.
# Pick a host that should be farther away.
- if socket.getfqdn().split('.')[-2:] == ['python', 'org']:
- self.addr_remote = ('python.net', 80)
+ if (socket.getfqdn().split('.')[-2:] == ['python', 'org'] or
+ socket.getfqdn().split('.')[-2:-1] == ['xs4all']):
+ self.addr_remote = ('tut.fi', 80)
_t1 = time.time()
self.failUnlessRaises(socket.error, self.sock.connect,
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index d3c1cc4..b064967 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -1,70 +1,30 @@
-from test.test_support import verbose, findfile, is_resource_enabled, TestFailed
import os, glob, random
+from cStringIO import StringIO
+from test.test_support import (verbose, findfile, is_resource_enabled,
+ TestFailed)
from tokenize import (tokenize, generate_tokens, untokenize,
NUMBER, NAME, OP, STRING)
-if verbose:
- print 'starting...'
-
-f = file(findfile('tokenize_tests' + os.extsep + 'txt'))
-tokenize(f.readline)
-f.close()
-
-
-
-###### Test roundtrip for untokenize ##########################
-
+# Test roundtrip for `untokenize`. `f` is a file path. The source code in f
+# is tokenized, converted back to source code via tokenize.untokenize(),
+# and tokenized again from the latter. The test fails if the second
+# tokenization doesn't match the first.
def test_roundtrip(f):
## print 'Testing:', f
- f = file(f)
+ fobj = open(f)
try:
- fulltok = list(generate_tokens(f.readline))
+ fulltok = list(generate_tokens(fobj.readline))
finally:
- f.close()
+ fobj.close()
t1 = [tok[:2] for tok in fulltok]
newtext = untokenize(t1)
readline = iter(newtext.splitlines(1)).next
t2 = [tok[:2] for tok in generate_tokens(readline)]
- assert t1 == t2
-
-
-f = findfile('tokenize_tests' + os.extsep + 'txt')
-test_roundtrip(f)
-
-testdir = os.path.dirname(f) or os.curdir
-testfiles = glob.glob(testdir + os.sep + 'test*.py')
-if not is_resource_enabled('compiler'):
- testfiles = random.sample(testfiles, 10)
-
-for f in testfiles:
- test_roundtrip(f)
-
-
-###### Test detecton of IndentationError ######################
-
-from cStringIO import StringIO
-
-sampleBadText = """
-def foo():
- bar
- baz
-"""
-
-try:
- for tok in generate_tokens(StringIO(sampleBadText).readline):
- pass
-except IndentationError:
- pass
-else:
- raise TestFailed("Did not detect IndentationError:")
-
-
-###### Test example in the docs ###############################
-
-from decimal import Decimal
-from cStringIO import StringIO
+ if t1 != t2:
+ raise TestFailed("untokenize() roundtrip failed for %r" % f)
+# This is an example from the docs, set up as a doctest.
def decistmt(s):
"""Substitute Decimals for floats in a string of statements.
@@ -73,12 +33,21 @@ def decistmt(s):
>>> decistmt(s)
"print +Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7')"
- >>> exec(s)
- -3.21716034272e-007
+ The format of the exponent is inherited from the platform C library.
+ Known cases are "e-007" (Windows) and "e-07" (not Windows). Since
+ we're only showing 12 digits, and the 13th isn't close to 5, the
+ rest of the output should be platform-independent.
+
+ >>> exec(s) #doctest: +ELLIPSIS
+ -3.21716034272e-0...7
+
+ Output from calculations with Decimal should be identical across all
+ platforms.
+
>>> exec(decistmt(s))
-3.217160342717258261933904529E-7
-
"""
+
result = []
g = generate_tokens(StringIO(s).readline) # tokenize the string
for toknum, tokval, _, _, _ in g:
@@ -93,8 +62,53 @@ def decistmt(s):
result.append((toknum, tokval))
return untokenize(result)
-import doctest
-doctest.testmod()
+def test_main():
+ if verbose:
+ print 'starting...'
+
+    # This displays the tokenization of tokenize_tests.txt to stdout, and
+ # regrtest.py checks that this equals the expected output (in the
+ # test/output/ directory).
+ f = open(findfile('tokenize_tests' + os.extsep + 'txt'))
+ tokenize(f.readline)
+ f.close()
+
+    # Now run test_roundtrip() over tokenize_tests.txt too, and over all
+ # (if the "compiler" resource is enabled) or a small random sample (if
+ # "compiler" is not enabled) of the test*.py files.
+ f = findfile('tokenize_tests' + os.extsep + 'txt')
+ test_roundtrip(f)
+
+ testdir = os.path.dirname(f) or os.curdir
+ testfiles = glob.glob(testdir + os.sep + 'test*.py')
+ if not is_resource_enabled('compiler'):
+ testfiles = random.sample(testfiles, 10)
+
+ for f in testfiles:
+ test_roundtrip(f)
+
+    # Test detection of IndentationError.
+ sampleBadText = """\
+def foo():
+ bar
+ baz
+"""
+
+ try:
+ for tok in generate_tokens(StringIO(sampleBadText).readline):
+ pass
+ except IndentationError:
+ pass
+ else:
+ raise TestFailed("Did not detect IndentationError:")
+
+ # Run the doctests in this module.
+ from test import test_tokenize # i.e., this module
+ from test.test_support import run_doctest
+ run_doctest(test_tokenize)
+
+ if verbose:
+ print 'finished'
-if verbose:
- print 'finished'
+if __name__ == "__main__":
+ test_main()
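
A minimal sketch of the tokenize/untokenize roundtrip the rewritten test asserts: re-tokenizing the regenerated source must reproduce the same (type, string) pairs:

    from cStringIO import StringIO
    from tokenize import generate_tokens, untokenize

    src = 'x = 3.14 * 2\n'
    t1 = [tok[:2] for tok in generate_tokens(StringIO(src).readline)]
    rebuilt = untokenize(t1)
    t2 = [tok[:2] for tok in generate_tokens(StringIO(rebuilt).readline)]
    assert t1 == t2
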
diff --git a/Lib/test/test_trace.py b/Lib/test/test_trace.py
index 944ff9a..4f946f7 100644
--- a/Lib/test/test_trace.py
+++ b/Lib/test/test_trace.py
@@ -13,7 +13,15 @@ basic.events = [(0, 'call'),
(1, 'line'),
(1, 'return')]
-# Armin Rigo's failing example:
+# Many of the tests below are tricky because they involve pass statements.
+# If there is implicit control flow around a pass statement (in an except
+# clause or else clause), under what conditions do you set a line number
+# following that clause?
+
+
+# The entire "while 0:" statement is optimized away. No code
+# exists for it, so the line numbers skip directly from "del x"
+# to "x = 1".
def arigo_example():
x = 1
del x
@@ -24,7 +32,6 @@ def arigo_example():
arigo_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
- (3, 'line'),
(5, 'line'),
(5, 'return')]
@@ -60,14 +67,16 @@ no_pop_tops.events = [(0, 'call'),
(2, 'return')]
def no_pop_blocks():
- while 0:
+ y = 1
+ while not y:
bla
x = 1
no_pop_blocks.events = [(0, 'call'),
(1, 'line'),
- (3, 'line'),
- (3, 'return')]
+ (2, 'line'),
+ (4, 'line'),
+ (4, 'return')]
def called(): # line -3
x = 1
@@ -127,6 +136,13 @@ settrace_and_raise.events = [(2, 'exception'),
(4, 'return')]
# implicit return example
+# This test is interesting because of the else: pass
+# part of the code. The code generated for the true
+# part of the if contains a jump past the else branch.
+# The compiler then generates an implicit "return None".
+# Internally, the compiler visits the pass statement
+# and stores its line number for use on the next instruction.
+# The next instruction is the implicit return None.
def ireturn_example():
a = 5
b = 5
@@ -140,7 +156,8 @@ ireturn_example.events = [(0, 'call'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
- (4, 'return')]
+ (6, 'line'),
+ (6, 'return')]
# Tight loop with while(1) example (SF #765624)
def tightloop_example():
@@ -221,14 +238,12 @@ class TraceTestCase(unittest.TestCase):
def test_01_basic(self):
self.run_test(basic)
-## XXX: These tests fail with the new ast compiler. They must
-## be fixed before a release.
-## def test_02_arigo(self):
-## self.run_test(arigo_example)
+ def test_02_arigo(self):
+ self.run_test(arigo_example)
def test_03_one_instr(self):
self.run_test(one_instr_line)
-## def test_04_no_pop_blocks(self):
-## self.run_test(no_pop_blocks)
+ def test_04_no_pop_blocks(self):
+ self.run_test(no_pop_blocks)
## def test_05_no_pop_tops(self):
## self.run_test(no_pop_tops)
def test_06_call(self):
@@ -240,8 +255,8 @@ class TraceTestCase(unittest.TestCase):
self.run_test2(settrace_and_return)
def test_09_settrace_and_raise(self):
self.run_test2(settrace_and_raise)
-## def test_10_ireturn(self):
-## self.run_test(ireturn_example)
+ def test_10_ireturn(self):
+ self.run_test(ireturn_example)
def test_11_tightloop(self):
self.run_test(tightloop_example)
def test_12_tighterloop(self):
@@ -579,17 +594,14 @@ class JumpTestCase(unittest.TestCase):
self.run_test(no_jump_too_far_forwards)
def test_09_no_jump_too_far_backwards(self):
self.run_test(no_jump_too_far_backwards)
-# XXX: These tests cause the interpreter to crash. The frame_setlineno()
-# function no longer works correctly because the lineno table generated by
-# the AST compiler is slightly different than with the old compiler.
-# def test_10_no_jump_to_except_1(self):
-# self.run_test(no_jump_to_except_1)
-# def test_11_no_jump_to_except_2(self):
-# self.run_test(no_jump_to_except_2)
-# def test_12_no_jump_to_except_3(self):
-# self.run_test(no_jump_to_except_3)
-# def test_13_no_jump_to_except_4(self):
-# self.run_test(no_jump_to_except_4)
+ def test_10_no_jump_to_except_1(self):
+ self.run_test(no_jump_to_except_1)
+ def test_11_no_jump_to_except_2(self):
+ self.run_test(no_jump_to_except_2)
+ def test_12_no_jump_to_except_3(self):
+ self.run_test(no_jump_to_except_3)
+ def test_13_no_jump_to_except_4(self):
+ self.run_test(no_jump_to_except_4)
def test_14_no_jump_forwards_into_block(self):
self.run_test(no_jump_forwards_into_block)
def test_15_no_jump_backwards_into_block(self):
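
A minimal sketch of the sys.settrace() event stream these tests compare against; line numbers are recorded relative to the traced function's first line:

    import sys

    events = []

    def tracer(frame, event, arg):
        events.append((frame.f_lineno - base, event))
        return tracer        # keep tracing inside the frame

    def traced():
        x = 1
        del x

    base = traced.func_code.co_firstlineno
    sys.settrace(tracer)
    traced()
    sys.settrace(None)
    print events    # roughly [(0, 'call'), (1, 'line'), (2, 'line'), (2, 'return')]
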
diff --git a/Lib/test/test_traceback.py b/Lib/test/test_traceback.py
index 29a120f..22c0456 100644
--- a/Lib/test/test_traceback.py
+++ b/Lib/test/test_traceback.py
@@ -24,6 +24,9 @@ class TracebackCases(unittest.TestCase):
# XXX why doesn't compile raise the same traceback?
import test.badsyntax_nocaret
+ def syntax_error_bad_indentation(self):
+ compile("def spam():\n print 1\n print 2", "?", "exec")
+
def test_caret(self):
err = self.get_exception_format(self.syntax_error_with_caret,
SyntaxError)
@@ -40,6 +43,13 @@ class TracebackCases(unittest.TestCase):
self.assert_(len(err) == 3)
self.assert_(err[1].strip() == "[x for x in x] = x")
+ def test_bad_indentation(self):
+ err = self.get_exception_format(self.syntax_error_bad_indentation,
+ IndentationError)
+ self.assert_(len(err) == 4)
+ self.assert_("^" in err[2])
+ self.assert_(err[1].strip() == "print 2")
+
def test_bug737473(self):
import sys, os, tempfile, time
diff --git a/Lib/test/test_unicode.py b/Lib/test/test_unicode.py
index 49ef29d..c7113b5 100644
--- a/Lib/test/test_unicode.py
+++ b/Lib/test/test_unicode.py
@@ -810,6 +810,22 @@ class UnicodeTest(
self.assertEqual(str(Foo9("foo")), "string")
self.assertEqual(unicode(Foo9("foo")), u"not unicode")
+ def test_unicode_repr(self):
+ class s1:
+ def __repr__(self):
+ return '\\n'
+
+ class s2:
+ def __repr__(self):
+ return u'\\n'
+
+ self.assertEqual(repr(s1()), '\\n')
+ self.assertEqual(repr(s2()), '\\n')
+
+
+
+
+
def test_main():
test_support.run_unittest(UnicodeTest)
diff --git a/Lib/test/test_urllib2.py b/Lib/test/test_urllib2.py
index 2843138..64a2ee9 100644
--- a/Lib/test/test_urllib2.py
+++ b/Lib/test/test_urllib2.py
@@ -13,8 +13,7 @@ from urllib2 import Request, OpenerDirector
# parse_keqv_list, parse_http_list (I'm leaving this for Anthony Baxter
# and Greg Stein, since they're doing Digest Authentication)
# Authentication stuff (ditto)
-# ProxyHandler, CustomProxy, CustomProxyHandler (I don't use a proxy)
-# GopherHandler (haven't used gopher for a decade or so...)
+# CustomProxy, CustomProxyHandler
class TrivialTests(unittest.TestCase):
def test_trivial(self):
@@ -90,6 +89,7 @@ class FakeMethod:
return self.handle(self.meth_name, self.action, *args)
class MockHandler:
+ handler_order = 500
def __init__(self, methods):
self._define_methods(methods)
def _define_methods(self, methods):
@@ -154,7 +154,7 @@ def add_ordered_mock_handlers(opener, meth_spec):
for meths in meth_spec:
class MockHandlerSubclass(MockHandler): pass
h = MockHandlerSubclass(meths)
- h.handler_order = count
+ h.handler_order += count
h.add_parent(opener)
count = count + 1
handlers.append(h)
@@ -349,13 +349,19 @@ class HandlerTests(unittest.TestCase):
TESTFN = test_support.TESTFN
urlpath = sanepathname2url(os.path.abspath(TESTFN))
towrite = "hello, world\n"
- for url in [
+ urls = [
"file://localhost%s" % urlpath,
"file://%s" % urlpath,
"file://%s%s" % (socket.gethostbyname('localhost'), urlpath),
- "file://%s%s" % (socket.gethostbyname(socket.gethostname()),
- urlpath),
- ]:
+ ]
+ try:
+ localaddr = socket.gethostbyname(socket.gethostname())
+ except socket.gaierror:
+ localaddr = ''
+ if localaddr:
+ urls.append("file://%s%s" % (localaddr, urlpath))
+
+ for url in urls:
f = open(TESTFN, "wb")
try:
try:
@@ -636,6 +642,23 @@ class HandlerTests(unittest.TestCase):
o.open("http://www.example.com/")
self.assert_(not hh.req.has_header("Cookie"))
+ def test_proxy(self):
+ o = OpenerDirector()
+ ph = urllib2.ProxyHandler(dict(http="proxy.example.com:3128"))
+ o.add_handler(ph)
+ meth_spec = [
+ [("http_open", "return response")]
+ ]
+ handlers = add_ordered_mock_handlers(o, meth_spec)
+
+ req = Request("http://acme.example.com/")
+ self.assertEqual(req.get_host(), "acme.example.com")
+ r = o.open(req)
+ self.assertEqual(req.get_host(), "proxy.example.com:3128")
+
+ self.assertEqual([(handlers[0], "http_open")],
+ [tup[0:2] for tup in o.calls])
+
class MiscTests(unittest.TestCase):
@@ -821,6 +844,7 @@ class NetworkTests(unittest.TestCase):
def test_main(verbose=None):
+ test_support.run_doctest(urllib2, verbose)
tests = (TrivialTests,
OpenerDirectorTests,
HandlerTests,
diff --git a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
index 39ada06..5cee458 100644
--- a/Lib/test/test_urlparse.py
+++ b/Lib/test/test_urlparse.py
@@ -12,15 +12,53 @@ class UrlParseTestCase(unittest.TestCase):
def checkRoundtrips(self, url, parsed, split):
result = urlparse.urlparse(url)
self.assertEqual(result, parsed)
+ t = (result.scheme, result.netloc, result.path,
+ result.params, result.query, result.fragment)
+ self.assertEqual(t, parsed)
# put it back together and it should be the same
result2 = urlparse.urlunparse(result)
self.assertEqual(result2, url)
+ self.assertEqual(result2, result.geturl())
+
+ # the result of geturl() is a fixpoint; we can always parse it
+ # again to get the same result:
+ result3 = urlparse.urlparse(result.geturl())
+ self.assertEqual(result3.geturl(), result.geturl())
+ self.assertEqual(result3, result)
+ self.assertEqual(result3.scheme, result.scheme)
+ self.assertEqual(result3.netloc, result.netloc)
+ self.assertEqual(result3.path, result.path)
+ self.assertEqual(result3.params, result.params)
+ self.assertEqual(result3.query, result.query)
+ self.assertEqual(result3.fragment, result.fragment)
+ self.assertEqual(result3.username, result.username)
+ self.assertEqual(result3.password, result.password)
+ self.assertEqual(result3.hostname, result.hostname)
+ self.assertEqual(result3.port, result.port)
# check the roundtrip using urlsplit() as well
result = urlparse.urlsplit(url)
self.assertEqual(result, split)
+ t = (result.scheme, result.netloc, result.path,
+ result.query, result.fragment)
+ self.assertEqual(t, split)
result2 = urlparse.urlunsplit(result)
self.assertEqual(result2, url)
+ self.assertEqual(result2, result.geturl())
+
+ # check the fixpoint property of re-parsing the result of geturl()
+ result3 = urlparse.urlsplit(result.geturl())
+ self.assertEqual(result3.geturl(), result.geturl())
+ self.assertEqual(result3, result)
+ self.assertEqual(result3.scheme, result.scheme)
+ self.assertEqual(result3.netloc, result.netloc)
+ self.assertEqual(result3.path, result.path)
+ self.assertEqual(result3.query, result.query)
+ self.assertEqual(result3.fragment, result.fragment)
+ self.assertEqual(result3.username, result.username)
+ self.assertEqual(result3.password, result.password)
+ self.assertEqual(result3.hostname, result.hostname)
+ self.assertEqual(result3.port, result.port)
def test_roundtrips(self):
testcases = [
@@ -187,6 +225,69 @@ class UrlParseTestCase(unittest.TestCase):
]:
self.assertEqual(urlparse.urldefrag(url), (defrag, frag))
+ def test_urlsplit_attributes(self):
+ url = "HTTP://WWW.PYTHON.ORG/doc/#frag"
+ p = urlparse.urlsplit(url)
+ self.assertEqual(p.scheme, "http")
+ self.assertEqual(p.netloc, "WWW.PYTHON.ORG")
+ self.assertEqual(p.path, "/doc/")
+ self.assertEqual(p.query, "")
+ self.assertEqual(p.fragment, "frag")
+ self.assertEqual(p.username, None)
+ self.assertEqual(p.password, None)
+ self.assertEqual(p.hostname, "www.python.org")
+ self.assertEqual(p.port, None)
+ # geturl() won't return exactly the original URL in this case
+ # since the scheme is always case-normalized
+ #self.assertEqual(p.geturl(), url)
+
+ url = "http://User:Pass@www.python.org:080/doc/?query=yes#frag"
+ p = urlparse.urlsplit(url)
+ self.assertEqual(p.scheme, "http")
+ self.assertEqual(p.netloc, "User:Pass@www.python.org:080")
+ self.assertEqual(p.path, "/doc/")
+ self.assertEqual(p.query, "query=yes")
+ self.assertEqual(p.fragment, "frag")
+ self.assertEqual(p.username, "User")
+ self.assertEqual(p.password, "Pass")
+ self.assertEqual(p.hostname, "www.python.org")
+ self.assertEqual(p.port, 80)
+ self.assertEqual(p.geturl(), url)
+
+ def test_attributes_bad_port(self):
+ """Check handling of non-integer ports."""
+ p = urlparse.urlsplit("http://www.example.net:foo")
+ self.assertEqual(p.netloc, "www.example.net:foo")
+ self.assertRaises(ValueError, lambda: p.port)
+
+ p = urlparse.urlparse("http://www.example.net:foo")
+ self.assertEqual(p.netloc, "www.example.net:foo")
+ self.assertRaises(ValueError, lambda: p.port)
+
+ def test_attributes_without_netloc(self):
+ # This example is straight from RFC 3261. It looks like it
+ # should allow the username, hostname, and port to be filled
+ # in, but doesn't. Since it's a URI and doesn't use the
+ # scheme://netloc syntax, the netloc and related attributes
+ # should be left empty.
+ uri = "sip:alice@atlanta.com;maddr=239.255.255.1;ttl=15"
+ p = urlparse.urlsplit(uri)
+ self.assertEqual(p.netloc, "")
+ self.assertEqual(p.username, None)
+ self.assertEqual(p.password, None)
+ self.assertEqual(p.hostname, None)
+ self.assertEqual(p.port, None)
+ self.assertEqual(p.geturl(), uri)
+
+ p = urlparse.urlparse(uri)
+ self.assertEqual(p.netloc, "")
+ self.assertEqual(p.username, None)
+ self.assertEqual(p.password, None)
+ self.assertEqual(p.hostname, None)
+ self.assertEqual(p.port, None)
+ self.assertEqual(p.geturl(), uri)
+
+
def test_main():
test_support.run_unittest(UrlParseTestCase)
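(A short interactive sketch of the named-attribute behaviour these
round-trip assertions rely on; all values shown follow from the tests
above:)

    import urlparse

    p = urlparse.urlsplit('http://User:Pass@www.python.org:080/doc/?query=yes#frag')
    print p.scheme, p.hostname, p.port      # http www.python.org 80
    print p.username, p.password            # User Pass

    # geturl() is a fixpoint: re-parsing its result changes nothing.
    assert urlparse.urlsplit(p.geturl()).geturl() == p.geturl()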
diff --git a/Lib/test/test_wait3.py b/Lib/test/test_wait3.py
new file mode 100644
index 0000000..f6a41a6
--- /dev/null
+++ b/Lib/test/test_wait3.py
@@ -0,0 +1,32 @@
+"""This test checks for correct wait3() behavior.
+"""
+
+import os
+from test.fork_wait import ForkWait
+from test.test_support import TestSkipped, run_unittest
+
+try:
+ os.fork
+except AttributeError:
+ raise TestSkipped, "os.fork not defined -- skipping test_wait3"
+
+try:
+ os.wait3
+except AttributeError:
+ raise TestSkipped, "os.wait3 not defined -- skipping test_wait3"
+
+class Wait3Test(ForkWait):
+ def wait_impl(self, cpid):
+ while 1:
+ spid, status, rusage = os.wait3(0)
+ if spid == cpid:
+ break
+ self.assertEqual(spid, cpid)
+ self.assertEqual(status, 0, "cause = %d, exit = %d" % (status&0xff, status>>8))
+ self.assertTrue(rusage)
+
+def test_main():
+ run_unittest(Wait3Test)
+
+if __name__ == "__main__":
+ test_main()
diff --git a/Lib/test/test_wait4.py b/Lib/test/test_wait4.py
new file mode 100644
index 0000000..027e5c3
--- /dev/null
+++ b/Lib/test/test_wait4.py
@@ -0,0 +1,29 @@
+"""This test checks for correct wait4() behavior.
+"""
+
+import os
+from test.fork_wait import ForkWait
+from test.test_support import TestSkipped, run_unittest
+
+try:
+ os.fork
+except AttributeError:
+ raise TestSkipped, "os.fork not defined -- skipping test_wait4"
+
+try:
+ os.wait4
+except AttributeError:
+ raise TestSkipped, "os.wait4 not defined -- skipping test_wait4"
+
+class Wait4Test(ForkWait):
+ def wait_impl(self, cpid):
+ spid, status, rusage = os.wait4(cpid, 0)
+ self.assertEqual(spid, cpid)
+ self.assertEqual(status, 0, "cause = %d, exit = %d" % (status&0xff, status>>8))
+ self.assertTrue(rusage)
+
+def test_main():
+ run_unittest(Wait4Test)
+
+if __name__ == "__main__":
+ test_main()
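(Both new tests reuse the ForkWait scaffolding; only the reaping call
differs. A hedged, POSIX-only sketch of the two interfaces:)

    import os

    pid = os.fork()
    if pid == 0:
        os._exit(0)                 # child exits immediately

    # wait3(options) reaps *any* child; wait4(pid, options) targets one.
    # Both return (pid, status, rusage), unlike os.waitpid.
    spid, status, rusage = os.wait4(pid, 0)
    assert spid == pid and status == 0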
diff --git a/Lib/test/test_warnings.py b/Lib/test/test_warnings.py
index b7061c1..5d051a5 100644
--- a/Lib/test/test_warnings.py
+++ b/Lib/test/test_warnings.py
@@ -82,6 +82,10 @@ class TestModule(unittest.TestCase):
self.assertEqual(msg.category, 'UserWarning')
def test_main(verbose=None):
+ # Obscure hack so that this test passes after reloads or repeated calls
+ # to test_main (regrtest -R).
+ if '__warningregistry__' in globals():
+ del globals()['__warningregistry__']
test_support.run_unittest(TestModule)
if __name__ == "__main__":
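(The hack works because the warnings machinery records every warning it
has shown in a module-level __warningregistry__ dict and consults it to
suppress duplicates; deleting the dict makes the warnings fire afresh.
A small sketch:)

    import warnings

    def noisy():
        warnings.warn('spam')   # same code location on every call

    noisy()                     # printed
    noisy()                     # suppressed via __warningregistry__
    del globals()['__warningregistry__']
    noisy()                     # printed again -- the registry was the memory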
diff --git a/Lib/test/test_winsound.py b/Lib/test/test_winsound.py
index 77c432a..19d4459 100644
--- a/Lib/test/test_winsound.py
+++ b/Lib/test/test_winsound.py
@@ -3,6 +3,9 @@
import unittest
from test import test_support
import winsound, time
+import os
+import subprocess
+
class BeepTest(unittest.TestCase):
@@ -44,6 +47,7 @@ class MessageBeepTest(unittest.TestCase):
def test_question(self):
winsound.MessageBeep(winsound.MB_ICONQUESTION)
+
class PlaySoundTest(unittest.TestCase):
def test_errors(self):
@@ -56,19 +60,54 @@ class PlaySoundTest(unittest.TestCase):
)
def test_alias_asterisk(self):
- winsound.PlaySound('SystemAsterisk', winsound.SND_ALIAS)
+ if _have_soundcard():
+ winsound.PlaySound('SystemAsterisk', winsound.SND_ALIAS)
+ else:
+ self.assertRaises(
+ RuntimeError,
+ winsound.PlaySound,
+ 'SystemAsterisk', winsound.SND_ALIAS
+ )
def test_alias_exclamation(self):
- winsound.PlaySound('SystemExclamation', winsound.SND_ALIAS)
+ if _have_soundcard():
+ winsound.PlaySound('SystemExclamation', winsound.SND_ALIAS)
+ else:
+ self.assertRaises(
+ RuntimeError,
+ winsound.PlaySound,
+ 'SystemExclamation', winsound.SND_ALIAS
+ )
def test_alias_exit(self):
- winsound.PlaySound('SystemExit', winsound.SND_ALIAS)
+ if _have_soundcard():
+ winsound.PlaySound('SystemExit', winsound.SND_ALIAS)
+ else:
+ self.assertRaises(
+ RuntimeError,
+ winsound.PlaySound,
+ 'SystemExit', winsound.SND_ALIAS
+ )
def test_alias_hand(self):
- winsound.PlaySound('SystemHand', winsound.SND_ALIAS)
+ if _have_soundcard():
+ winsound.PlaySound('SystemHand', winsound.SND_ALIAS)
+ else:
+ self.assertRaises(
+ RuntimeError,
+ winsound.PlaySound,
+ 'SystemHand', winsound.SND_ALIAS
+ )
def test_alias_question(self):
- winsound.PlaySound('SystemQuestion', winsound.SND_ALIAS)
+ if _have_soundcard():
+ winsound.PlaySound('SystemQuestion', winsound.SND_ALIAS)
+ else:
+ self.assertRaises(
+ RuntimeError,
+ winsound.PlaySound,
+ 'SystemQuestion', winsound.SND_ALIAS
+ )
def test_alias_fallback(self):
# This test can't be expected to work on all systems. The MS
@@ -85,41 +124,83 @@ class PlaySoundTest(unittest.TestCase):
return
def test_alias_nofallback(self):
- # Note that this is not the same as asserting RuntimeError
- # will get raised: you cannot convert this to
- # self.assertRaises(...) form. The attempt may or may not
- # raise RuntimeError, but it shouldn't raise anything other
- # than RuntimeError, and that's all we're trying to test here.
- # The MS docs aren't clear about whether the SDK PlaySound()
- # with SND_ALIAS and SND_NODEFAULT will return True or False when
- # the alias is unknown. On Tim's WinXP box today, it returns
- # True (no exception is raised). What we'd really like to test
- # is that no sound is played, but that requires first wiring an
- # eardrum class into unittest <wink>.
- try:
- winsound.PlaySound(
- '!"$%&/(#+*',
- winsound.SND_ALIAS | winsound.SND_NODEFAULT
+ if _have_soundcard():
+ # Note that this is not the same as asserting RuntimeError
+ # will get raised: you cannot convert this to
+ # self.assertRaises(...) form. The attempt may or may not
+ # raise RuntimeError, but it shouldn't raise anything other
+ # than RuntimeError, and that's all we're trying to test
+ # here. The MS docs aren't clear about whether the SDK
+ # PlaySound() with SND_ALIAS and SND_NODEFAULT will return
+ # True or False when the alias is unknown. On Tim's WinXP
+ # box today, it returns True (no exception is raised). What
+ # we'd really like to test is that no sound is played, but
+ # that requires first wiring an eardrum class into unittest
+ # <wink>.
+ try:
+ winsound.PlaySound(
+ '!"$%&/(#+*',
+ winsound.SND_ALIAS | winsound.SND_NODEFAULT
+ )
+ except RuntimeError:
+ pass
+ else:
+ self.assertRaises(
+ RuntimeError,
+ winsound.PlaySound,
+ '!"$%&/(#+*', winsound.SND_ALIAS | winsound.SND_NODEFAULT
)
- except RuntimeError:
- pass
def test_stopasync(self):
- winsound.PlaySound(
- 'SystemQuestion',
- winsound.SND_ALIAS | winsound.SND_ASYNC | winsound.SND_LOOP
- )
- time.sleep(0.5)
- try:
+ if _have_soundcard():
winsound.PlaySound(
'SystemQuestion',
- winsound.SND_ALIAS | winsound.SND_NOSTOP
+ winsound.SND_ALIAS | winsound.SND_ASYNC | winsound.SND_LOOP
+ )
+ time.sleep(0.5)
+ try:
+ winsound.PlaySound(
+ 'SystemQuestion',
+ winsound.SND_ALIAS | winsound.SND_NOSTOP
+ )
+ except RuntimeError:
+ pass
+ else: # the first sound might already be finished
+ pass
+ winsound.PlaySound(None, winsound.SND_PURGE)
+ else:
+ self.assertRaises(
+ RuntimeError,
+ winsound.PlaySound,
+ None, winsound.SND_PURGE
)
- except RuntimeError:
- pass
- else: # the first sound might already be finished
- pass
- winsound.PlaySound(None, winsound.SND_PURGE)
+
+
+def _get_cscript_path():
+ """Return the full path to cscript.exe or None."""
+ for dir in os.environ.get("PATH", "").split(os.pathsep):
+ cscript_path = os.path.join(dir, "cscript.exe")
+ if os.path.exists(cscript_path):
+ return cscript_path
+
+__have_soundcard_cache = None
+def _have_soundcard():
+ """Return True iff this computer has a soundcard."""
+ global __have_soundcard_cache
+ if __have_soundcard_cache is None:
+ cscript_path = _get_cscript_path()
+ if cscript_path is None:
+ # Could not find cscript.exe to run our VBScript helper. Default
+ # to True: most computers these days *do* have a soundcard.
+ return True
+
+ check_script = os.path.join(os.path.dirname(__file__),
+ "check_soundcard.vbs")
+ p = subprocess.Popen([cscript_path, check_script],
+ stdout=subprocess.PIPE)
+ __have_soundcard_cache = not p.wait()
+ return __have_soundcard_cache
+
def test_main():
test_support.run_unittest(BeepTest, MessageBeepTest, PlaySoundTest)
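(The probe-once-and-cache shape used by _have_soundcard() is the general
pattern for expensive capability checks; probe() below is a stand-in for
the cscript.exe call:)

    _cache = None
    def have_feature(probe):
        """Run probe() at most once; remember and reuse its answer."""
        global _cache
        if _cache is None:
            _cache = bool(probe())
        return _cache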
diff --git a/Lib/test/test_with.py b/Lib/test/test_with.py
index 4854436..48e00f4 100644
--- a/Lib/test/test_with.py
+++ b/Lib/test/test_with.py
@@ -494,6 +494,62 @@ class ExceptionalTestCase(unittest.TestCase, ContextmanagerAssertionMixin):
self.assertAfterWithGeneratorInvariantsWithError(self.foo)
self.assertAfterWithGeneratorInvariantsNoError(self.bar)
+ def testRaisedStopIteration1(self):
+ @contextmanager
+ def cm():
+ yield
+
+ def shouldThrow():
+ with cm():
+ raise StopIteration("from with")
+
+ self.assertRaises(StopIteration, shouldThrow)
+
+ def testRaisedStopIteration2(self):
+ class cm(object):
+ def __context__(self):
+ return self
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, type, value, traceback):
+ pass
+
+ def shouldThrow():
+ with cm():
+ raise StopIteration("from with")
+
+ self.assertRaises(StopIteration, shouldThrow)
+
+ def testRaisedGeneratorExit1(self):
+ @contextmanager
+ def cm():
+ yield
+
+ def shouldThrow():
+ with cm():
+ raise GeneratorExit("from with")
+
+ self.assertRaises(GeneratorExit, shouldThrow)
+
+ def testRaisedGeneratorExit2(self):
+ class cm(object):
+ def __context__(self):
+ return self
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, type, value, traceback):
+ pass
+
+ def shouldThrow():
+ with cm():
+ raise GeneratorExit("from with")
+
+ self.assertRaises(GeneratorExit, shouldThrow)
+
class NonLocalFlowControlTestCase(unittest.TestCase):
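(These tests pin down a subtlety of PEP 343: a StopIteration raised in
the body of a with block must escape, even though @contextmanager drives
its generator with the same exception machinery. A hedged sketch of the
behaviour being asserted:)

    from contextlib import contextmanager

    @contextmanager
    def cm():
        yield

    try:
        with cm():
            raise StopIteration('from with')
    except StopIteration, exc:
        # the exception propagates out of the with block instead of
        # being swallowed by the generator protocol
        print 'propagated:', exc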
diff --git a/Lib/test/test_xrange.py b/Lib/test/test_xrange.py
index 40590cd..c0d1dbe 100644
--- a/Lib/test/test_xrange.py
+++ b/Lib/test/test_xrange.py
@@ -57,12 +57,7 @@ class XrangeTest(unittest.TestCase):
self.assertRaises(OverflowError, xrange, 0, 2*sys.maxint)
r = xrange(-sys.maxint, sys.maxint, 2)
- if sys.maxint > 0x7fffffff:
- # XXX raising ValueError is less than ideal, but this can't
- # be fixed until range_length() returns a long in rangeobject.c
- self.assertRaises(ValueError, len, r)
- else:
- self.assertEqual(len(r), sys.maxint)
+ self.assertEqual(len(r), sys.maxint)
self.assertRaises(OverflowError, xrange, -sys.maxint-1, sys.maxint, 2)
def test_main():
diff --git a/Lib/test/test_zipimport.py b/Lib/test/test_zipimport.py
index eb7cbf6..4e1a845 100644
--- a/Lib/test/test_zipimport.py
+++ b/Lib/test/test_zipimport.py
@@ -12,7 +12,12 @@ from test import test_support
from test.test_importhooks import ImportHooksBaseTestCase, test_src, test_co
import zipimport
-
+import linecache
+import doctest
+import inspect
+import StringIO
+from traceback import extract_tb, extract_stack, print_tb
+raise_src = 'def do_raise(): raise TypeError\n'
# so we only run testAFakeZlib once if this test is run repeatedly
# which happens when we look for ref leaks
@@ -54,7 +59,8 @@ class UncompressedZipImportTestCase(ImportHooksBaseTestCase):
def setUp(self):
# We're reusing the zip archive path, so we must clear the
- # cached directory info.
+ # cached directory info and linecache
+ linecache.clearcache()
zipimport._zip_directory_cache.clear()
ImportHooksBaseTestCase.setUp(self)
@@ -83,6 +89,11 @@ class UncompressedZipImportTestCase(ImportHooksBaseTestCase):
mod = __import__(".".join(modules), globals(), locals(),
["__dummy__"])
+
+ call = kw.get('call')
+ if call is not None:
+ call(mod)
+
if expected_ext:
file = mod.get_file()
self.assertEquals(file, os.path.join(TEMP_ZIP,
@@ -249,6 +260,74 @@ class UncompressedZipImportTestCase(ImportHooksBaseTestCase):
self.doTest(".py", files, TESTMOD,
stuff="Some Stuff"*31)
+ def assertModuleSource(self, module):
+ self.assertEqual(inspect.getsource(module), test_src)
+
+ def testGetSource(self):
+ files = {TESTMOD + ".py": (NOW, test_src)}
+ self.doTest(".py", files, TESTMOD, call=self.assertModuleSource)
+
+ def testGetCompiledSource(self):
+ pyc = make_pyc(compile(test_src, "<???>", "exec"), NOW)
+ files = {TESTMOD + ".py": (NOW, test_src),
+ TESTMOD + pyc_ext: (NOW, pyc)}
+ self.doTest(pyc_ext, files, TESTMOD, call=self.assertModuleSource)
+
+ def runDoctest(self, callback):
+ files = {TESTMOD + ".py": (NOW, test_src),
+ "xyz.txt": (NOW, ">>> log.append(True)\n")}
+ self.doTest(".py", files, TESTMOD, call=callback)
+
+ def doDoctestFile(self, module):
+ log = []
+ old_master, doctest.master = doctest.master, None
+ try:
+ doctest.testfile(
+ 'xyz.txt', package=module, module_relative=True,
+ globs=locals()
+ )
+ finally:
+ doctest.master = old_master
+ self.assertEqual(log, [True])
+
+ def testDoctestFile(self):
+ self.runDoctest(self.doDoctestFile)
+
+ def doDoctestSuite(self, module):
+ log = []
+ doctest.DocFileTest(
+ 'xyz.txt', package=module, module_relative=True,
+ globs=locals()
+ ).run()
+ self.assertEqual(log, [True])
+
+ def testDoctestSuite(self):
+ self.runDoctest(self.doDoctestSuite)
+
+
+ def doTraceback(self, module):
+ try:
+ module.do_raise()
+ except:
+ tb = sys.exc_info()[2].tb_next
+
+ f,lno,n,line = extract_tb(tb, 1)[0]
+ self.assertEqual(line, raise_src.strip())
+
+ f,lno,n,line = extract_stack(tb.tb_frame, 1)[0]
+ self.assertEqual(line, raise_src.strip())
+
+ s = StringIO.StringIO()
+ print_tb(tb, 1, s)
+ self.failUnless(s.getvalue().endswith(raise_src))
+ else:
+ raise AssertionError("This ought to be impossible")
+
+ def testTraceback(self):
+ files = {TESTMOD + ".py": (NOW, raise_src)}
+ self.doTest(None, files, TESTMOD, call=self.doTraceback)
+
+
class CompressedZipImportTestCase(UncompressedZipImportTestCase):
compression = ZIP_DEFLATED
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index 2b40e6f..a30791c 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -182,7 +182,7 @@ def untokenize(iterable):
for tok in iterable:
toknum, tokval = tok[:2]
- if toknum == NAME:
+ if toknum in (NAME, NUMBER):
tokval += ' '
if toknum == INDENT:
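(The one-line change matters because two adjacent NUMBER tokens would
otherwise be glued together on re-emission, producing source that
tokenizes differently. A quick round-trip sketch:)

    import tokenize, StringIO

    src = '3 .real'      # NUMBER '3', OP '.', NAME 'real'
    toks = tokenize.generate_tokens(StringIO.StringIO(src).readline)
    print tokenize.untokenize(toks)
    # With the fix, '3' is emitted as '3 ' and the result retokenizes
    # identically; without it, '3.real' starts with the float '3.'.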
diff --git a/Lib/traceback.py b/Lib/traceback.py
index 93a64b7..abda422 100644
--- a/Lib/traceback.py
+++ b/Lib/traceback.py
@@ -66,7 +66,7 @@ def print_tb(tb, limit=None, file=None):
_print(file,
' File "%s", line %d, in %s' % (filename,lineno,name))
linecache.checkcache(filename)
- line = linecache.getline(filename, lineno)
+ line = linecache.getline(filename, lineno, f.f_globals)
if line: _print(file, ' ' + line.strip())
tb = tb.tb_next
n = n+1
@@ -98,7 +98,7 @@ def extract_tb(tb, limit = None):
filename = co.co_filename
name = co.co_name
linecache.checkcache(filename)
- line = linecache.getline(filename, lineno)
+ line = linecache.getline(filename, lineno, f.f_globals)
if line: line = line.strip()
else: line = None
list.append((filename, lineno, name, line))
@@ -158,14 +158,14 @@ def format_exception_only(etype, value):
"""
list = []
if (type(etype) == types.ClassType
- or issubclass(etype, Exception)):
+ or (isinstance(etype, type) and issubclass(etype, Exception))):
stype = etype.__name__
else:
stype = etype
if value is None:
list.append(str(stype) + '\n')
else:
- if etype is SyntaxError:
+ if issubclass(etype, SyntaxError):
try:
msg, (filename, lineno, offset, line) = value
except:
@@ -279,7 +279,7 @@ def extract_stack(f=None, limit = None):
filename = co.co_filename
name = co.co_name
linecache.checkcache(filename)
- line = linecache.getline(filename, lineno)
+ line = linecache.getline(filename, lineno, f.f_globals)
if line: line = line.strip()
else: line = None
list.append((filename, lineno, name, line))
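(All three call sites now hand the frame's globals to linecache, which
can then fall back on the module's PEP 302 __loader__.get_source() hook
when the file isn't on disk -- this is what makes tracebacks from
zipimported modules show source lines. A hedged sketch of the lookup:)

    import linecache

    def source_line(frame, lineno):
        filename = frame.f_code.co_filename
        linecache.checkcache(filename)
        # module_globals lets linecache consult
        # frame.f_globals['__loader__'].get_source() for files that
        # aren't directly readable (zip archives, frozen modules, ...)
        return linecache.getline(filename, lineno, frame.f_globals).strip()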
diff --git a/Lib/urllib.py b/Lib/urllib.py
index 136f42e..d65c0b0 100644
--- a/Lib/urllib.py
+++ b/Lib/urllib.py
@@ -442,9 +442,9 @@ class URLopener:
return addinfourl(fp, noheaders(), "gopher:" + url)
def open_file(self, url):
+ """Use local file or FTP depending on form of URL."""
if not isinstance(url, str):
raise IOError, ('file error', 'proxy support for file protocol currently not implemented')
- """Use local file or FTP depending on form of URL."""
if url[:2] == '//' and url[2:3] != '/' and url[2:12].lower() != 'localhost/':
return self.open_ftp(url)
else:
@@ -1032,7 +1032,7 @@ def splithost(url):
global _hostprog
if _hostprog is None:
import re
- _hostprog = re.compile('^//([^/]*)(.*)$')
+ _hostprog = re.compile('^//([^/?]*)(.*)$')
match = _hostprog.match(url)
if match: return match.group(1, 2)
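(The tightened pattern stops the host at the first '?' as well as the
first '/', so a query glued straight onto the authority no longer leaks
into the host. A before/after sketch:)

    import urllib

    # old pattern '^//([^/]*)(.*)$'  -> ('www.example.com?spam=1', '')
    # new pattern '^//([^/?]*)(.*)$' -> ('www.example.com', '?spam=1')
    print urllib.splithost('//www.example.com?spam=1')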
diff --git a/Lib/urllib2.py b/Lib/urllib2.py
index 4c83bfc..ec01c8f 100644
--- a/Lib/urllib2.py
+++ b/Lib/urllib2.py
@@ -14,7 +14,7 @@ non-error returns. The HTTPRedirectHandler automatically deals with
HTTP 301, 302, 303 and 307 redirect errors, and the HTTPDigestAuthHandler
deals with digest authentication.
-urlopen(url, data=None) -- basic usage is that same as original
+urlopen(url, data=None) -- basic usage is the same as original
urllib. pass the url and optionally data to post to an HTTP URL, and
get a file-like object back. One difference is that you can also pass
a Request instance instead of URL. Raises a URLError (subclass of
@@ -77,16 +77,13 @@ f = urllib2.urlopen('http://www.python.org/')
# the handler knows what the problem was, e.g., that it didn't know
# the hash algorithm that was requested in the challenge, it would be good to
# pass that information along to the client, too.
-
-# XXX to do:
-# name!
-# documentation (getting there)
-# complex proxies
-# abstract factory for opener
# ftp errors aren't handled cleanly
-# gopher can return a socket.error
# check digest against correct (i.e. non-apache) implementation
+# Possible extensions:
+# complex proxies XXX not sure what exactly was meant by this
+# abstract factory for opener
+
import base64
import ftplib
import httplib
@@ -111,15 +108,15 @@ try:
except ImportError:
from StringIO import StringIO
-# not sure how many of these need to be gotten rid of
-from urllib import (unwrap, unquote, splittype, splithost,
+from urllib import (unwrap, unquote, splittype, splithost, quote,
addinfourl, splitport, splitgophertype, splitquery,
splitattr, ftpwrapper, noheaders, splituser, splitpasswd, splitvalue)
# support for FileHandler, proxies via environment variables
from urllib import localhost, url2pathname, getproxies
-__version__ = "2.4"
+# used in User-Agent header sent
+__version__ = sys.version[:3]
_opener = None
def urlopen(url, data=None):
@@ -330,8 +327,9 @@ class OpenerDirector:
pass
def _call_chain(self, chain, kind, meth_name, *args):
- # XXX raise an exception if no one else should try to handle
- # this url. return None if you can't but someone else could.
+ # Handlers raise an exception if no one else should try to handle
+ # the request, or return None if they can't but another handler
+ # could. Otherwise, they return the response.
handlers = chain.get(kind, ())
for handler in handlers:
func = getattr(handler, meth_name)
@@ -507,6 +505,8 @@ class HTTPRedirectHandler(BaseHandler):
# from the user (of urllib2, in this case). In practice,
# essentially all clients do redirect in this case, so we
# do the same.
+ # be lenient with URIs containing a space
+ newurl = newurl.replace(' ', '%20')
return Request(newurl,
headers=req.headers,
origin_req_host=req.get_origin_req_host(),
@@ -561,6 +561,80 @@ class HTTPRedirectHandler(BaseHandler):
"lead to an infinite loop.\n" \
"The last 30x error message was:\n"
+
+def _parse_proxy(proxy):
+ """Return (scheme, user, password, host/port) given a URL or an authority.
+
+ If a URL is supplied, it must have an authority (host:port) component.
+ According to RFC 3986, having an authority component means the URL must
+ have two slashes after the scheme:
+
+ >>> _parse_proxy('file:/ftp.example.com/')
+ Traceback (most recent call last):
+ ValueError: proxy URL with no authority: 'file:/ftp.example.com/'
+
+ The first three items of the returned tuple may be None.
+
+ Examples of authority parsing:
+
+ >>> _parse_proxy('proxy.example.com')
+ (None, None, None, 'proxy.example.com')
+ >>> _parse_proxy('proxy.example.com:3128')
+ (None, None, None, 'proxy.example.com:3128')
+
+ The authority component may optionally include userinfo (assumed to be
+ username:password):
+
+ >>> _parse_proxy('joe:password@proxy.example.com')
+ (None, 'joe', 'password', 'proxy.example.com')
+ >>> _parse_proxy('joe:password@proxy.example.com:3128')
+ (None, 'joe', 'password', 'proxy.example.com:3128')
+
+ Same examples, but with URLs instead:
+
+ >>> _parse_proxy('http://proxy.example.com/')
+ ('http', None, None, 'proxy.example.com')
+ >>> _parse_proxy('http://proxy.example.com:3128/')
+ ('http', None, None, 'proxy.example.com:3128')
+ >>> _parse_proxy('http://joe:password@proxy.example.com/')
+ ('http', 'joe', 'password', 'proxy.example.com')
+ >>> _parse_proxy('http://joe:password@proxy.example.com:3128')
+ ('http', 'joe', 'password', 'proxy.example.com:3128')
+
+ Everything after the authority is ignored:
+
+ >>> _parse_proxy('ftp://joe:password@proxy.example.com/rubbish:3128')
+ ('ftp', 'joe', 'password', 'proxy.example.com')
+
+ Test for no trailing '/' case:
+
+ >>> _parse_proxy('http://joe:password@proxy.example.com')
+ ('http', 'joe', 'password', 'proxy.example.com')
+
+ """
+ from urlparse import _splitnetloc
+ scheme, r_scheme = splittype(proxy)
+ if not r_scheme.startswith("/"):
+ # authority
+ scheme = None
+ authority = proxy
+ else:
+ # URL
+ if not r_scheme.startswith("//"):
+ raise ValueError("proxy URL with no authority: %r" % proxy)
+ # We have an authority, so for RFC 3986-compliant URLs (by sections
+ # 3.2 and 3.3), path is empty or starts with '/'
+ end = r_scheme.find("/", 2)
+ if end == -1:
+ end = None
+ authority = r_scheme[2:end]
+ userinfo, hostport = splituser(authority)
+ if userinfo is not None:
+ user, password = splitpasswd(userinfo)
+ else:
+ user = password = None
+ return scheme, user, password, hostport
+
class ProxyHandler(BaseHandler):
# Proxies must be in front
handler_order = 100
@@ -577,76 +651,27 @@ class ProxyHandler(BaseHandler):
def proxy_open(self, req, proxy, type):
orig_type = req.get_type()
- type, r_type = splittype(proxy)
- if not type or r_type.isdigit():
- # proxy is specified without protocol
- type = orig_type
- host = proxy
- else:
- host, r_host = splithost(r_type)
- user_pass, host = splituser(host)
- user, password = splitpasswd(user_pass)
+ proxy_type, user, password, hostport = _parse_proxy(proxy)
+ if proxy_type is None:
+ proxy_type = orig_type
if user and password:
- user, password = user_pass.split(':', 1)
- user_pass = base64.encodestring('%s:%s' % (unquote(user),
- unquote(password))).strip()
- req.add_header('Proxy-authorization', 'Basic ' + user_pass)
- host = unquote(host)
- req.set_proxy(host, type)
- if orig_type == type:
+ user_pass = '%s:%s' % (unquote(user), unquote(password))
+ creds = base64.encodestring(user_pass).strip()
+ req.add_header('Proxy-authorization', 'Basic ' + creds)
+ hostport = unquote(hostport)
+ req.set_proxy(hostport, proxy_type)
+ if orig_type == proxy_type:
# let other handlers take care of it
- # XXX this only makes sense if the proxy is before the
- # other handlers
return None
else:
# need to start over, because the other handlers don't
# grok the proxy's URL type
+ # e.g. if we have a constructor arg proxies like so:
+ # {'http': 'ftp://proxy.example.com'}, we may end up turning
+ # a request for http://acme.example.com/a into one for
+ # ftp://proxy.example.com/a
return self.parent.open(req)
-# feature suggested by Duncan Booth
-# XXX custom is not a good name
-class CustomProxy:
- # either pass a function to the constructor or override handle
- def __init__(self, proto, func=None, proxy_addr=None):
- self.proto = proto
- self.func = func
- self.addr = proxy_addr
-
- def handle(self, req):
- if self.func and self.func(req):
- return 1
-
- def get_proxy(self):
- return self.addr
-
-class CustomProxyHandler(BaseHandler):
- # Proxies must be in front
- handler_order = 100
-
- def __init__(self, *proxies):
- self.proxies = {}
-
- def proxy_open(self, req):
- proto = req.get_type()
- try:
- proxies = self.proxies[proto]
- except KeyError:
- return None
- for p in proxies:
- if p.handle(req):
- req.set_proxy(p.get_proxy())
- return self.parent.open(req)
- return None
-
- def do_proxy(self, p, req):
- return self.parent.open(req)
-
- def add_proxy(self, cpo):
- if cpo.proto in self.proxies:
- self.proxies[cpo.proto].append(cpo)
- else:
- self.proxies[cpo.proto] = [cpo]
-
class HTTPPasswordMgr:
def __init__(self):
self.passwd = {}
@@ -1128,8 +1153,11 @@ class FileHandler(BaseHandler):
names = None
def get_names(self):
if FileHandler.names is None:
- FileHandler.names = (socket.gethostbyname('localhost'),
- socket.gethostbyname(socket.gethostname()))
+ try:
+ FileHandler.names = (socket.gethostbyname('localhost'),
+ socket.gethostbyname(socket.gethostname()))
+ except socket.gaierror:
+ FileHandler.names = (socket.gethostbyname('localhost'),)
return FileHandler.names
# not entirely sure what the rules are here
@@ -1258,6 +1286,7 @@ class CacheFTPHandler(FTPHandler):
class GopherHandler(BaseHandler):
def gopher_open(self, req):
+ # XXX can raise socket.error
import gopherlib # this raises DeprecationWarning in 2.5
host = req.get_host()
if not host:
@@ -1273,25 +1302,3 @@ class GopherHandler(BaseHandler):
else:
fp = gopherlib.send_selector(selector, host)
return addinfourl(fp, noheaders(), req.get_full_url())
-
-#bleck! don't use this yet
-class OpenerFactory:
-
- default_handlers = [UnknownHandler, HTTPHandler,
- HTTPDefaultErrorHandler, HTTPRedirectHandler,
- FTPHandler, FileHandler]
- handlers = []
- replacement_handlers = []
-
- def add_handler(self, h):
- self.handlers = self.handlers + [h]
-
- def replace_handler(self, h):
- pass
-
- def build_opener(self):
- opener = OpenerDirector()
- for ph in self.default_handlers:
- if inspect.isclass(ph):
- ph = ph()
- opener.add_handler(ph)
diff --git a/Lib/urlparse.py b/Lib/urlparse.py
index 8b75051..eade040 100644
--- a/Lib/urlparse.py
+++ b/Lib/urlparse.py
@@ -16,12 +16,12 @@ uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet',
'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '',
'svn', 'svn+ssh', 'sftp']
non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
- 'telnet', 'wais', 'imap', 'snews', 'sip']
+ 'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']
uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap',
- 'https', 'shttp', 'rtsp', 'rtspu', 'sip',
+ 'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
'mms', '', 'sftp']
uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms',
- 'gopher', 'rtsp', 'rtspu', 'sip', '']
+ 'gopher', 'rtsp', 'rtspu', 'sip', 'sips', '']
uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news',
'nntp', 'wais', 'https', 'shttp', 'snews',
'file', 'prospero', '']
@@ -41,7 +41,111 @@ def clear_cache():
_parse_cache = {}
-def urlparse(url, scheme='', allow_fragments=1):
+class BaseResult(tuple):
+ """Base class for the parsed result objects.
+
+ This provides the attributes shared by the two derived result
+ objects as read-only properties. The derived classes are
+ responsible for checking the right number of arguments were
+ supplied to the constructor.
+
+ """
+
+ __slots__ = ()
+
+ # Attributes that access the basic components of the URL:
+
+ @property
+ def scheme(self):
+ return self[0]
+
+ @property
+ def netloc(self):
+ return self[1]
+
+ @property
+ def path(self):
+ return self[2]
+
+ @property
+ def query(self):
+ return self[-2]
+
+ @property
+ def fragment(self):
+ return self[-1]
+
+ # Additional attributes that provide access to parsed-out portions
+ # of the netloc:
+
+ @property
+ def username(self):
+ netloc = self.netloc
+ if "@" in netloc:
+ userinfo = netloc.split("@", 1)[0]
+ if ":" in userinfo:
+ userinfo = userinfo.split(":", 1)[0]
+ return userinfo
+ return None
+
+ @property
+ def password(self):
+ netloc = self.netloc
+ if "@" in netloc:
+ userinfo = netloc.split("@", 1)[0]
+ if ":" in userinfo:
+ return userinfo.split(":", 1)[1]
+ return None
+
+ @property
+ def hostname(self):
+ netloc = self.netloc
+ if "@" in netloc:
+ netloc = netloc.split("@", 1)[1]
+ if ":" in netloc:
+ netloc = netloc.split(":", 1)[0]
+ return netloc.lower() or None
+
+ @property
+ def port(self):
+ netloc = self.netloc
+ if "@" in netloc:
+ netloc = netloc.split("@", 1)[1]
+ if ":" in netloc:
+ port = netloc.split(":", 1)[1]
+ return int(port, 10)
+ return None
+
+
+class SplitResult(BaseResult):
+
+ __slots__ = ()
+
+ def __new__(cls, scheme, netloc, path, query, fragment):
+ return BaseResult.__new__(
+ cls, (scheme, netloc, path, query, fragment))
+
+ def geturl(self):
+ return urlunsplit(self)
+
+
+class ParseResult(BaseResult):
+
+ __slots__ = ()
+
+ def __new__(cls, scheme, netloc, path, params, query, fragment):
+ return BaseResult.__new__(
+ cls, (scheme, netloc, path, params, query, fragment))
+
+ @property
+ def params(self):
+ return self[3]
+
+ def geturl(self):
+ return urlunparse(self)
+
+
+def urlparse(url, scheme='', allow_fragments=True):
"""Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
@@ -53,7 +157,7 @@ def urlparse(url, scheme='', allow_fragments=1):
url, params = _splitparams(url)
else:
params = ''
- return scheme, netloc, url, params, query, fragment
+ return ParseResult(scheme, netloc, url, params, query, fragment)
def _splitparams(url):
if '/' in url:
@@ -73,12 +177,13 @@ def _splitnetloc(url, start=0):
delim = len(url)
return url[start:delim], url[delim:]
-def urlsplit(url, scheme='', allow_fragments=1):
+def urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
Return a 5-tuple: (scheme, netloc, path, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
+ allow_fragments = bool(allow_fragments)
key = url, scheme, allow_fragments
cached = _parse_cache.get(key, None)
if cached:
@@ -97,9 +202,9 @@ def urlsplit(url, scheme='', allow_fragments=1):
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
- tuple = scheme, netloc, url, query, fragment
- _parse_cache[key] = tuple
- return tuple
+ v = SplitResult(scheme, netloc, url, query, fragment)
+ _parse_cache[key] = v
+ return v
for c in url[:i]:
if c not in scheme_chars:
break
@@ -111,9 +216,9 @@ def urlsplit(url, scheme='', allow_fragments=1):
url, fragment = url.split('#', 1)
if scheme in uses_query and '?' in url:
url, query = url.split('?', 1)
- tuple = scheme, netloc, url, query, fragment
- _parse_cache[key] = tuple
- return tuple
+ v = SplitResult(scheme, netloc, url, query, fragment)
+ _parse_cache[key] = v
+ return v
def urlunparse((scheme, netloc, url, params, query, fragment)):
"""Put a parsed URL back together again. This may result in a
@@ -136,7 +241,7 @@ def urlunsplit((scheme, netloc, url, query, fragment)):
url = url + '#' + fragment
return url
-def urljoin(base, url, allow_fragments = 1):
+def urljoin(base, url, allow_fragments=True):
"""Join a base URL and a possibly relative URL to form an absolute
interpretation of the latter."""
if not base:
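(The result classes stay tuple subclasses, so every existing caller that
unpacks or indexes the return value keeps working, while new code gets
named attributes; __slots__ = () keeps instances as cheap as plain
tuples. A sketch of the pattern with hypothetical names:)

    class _Pair(tuple):
        """Tuple subclass with read-only named fields (hypothetical)."""
        __slots__ = ()                      # no per-instance __dict__

        def __new__(cls, first, second):
            return tuple.__new__(cls, (first, second))

        @property
        def first(self):
            return self[0]

        @property
        def second(self):
            return self[1]

    p = _Pair('http', 'www.python.org')
    scheme, netloc = p          # old tuple-style access still works
    print p.first, p.second     # new attribute-style access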
diff --git a/Lib/uu.py b/Lib/uu.py
index 40e8bf0..3ccedb0 100755
--- a/Lib/uu.py
+++ b/Lib/uu.py
@@ -132,7 +132,7 @@ def decode(in_file, out_file=None, mode=None, quiet=0):
data = binascii.a2b_uu(s)
except binascii.Error, v:
# Workaround for broken uuencoders by /Fredrik Lundh
- nbytes = (((ord(s[0])-32) & 63) * 4 + 5) / 3
+ nbytes = (((ord(s[0])-32) & 63) * 4 + 5) // 3
data = binascii.a2b_uu(s[:nbytes])
if not quiet:
sys.stderr.write("Warning: %s\n" % v)
@@ -151,7 +151,7 @@ def test():
(options, args) = parser.parse_args()
if len(args) > 2:
- p.error('incorrect number of arguments')
+ parser.error('incorrect number of arguments')
sys.exit(1)
input = sys.stdin
diff --git a/Lib/warnings.py b/Lib/warnings.py
index e622b9a..bc0b818 100644
--- a/Lib/warnings.py
+++ b/Lib/warnings.py
@@ -58,10 +58,11 @@ def warn(message, category=None, stacklevel=1):
if not filename:
filename = module
registry = globals.setdefault("__warningregistry__", {})
- warn_explicit(message, category, filename, lineno, module, registry)
+ warn_explicit(message, category, filename, lineno, module, registry,
+ globals)
def warn_explicit(message, category, filename, lineno,
- module=None, registry=None):
+ module=None, registry=None, module_globals=None):
if module is None:
module = filename or "<unknown>"
if module[-3:].lower() == ".py":
@@ -92,6 +93,11 @@ def warn_explicit(message, category, filename, lineno,
if action == "ignore":
registry[key] = 1
return
+
+ # Prime the linecache for formatting, in case the
+ # "file" is actually in a zipfile or something.
+ linecache.getlines(filename, module_globals)
+
if action == "error":
raise message
# Other actions
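(The getlines() call looks like a no-op, but it warms linecache's cache
using the module's globals -- and hence its PEP 302 __loader__ -- so the
later formatting step, which only has a filename, still finds the source.
A hedged sketch of the priming step:)

    import linecache

    def _prime_and_fetch(filename, lineno, module_globals):
        # getlines() with module_globals can pull source through the
        # module's __loader__ (e.g. out of a zipfile); afterwards a
        # plain getline() by filename hits the warm cache.
        linecache.getlines(filename, module_globals)
        return linecache.getline(filename, lineno)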
diff --git a/Lib/xmlcore/dom/expatbuilder.py b/Lib/xmlcore/dom/expatbuilder.py
index 81d9c2b..32ffa41 100644
--- a/Lib/xmlcore/dom/expatbuilder.py
+++ b/Lib/xmlcore/dom/expatbuilder.py
@@ -59,7 +59,7 @@ _typeinfo_map = {
"NMTOKENS": minidom.TypeInfo(None, "nmtokens"),
}
-class ElementInfo(NewStyle):
+class ElementInfo(object):
__slots__ = '_attr_info', '_model', 'tagName'
def __init__(self, tagName, model=None):
@@ -460,7 +460,7 @@ class ExpatBuilder:
# where allowed.
_ALLOWED_FILTER_RETURNS = (FILTER_ACCEPT, FILTER_REJECT, FILTER_SKIP)
-class FilterVisibilityController(NewStyle):
+class FilterVisibilityController(object):
"""Wrapper around a DOMBuilderFilter which implements the checks
to make the whatToShow filter attribute work."""
@@ -518,7 +518,7 @@ class FilterVisibilityController(NewStyle):
}
-class FilterCrutch(NewStyle):
+class FilterCrutch(object):
__slots__ = '_builder', '_level', '_old_start', '_old_end'
def __init__(self, builder):
@@ -908,7 +908,7 @@ class InternalSubsetExtractor(ExpatBuilder):
raise ParseEscape()
-def parse(file, namespaces=1):
+def parse(file, namespaces=True):
"""Parse a document, returning the resulting Document node.
'file' may be either a file name or an open file object.
@@ -929,7 +929,7 @@ def parse(file, namespaces=1):
return result
-def parseString(string, namespaces=1):
+def parseString(string, namespaces=True):
"""Parse a document from a string, returning the resulting
Document node.
"""
@@ -940,7 +940,7 @@ def parseString(string, namespaces=1):
return builder.parseString(string)
-def parseFragment(file, context, namespaces=1):
+def parseFragment(file, context, namespaces=True):
"""Parse a fragment of a document, given the context from which it
was originally extracted. context should be the parent of the
node(s) which are in the fragment.
@@ -963,7 +963,7 @@ def parseFragment(file, context, namespaces=1):
return result
-def parseFragmentString(string, context, namespaces=1):
+def parseFragmentString(string, context, namespaces=True):
"""Parse a fragment of a document from a string, given the context
from which it was originally extracted. context should be the
parent of the node(s) which are in the fragment.
diff --git a/Lib/xmlcore/dom/minicompat.py b/Lib/xmlcore/dom/minicompat.py
index 364ca45..f99b7fe 100644
--- a/Lib/xmlcore/dom/minicompat.py
+++ b/Lib/xmlcore/dom/minicompat.py
@@ -4,10 +4,6 @@
#
# The following names are defined:
#
-# isinstance -- version of the isinstance() function that accepts
-# tuples as the second parameter regardless of the
-# Python version
-#
# NodeList -- lightest possible NodeList implementation
#
# EmptyNodeList -- lightest possible NodeList that is guaranteed to
@@ -15,8 +11,6 @@
#
# StringTypes -- tuple of defined string types
#
-# GetattrMagic -- base class used to make _get_<attr> be magically
-# invoked when available
# defproperty -- function used in conjunction with GetattrMagic;
# using these together is needed to make them work
# as efficiently as possible in both Python 2.2+
@@ -41,14 +35,8 @@
#
# defproperty() should be used for each version of
# the relevant _get_<property>() function.
-#
-# NewStyle -- base class to cause __slots__ to be honored in
-# the new world
-#
-# True, False -- only for Python 2.2 and earlier
-__all__ = ["NodeList", "EmptyNodeList", "NewStyle",
- "StringTypes", "defproperty", "GetattrMagic"]
+__all__ = ["NodeList", "EmptyNodeList", "StringTypes", "defproperty"]
import xmlcore.dom
@@ -60,125 +48,62 @@ else:
StringTypes = type(''), type(unicode(''))
-# define True and False only if not defined as built-ins
-try:
- True
-except NameError:
- True = 1
- False = 0
- __all__.extend(["True", "False"])
+class NodeList(list):
+ __slots__ = ()
+ def item(self, index):
+ if 0 <= index < len(self):
+ return self[index]
-try:
- isinstance('', StringTypes)
-except TypeError:
- #
- # Wrap isinstance() to make it compatible with the version in
- # Python 2.2 and newer.
- #
- _isinstance = isinstance
- def isinstance(obj, type_or_seq):
- try:
- return _isinstance(obj, type_or_seq)
- except TypeError:
- for t in type_or_seq:
- if _isinstance(obj, t):
- return 1
- return 0
- __all__.append("isinstance")
-
-
-if list is type([]):
- class NodeList(list):
- __slots__ = ()
-
- def item(self, index):
- if 0 <= index < len(self):
- return self[index]
-
- def _get_length(self):
- return len(self)
-
- def _set_length(self, value):
- raise xmlcore.dom.NoModificationAllowedErr(
- "attempt to modify read-only attribute 'length'")
-
- length = property(_get_length, _set_length,
- doc="The number of nodes in the NodeList.")
-
- def __getstate__(self):
- return list(self)
-
- def __setstate__(self, state):
- self[:] = state
-
- class EmptyNodeList(tuple):
- __slots__ = ()
-
- def __add__(self, other):
- NL = NodeList()
- NL.extend(other)
- return NL
-
- def __radd__(self, other):
- NL = NodeList()
- NL.extend(other)
- return NL
-
- def item(self, index):
- return None
-
- def _get_length(self):
- return 0
-
- def _set_length(self, value):
- raise xmlcore.dom.NoModificationAllowedErr(
- "attempt to modify read-only attribute 'length'")
-
- length = property(_get_length, _set_length,
- doc="The number of nodes in the NodeList.")
+ def _get_length(self):
+ return len(self)
-else:
- def NodeList():
- return []
+ def _set_length(self, value):
+ raise xmlcore.dom.NoModificationAllowedErr(
+ "attempt to modify read-only attribute 'length'")
- def EmptyNodeList():
- return []
+ length = property(_get_length, _set_length,
+ doc="The number of nodes in the NodeList.")
+ def __getstate__(self):
+ return list(self)
-try:
- property
-except NameError:
- def defproperty(klass, name, doc):
- # taken care of by the base __getattr__()
- pass
+ def __setstate__(self, state):
+ self[:] = state
- class GetattrMagic:
- def __getattr__(self, key):
- if key.startswith("_"):
- raise AttributeError, key
+class EmptyNodeList(tuple):
+ __slots__ = ()
- try:
- get = getattr(self, "_get_" + key)
- except AttributeError:
- raise AttributeError, key
- return get()
+ def __add__(self, other):
+ NL = NodeList()
+ NL.extend(other)
+ return NL
- class NewStyle:
- pass
+ def __radd__(self, other):
+ NL = NodeList()
+ NL.extend(other)
+ return NL
-else:
- def defproperty(klass, name, doc):
- get = getattr(klass, ("_get_" + name)).im_func
- def set(self, value, name=name):
- raise xmlcore.dom.NoModificationAllowedErr(
- "attempt to modify read-only attribute " + repr(name))
- assert not hasattr(klass, "_set_" + name), \
- "expected not to find _set_" + name
- prop = property(get, set, doc=doc)
- setattr(klass, name, prop)
-
- class GetattrMagic:
- pass
-
- NewStyle = object
+ def item(self, index):
+ return None
+
+ def _get_length(self):
+ return 0
+
+ def _set_length(self, value):
+ raise xmlcore.dom.NoModificationAllowedErr(
+ "attempt to modify read-only attribute 'length'")
+
+ length = property(_get_length, _set_length,
+ doc="The number of nodes in the NodeList.")
+
+
+def defproperty(klass, name, doc):
+ get = getattr(klass, ("_get_" + name)).im_func
+ def set(self, value, name=name):
+ raise xmlcore.dom.NoModificationAllowedErr(
+ "attempt to modify read-only attribute " + repr(name))
+ assert not hasattr(klass, "_set_" + name), \
+ "expected not to find _set_" + name
+ prop = property(get, set, doc=doc)
+ setattr(klass, name, prop)
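(With the pre-2.2 fallbacks gone, defproperty() is the sole survivor: it
turns a class's _get_<name>() method into a read-only property whose
setter always raises. A minimal usage sketch -- Demo is hypothetical:)

    class Demo(object):
        def _get_length(self):
            return 42

    defproperty(Demo, "length", doc="The answer, read-only.")

    d = Demo()
    print d.length              # 42
    try:
        d.length = 0
    except Exception, exc:      # xmlcore.dom.NoModificationAllowedErr
        print 'read-only:', exc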
diff --git a/Lib/xmlcore/dom/minidom.py b/Lib/xmlcore/dom/minidom.py
index 54620e1..a8abd14 100644
--- a/Lib/xmlcore/dom/minidom.py
+++ b/Lib/xmlcore/dom/minidom.py
@@ -20,8 +20,6 @@ from xmlcore.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE, domreg
from xmlcore.dom.minicompat import *
from xmlcore.dom.xmlbuilder import DOMImplementationLS, DocumentLS
-_TupleType = type(())
-
# This is used by the ID-cache invalidation checks; the list isn't
# actually complete, since the nodes being checked will never be the
# DOCUMENT_NODE or DOCUMENT_FRAGMENT_NODE. (The node being checked is
@@ -31,7 +29,7 @@ _nodeTypes_with_children = (xmlcore.dom.Node.ELEMENT_NODE,
xmlcore.dom.Node.ENTITY_REFERENCE_NODE)
-class Node(xmlcore.dom.Node, GetattrMagic):
+class Node(xmlcore.dom.Node):
namespaceURI = None # this is non-null only for elements and attributes
parentNode = None
ownerDocument = None
@@ -459,7 +457,7 @@ defproperty(Attr, "localName", doc="Namespace-local name of this attribute.")
defproperty(Attr, "schemaType", doc="Schema type for this attribute.")
-class NamedNodeMap(NewStyle, GetattrMagic):
+class NamedNodeMap(object):
"""The attribute list is a transient interface to the underlying
dictionaries. Mutations here will change the underlying element's
dictionary.
@@ -523,7 +521,7 @@ class NamedNodeMap(NewStyle, GetattrMagic):
return cmp(id(self), id(other))
def __getitem__(self, attname_or_tuple):
- if isinstance(attname_or_tuple, _TupleType):
+ if isinstance(attname_or_tuple, tuple):
return self._attrsNS[attname_or_tuple]
else:
return self._attrs[attname_or_tuple]
@@ -613,7 +611,7 @@ defproperty(NamedNodeMap, "length",
AttributeList = NamedNodeMap
-class TypeInfo(NewStyle):
+class TypeInfo(object):
__slots__ = 'namespace', 'name'
def __init__(self, namespace, name):
@@ -1146,7 +1144,7 @@ class CDATASection(Text):
writer.write("<![CDATA[%s]]>" % self.data)
-class ReadOnlySequentialNamedNodeMap(NewStyle, GetattrMagic):
+class ReadOnlySequentialNamedNodeMap(object):
__slots__ = '_seq',
def __init__(self, seq=()):
@@ -1170,7 +1168,7 @@ class ReadOnlySequentialNamedNodeMap(NewStyle, GetattrMagic):
return n
def __getitem__(self, name_or_tuple):
- if isinstance(name_or_tuple, _TupleType):
+ if isinstance(name_or_tuple, tuple):
node = self.getNamedItemNS(*name_or_tuple)
else:
node = self.getNamedItem(name_or_tuple)
@@ -1418,7 +1416,7 @@ class DOMImplementation(DOMImplementationLS):
def _create_document(self):
return Document()
-class ElementInfo(NewStyle):
+class ElementInfo(object):
"""Object that represents content-model information for an element.
This implementation is not expected to be used in practice; DOM
diff --git a/Lib/xmlcore/dom/xmlbuilder.py b/Lib/xmlcore/dom/xmlbuilder.py
index d58c723..6566d3c 100644
--- a/Lib/xmlcore/dom/xmlbuilder.py
+++ b/Lib/xmlcore/dom/xmlbuilder.py
@@ -3,8 +3,6 @@
import copy
import xmlcore.dom
-from xmlcore.dom.minicompat import *
-
from xmlcore.dom.NodeFilter import NodeFilter
@@ -211,7 +209,7 @@ def _name_xform(name):
return name.lower().replace('-', '_')
-class DOMEntityResolver(NewStyle):
+class DOMEntityResolver(object):
__slots__ = '_opener',
def resolveEntity(self, publicId, systemId):
@@ -255,7 +253,7 @@ class DOMEntityResolver(NewStyle):
return param.split("=", 1)[1].lower()
-class DOMInputSource(NewStyle):
+class DOMInputSource(object):
__slots__ = ('byteStream', 'characterStream', 'stringData',
'encoding', 'publicId', 'systemId', 'baseURI')